// Doxygen source listing of CGExprScalar.cpp (clang 19.0.0git).
//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/TypeSize.h"
#include <cstdarg>
#include <optional>

using namespace clang;
using namespace CodeGen;
using llvm::Value;
51
//===----------------------------------------------------------------------===//
//                         Scalar Expression Emitter
//===----------------------------------------------------------------------===//
55
56namespace llvm {
57extern cl::opt<bool> EnableSingleByteCoverage;
58} // namespace llvm
59
60namespace {
61
62/// Determine whether the given binary operation may overflow.
63/// Sets \p Result to the value of the operation for BO_Add, BO_Sub, BO_Mul,
64/// and signed BO_{Div,Rem}. For these opcodes, and for unsigned BO_{Div,Rem},
65/// the returned overflow check is precise. The returned value is 'true' for
66/// all other opcodes, to be conservative.
67bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
68 BinaryOperator::Opcode Opcode, bool Signed,
69 llvm::APInt &Result) {
70 // Assume overflow is possible, unless we can prove otherwise.
71 bool Overflow = true;
72 const auto &LHSAP = LHS->getValue();
73 const auto &RHSAP = RHS->getValue();
74 if (Opcode == BO_Add) {
75 Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
76 : LHSAP.uadd_ov(RHSAP, Overflow);
77 } else if (Opcode == BO_Sub) {
78 Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
79 : LHSAP.usub_ov(RHSAP, Overflow);
80 } else if (Opcode == BO_Mul) {
81 Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
82 : LHSAP.umul_ov(RHSAP, Overflow);
83 } else if (Opcode == BO_Div || Opcode == BO_Rem) {
84 if (Signed && !RHS->isZero())
85 Result = LHSAP.sdiv_ov(RHSAP, Overflow);
86 else
87 return false;
88 }
89 return Overflow;
90}
91
92struct BinOpInfo {
93 Value *LHS;
94 Value *RHS;
95 QualType Ty; // Computation Type.
96 BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
97 FPOptions FPFeatures;
98 const Expr *E; // Entire expr, for error unsupported. May not be binop.
99
100 /// Check if the binop can result in integer overflow.
101 bool mayHaveIntegerOverflow() const {
102 // Without constant input, we can't rule out overflow.
103 auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS);
104 auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS);
105 if (!LHSCI || !RHSCI)
106 return true;
107
108 llvm::APInt Result;
109 return ::mayHaveIntegerOverflow(
110 LHSCI, RHSCI, Opcode, Ty->hasSignedIntegerRepresentation(), Result);
111 }
112
113 /// Check if the binop computes a division or a remainder.
114 bool isDivremOp() const {
115 return Opcode == BO_Div || Opcode == BO_Rem || Opcode == BO_DivAssign ||
116 Opcode == BO_RemAssign;
117 }
118
119 /// Check if the binop can result in an integer division by zero.
120 bool mayHaveIntegerDivisionByZero() const {
121 if (isDivremOp())
122 if (auto *CI = dyn_cast<llvm::ConstantInt>(RHS))
123 return CI->isZero();
124 return true;
125 }
126
127 /// Check if the binop can result in a float division by zero.
128 bool mayHaveFloatDivisionByZero() const {
129 if (isDivremOp())
130 if (auto *CFP = dyn_cast<llvm::ConstantFP>(RHS))
131 return CFP->isZero();
132 return true;
133 }
134
135 /// Check if at least one operand is a fixed point type. In such cases, this
136 /// operation did not follow usual arithmetic conversion and both operands
137 /// might not be of the same type.
138 bool isFixedPointOp() const {
139 // We cannot simply check the result type since comparison operations return
140 // an int.
141 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
142 QualType LHSType = BinOp->getLHS()->getType();
143 QualType RHSType = BinOp->getRHS()->getType();
144 return LHSType->isFixedPointType() || RHSType->isFixedPointType();
145 }
146 if (const auto *UnOp = dyn_cast<UnaryOperator>(E))
147 return UnOp->getSubExpr()->getType()->isFixedPointType();
148 return false;
149 }
150
151 /// Check if the RHS has a signed integer representation.
152 bool rhsHasSignedIntegerRepresentation() const {
153 if (const auto *BinOp = dyn_cast<BinaryOperator>(E)) {
154 QualType RHSType = BinOp->getRHS()->getType();
155 return RHSType->hasSignedIntegerRepresentation();
156 }
157 return false;
158 }
159};
160
161static bool MustVisitNullValue(const Expr *E) {
162 // If a null pointer expression's type is the C++0x nullptr_t, then
163 // it's not necessarily a simple constant and it must be evaluated
164 // for its potential side effects.
165 return E->getType()->isNullPtrType();
166}
167
168/// If \p E is a widened promoted integer, get its base (unpromoted) type.
169static std::optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
170 const Expr *E) {
171 const Expr *Base = E->IgnoreImpCasts();
172 if (E == Base)
173 return std::nullopt;
174
175 QualType BaseTy = Base->getType();
176 if (!Ctx.isPromotableIntegerType(BaseTy) ||
177 Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
178 return std::nullopt;
179
180 return BaseTy;
181}
182
183/// Check if \p E is a widened promoted integer.
184static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
185 return getUnwidenedIntegerType(Ctx, E).has_value();
186}
187
188/// Check if we can skip the overflow check for \p Op.
189static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
190 assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
191 "Expected a unary or binary operator");
192
193 // If the binop has constant inputs and we can prove there is no overflow,
194 // we can elide the overflow check.
195 if (!Op.mayHaveIntegerOverflow())
196 return true;
197
198 // If a unary op has a widened operand, the op cannot overflow.
199 if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
200 return !UO->canOverflow();
201
202 // We usually don't need overflow checks for binops with widened operands.
203 // Multiplication with promoted unsigned operands is a special case.
204 const auto *BO = cast<BinaryOperator>(Op.E);
205 auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
206 if (!OptionalLHSTy)
207 return false;
208
209 auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
210 if (!OptionalRHSTy)
211 return false;
212
213 QualType LHSTy = *OptionalLHSTy;
214 QualType RHSTy = *OptionalRHSTy;
215
216 // This is the simple case: binops without unsigned multiplication, and with
217 // widened operands. No overflow check is needed here.
218 if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
219 !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
220 return true;
221
222 // For unsigned multiplication the overflow check can be elided if either one
223 // of the unpromoted types are less than half the size of the promoted type.
224 unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
225 return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
226 (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
227}
228
229class ScalarExprEmitter
230 : public StmtVisitor<ScalarExprEmitter, Value*> {
231 CodeGenFunction &CGF;
232 CGBuilderTy &Builder;
233 bool IgnoreResultAssign;
234 llvm::LLVMContext &VMContext;
235public:
236
237 ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
238 : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
239 VMContext(cgf.getLLVMContext()) {
240 }
241
242 //===--------------------------------------------------------------------===//
243 // Utilities
244 //===--------------------------------------------------------------------===//
245
246 bool TestAndClearIgnoreResultAssign() {
247 bool I = IgnoreResultAssign;
248 IgnoreResultAssign = false;
249 return I;
250 }
251
252 llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
253 LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
254 LValue EmitCheckedLValue(const Expr *E, CodeGenFunction::TypeCheckKind TCK) {
255 return CGF.EmitCheckedLValue(E, TCK);
256 }
257
258 void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerMask>> Checks,
259 const BinOpInfo &Info);
260
261 Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
262 return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
263 }
264
265 void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
266 const AlignValueAttr *AVAttr = nullptr;
267 if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
268 const ValueDecl *VD = DRE->getDecl();
269
270 if (VD->getType()->isReferenceType()) {
271 if (const auto *TTy =
273 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
274 } else {
275 // Assumptions for function parameters are emitted at the start of the
276 // function, so there is no need to repeat that here,
277 // unless the alignment-assumption sanitizer is enabled,
278 // then we prefer the assumption over alignment attribute
279 // on IR function param.
280 if (isa<ParmVarDecl>(VD) && !CGF.SanOpts.has(SanitizerKind::Alignment))
281 return;
282
283 AVAttr = VD->getAttr<AlignValueAttr>();
284 }
285 }
286
287 if (!AVAttr)
288 if (const auto *TTy = E->getType()->getAs<TypedefType>())
289 AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
290
291 if (!AVAttr)
292 return;
293
294 Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
295 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
296 CGF.emitAlignmentAssumption(V, E, AVAttr->getLocation(), AlignmentCI);
297 }
298
299 /// EmitLoadOfLValue - Given an expression with complex type that represents a
300 /// value l-value, this method emits the address of the l-value, then loads
301 /// and returns the result.
302 Value *EmitLoadOfLValue(const Expr *E) {
303 Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
304 E->getExprLoc());
305
306 EmitLValueAlignmentAssumption(E, V);
307 return V;
308 }
309
310 /// EmitConversionToBool - Convert the specified expression value to a
311 /// boolean (i1) truth value. This is equivalent to "Val != 0".
312 Value *EmitConversionToBool(Value *Src, QualType DstTy);
313
314 /// Emit a check that a conversion from a floating-point type does not
315 /// overflow.
316 void EmitFloatConversionCheck(Value *OrigSrc, QualType OrigSrcType,
317 Value *Src, QualType SrcType, QualType DstType,
318 llvm::Type *DstTy, SourceLocation Loc);
319
320 /// Known implicit conversion check kinds.
321 /// This is used for bitfield conversion checks as well.
322 /// Keep in sync with the enum of the same name in ubsan_handlers.h
323 enum ImplicitConversionCheckKind : unsigned char {
324 ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
325 ICCK_UnsignedIntegerTruncation = 1,
326 ICCK_SignedIntegerTruncation = 2,
327 ICCK_IntegerSignChange = 3,
328 ICCK_SignedIntegerTruncationOrSignChange = 4,
329 };
330
331 /// Emit a check that an [implicit] truncation of an integer does not
332 /// discard any bits. It is not UB, so we use the value after truncation.
333 void EmitIntegerTruncationCheck(Value *Src, QualType SrcType, Value *Dst,
334 QualType DstType, SourceLocation Loc);
335
336 /// Emit a check that an [implicit] conversion of an integer does not change
337 /// the sign of the value. It is not UB, so we use the value after conversion.
338 /// NOTE: Src and Dst may be the exact same value! (point to the same thing)
339 void EmitIntegerSignChangeCheck(Value *Src, QualType SrcType, Value *Dst,
340 QualType DstType, SourceLocation Loc);
341
342 /// Emit a conversion from the specified type to the specified destination
343 /// type, both of which are LLVM scalar types.
344 struct ScalarConversionOpts {
345 bool TreatBooleanAsSigned;
346 bool EmitImplicitIntegerTruncationChecks;
347 bool EmitImplicitIntegerSignChangeChecks;
348
349 ScalarConversionOpts()
350 : TreatBooleanAsSigned(false),
351 EmitImplicitIntegerTruncationChecks(false),
352 EmitImplicitIntegerSignChangeChecks(false) {}
353
354 ScalarConversionOpts(clang::SanitizerSet SanOpts)
355 : TreatBooleanAsSigned(false),
356 EmitImplicitIntegerTruncationChecks(
357 SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation)),
358 EmitImplicitIntegerSignChangeChecks(
359 SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
360 };
361 Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
362 llvm::Type *SrcTy, llvm::Type *DstTy,
363 ScalarConversionOpts Opts);
364 Value *
365 EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
367 ScalarConversionOpts Opts = ScalarConversionOpts());
368
369 /// Convert between either a fixed point and other fixed point or fixed point
370 /// and an integer.
371 Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
373
374 /// Emit a conversion from the specified complex type to the specified
375 /// destination type, where the destination type is an LLVM scalar type.
376 Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
377 QualType SrcTy, QualType DstTy,
379
380 /// EmitNullValue - Emit a value that corresponds to null for the given type.
381 Value *EmitNullValue(QualType Ty);
382
383 /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
384 Value *EmitFloatToBoolConversion(Value *V) {
385 // Compare against 0.0 for fp scalars.
386 llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
387 return Builder.CreateFCmpUNE(V, Zero, "tobool");
388 }
389
390 /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
391 Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
392 Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
393
394 return Builder.CreateICmpNE(V, Zero, "tobool");
395 }
396
397 Value *EmitIntToBoolConversion(Value *V) {
398 // Because of the type rules of C, we often end up computing a
399 // logical value, then zero extending it to int, then wanting it
400 // as a logical value again. Optimize this common case.
401 if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
402 if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
403 Value *Result = ZI->getOperand(0);
404 // If there aren't any more uses, zap the instruction to save space.
405 // Note that there can be more uses, for example if this
406 // is the result of an assignment.
407 if (ZI->use_empty())
408 ZI->eraseFromParent();
409 return Result;
410 }
411 }
412
413 return Builder.CreateIsNotNull(V, "tobool");
414 }
415
416 //===--------------------------------------------------------------------===//
417 // Visitor Methods
418 //===--------------------------------------------------------------------===//
419
420 Value *Visit(Expr *E) {
421 ApplyDebugLocation DL(CGF, E);
423 }
424
425 Value *VisitStmt(Stmt *S) {
426 S->dump(llvm::errs(), CGF.getContext());
427 llvm_unreachable("Stmt can't have complex result type!");
428 }
429 Value *VisitExpr(Expr *S);
430
431 Value *VisitConstantExpr(ConstantExpr *E) {
432 // A constant expression of type 'void' generates no code and produces no
433 // value.
434 if (E->getType()->isVoidType())
435 return nullptr;
436
437 if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
438 if (E->isGLValue())
439 return CGF.Builder.CreateLoad(Address(
440 Result, CGF.ConvertTypeForMem(E->getType()),
442 return Result;
443 }
444 return Visit(E->getSubExpr());
445 }
446 Value *VisitParenExpr(ParenExpr *PE) {
447 return Visit(PE->getSubExpr());
448 }
449 Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
450 return Visit(E->getReplacement());
451 }
452 Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
453 return Visit(GE->getResultExpr());
454 }
455 Value *VisitCoawaitExpr(CoawaitExpr *S) {
456 return CGF.EmitCoawaitExpr(*S).getScalarVal();
457 }
458 Value *VisitCoyieldExpr(CoyieldExpr *S) {
459 return CGF.EmitCoyieldExpr(*S).getScalarVal();
460 }
461 Value *VisitUnaryCoawait(const UnaryOperator *E) {
462 return Visit(E->getSubExpr());
463 }
464
465 // Leaves.
466 Value *VisitIntegerLiteral(const IntegerLiteral *E) {
467 return Builder.getInt(E->getValue());
468 }
469 Value *VisitFixedPointLiteral(const FixedPointLiteral *E) {
470 return Builder.getInt(E->getValue());
471 }
472 Value *VisitFloatingLiteral(const FloatingLiteral *E) {
473 return llvm::ConstantFP::get(VMContext, E->getValue());
474 }
475 Value *VisitCharacterLiteral(const CharacterLiteral *E) {
476 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
477 }
478 Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
479 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
480 }
481 Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
482 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
483 }
484 Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
485 if (E->getType()->isVoidType())
486 return nullptr;
487
488 return EmitNullValue(E->getType());
489 }
490 Value *VisitGNUNullExpr(const GNUNullExpr *E) {
491 return EmitNullValue(E->getType());
492 }
493 Value *VisitOffsetOfExpr(OffsetOfExpr *E);
494 Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
495 Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
496 llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
497 return Builder.CreateBitCast(V, ConvertType(E->getType()));
498 }
499
500 Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
501 return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
502 }
503
504 Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
505 return CGF.EmitPseudoObjectRValue(E).getScalarVal();
506 }
507
508 Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
509
510 Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
511 if (E->isGLValue())
512 return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
513 E->getExprLoc());
514
515 // Otherwise, assume the mapping is the scalar directly.
517 }
518
519 // l-values.
520 Value *VisitDeclRefExpr(DeclRefExpr *E) {
521 if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E))
522 return CGF.emitScalarConstant(Constant, E);
523 return EmitLoadOfLValue(E);
524 }
525
526 Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
527 return CGF.EmitObjCSelectorExpr(E);
528 }
529 Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
530 return CGF.EmitObjCProtocolExpr(E);
531 }
532 Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
533 return EmitLoadOfLValue(E);
534 }
535 Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
536 if (E->getMethodDecl() &&
538 return EmitLoadOfLValue(E);
539 return CGF.EmitObjCMessageExpr(E).getScalarVal();
540 }
541
542 Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
543 LValue LV = CGF.EmitObjCIsaExpr(E);
545 return V;
546 }
547
548 Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
549 VersionTuple Version = E->getVersion();
550
551 // If we're checking for a platform older than our minimum deployment
552 // target, we can fold the check away.
553 if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
554 return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
555
556 return CGF.EmitBuiltinAvailable(Version);
557 }
558
559 Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
560 Value *VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E);
561 Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
562 Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
563 Value *VisitMemberExpr(MemberExpr *E);
564 Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
565 Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
566 // Strictly speaking, we shouldn't be calling EmitLoadOfLValue, which
567 // transitively calls EmitCompoundLiteralLValue, here in C++ since compound
568 // literals aren't l-values in C++. We do so simply because that's the
569 // cleanest way to handle compound literals in C++.
570 // See the discussion here: https://reviews.llvm.org/D64464
571 return EmitLoadOfLValue(E);
572 }
573
574 Value *VisitInitListExpr(InitListExpr *E);
575
576 Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
577 assert(CGF.getArrayInitIndex() &&
578 "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
579 return CGF.getArrayInitIndex();
580 }
581
582 Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
583 return EmitNullValue(E->getType());
584 }
585 Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
586 CGF.CGM.EmitExplicitCastExprType(E, &CGF);
587 return VisitCastExpr(E);
588 }
589 Value *VisitCastExpr(CastExpr *E);
590
591 Value *VisitCallExpr(const CallExpr *E) {
593 return EmitLoadOfLValue(E);
594
595 Value *V = CGF.EmitCallExpr(E).getScalarVal();
596
597 EmitLValueAlignmentAssumption(E, V);
598 return V;
599 }
600
601 Value *VisitStmtExpr(const StmtExpr *E);
602
603 // Unary Operators.
604 Value *VisitUnaryPostDec(const UnaryOperator *E) {
605 LValue LV = EmitLValue(E->getSubExpr());
606 return EmitScalarPrePostIncDec(E, LV, false, false);
607 }
608 Value *VisitUnaryPostInc(const UnaryOperator *E) {
609 LValue LV = EmitLValue(E->getSubExpr());
610 return EmitScalarPrePostIncDec(E, LV, true, false);
611 }
612 Value *VisitUnaryPreDec(const UnaryOperator *E) {
613 LValue LV = EmitLValue(E->getSubExpr());
614 return EmitScalarPrePostIncDec(E, LV, false, true);
615 }
616 Value *VisitUnaryPreInc(const UnaryOperator *E) {
617 LValue LV = EmitLValue(E->getSubExpr());
618 return EmitScalarPrePostIncDec(E, LV, true, true);
619 }
620
621 llvm::Value *EmitIncDecConsiderOverflowBehavior(const UnaryOperator *E,
622 llvm::Value *InVal,
623 bool IsInc);
624
625 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
626 bool isInc, bool isPre);
627
628
629 Value *VisitUnaryAddrOf(const UnaryOperator *E) {
630 if (isa<MemberPointerType>(E->getType())) // never sugared
631 return CGF.CGM.getMemberPointerConstant(E);
632
633 return EmitLValue(E->getSubExpr()).getPointer(CGF);
634 }
635 Value *VisitUnaryDeref(const UnaryOperator *E) {
636 if (E->getType()->isVoidType())
637 return Visit(E->getSubExpr()); // the actual value should be unused
638 return EmitLoadOfLValue(E);
639 }
640
641 Value *VisitUnaryPlus(const UnaryOperator *E,
642 QualType PromotionType = QualType());
643 Value *VisitPlus(const UnaryOperator *E, QualType PromotionType);
644 Value *VisitUnaryMinus(const UnaryOperator *E,
645 QualType PromotionType = QualType());
646 Value *VisitMinus(const UnaryOperator *E, QualType PromotionType);
647
648 Value *VisitUnaryNot (const UnaryOperator *E);
649 Value *VisitUnaryLNot (const UnaryOperator *E);
650 Value *VisitUnaryReal(const UnaryOperator *E,
651 QualType PromotionType = QualType());
652 Value *VisitReal(const UnaryOperator *E, QualType PromotionType);
653 Value *VisitUnaryImag(const UnaryOperator *E,
654 QualType PromotionType = QualType());
655 Value *VisitImag(const UnaryOperator *E, QualType PromotionType);
656 Value *VisitUnaryExtension(const UnaryOperator *E) {
657 return Visit(E->getSubExpr());
658 }
659
660 // C++
661 Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
662 return EmitLoadOfLValue(E);
663 }
664 Value *VisitSourceLocExpr(SourceLocExpr *SLE) {
665 auto &Ctx = CGF.getContext();
669 SLE->getType());
670 }
671
672 Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
673 CodeGenFunction::CXXDefaultArgExprScope Scope(CGF, DAE);
674 return Visit(DAE->getExpr());
675 }
676 Value *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *DIE) {
677 CodeGenFunction::CXXDefaultInitExprScope Scope(CGF, DIE);
678 return Visit(DIE->getExpr());
679 }
680 Value *VisitCXXThisExpr(CXXThisExpr *TE) {
681 return CGF.LoadCXXThis();
682 }
683
684 Value *VisitExprWithCleanups(ExprWithCleanups *E);
685 Value *VisitCXXNewExpr(const CXXNewExpr *E) {
686 return CGF.EmitCXXNewExpr(E);
687 }
688 Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
689 CGF.EmitCXXDeleteExpr(E);
690 return nullptr;
691 }
692
693 Value *VisitTypeTraitExpr(const TypeTraitExpr *E) {
694 return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
695 }
696
697 Value *VisitConceptSpecializationExpr(const ConceptSpecializationExpr *E) {
698 return Builder.getInt1(E->isSatisfied());
699 }
700
701 Value *VisitRequiresExpr(const RequiresExpr *E) {
702 return Builder.getInt1(E->isSatisfied());
703 }
704
705 Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
706 return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
707 }
708
709 Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
710 return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
711 }
712
713 Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
714 // C++ [expr.pseudo]p1:
715 // The result shall only be used as the operand for the function call
716 // operator (), and the result of such a call has type void. The only
717 // effect is the evaluation of the postfix-expression before the dot or
718 // arrow.
719 CGF.EmitScalarExpr(E->getBase());
720 return nullptr;
721 }
722
723 Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
724 return EmitNullValue(E->getType());
725 }
726
727 Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
728 CGF.EmitCXXThrowExpr(E);
729 return nullptr;
730 }
731
732 Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
733 return Builder.getInt1(E->getValue());
734 }
735
736 // Binary Operators.
737 Value *EmitMul(const BinOpInfo &Ops) {
738 if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
739 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
741 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
742 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
743 [[fallthrough]];
745 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
746 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
747 [[fallthrough]];
749 if (CanElideOverflowCheck(CGF.getContext(), Ops))
750 return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
751 return EmitOverflowCheckedBinOp(Ops);
752 }
753 }
754
755 if (Ops.Ty->isConstantMatrixType()) {
756 llvm::MatrixBuilder MB(Builder);
757 // We need to check the types of the operands of the operator to get the
758 // correct matrix dimensions.
759 auto *BO = cast<BinaryOperator>(Ops.E);
760 auto *LHSMatTy = dyn_cast<ConstantMatrixType>(
761 BO->getLHS()->getType().getCanonicalType());
762 auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
763 BO->getRHS()->getType().getCanonicalType());
764 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
765 if (LHSMatTy && RHSMatTy)
766 return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
767 LHSMatTy->getNumColumns(),
768 RHSMatTy->getNumColumns());
769 return MB.CreateScalarMultiply(Ops.LHS, Ops.RHS);
770 }
771
772 if (Ops.Ty->isUnsignedIntegerType() &&
773 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
774 !CanElideOverflowCheck(CGF.getContext(), Ops))
775 return EmitOverflowCheckedBinOp(Ops);
776
777 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
778 // Preserve the old values
779 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
780 return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
781 }
782 if (Ops.isFixedPointOp())
783 return EmitFixedPointBinOp(Ops);
784 return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
785 }
786 /// Create a binary op that checks for overflow.
787 /// Currently only supports +, - and *.
788 Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
789
790 // Check for undefined division and modulus behaviors.
791 void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
792 llvm::Value *Zero,bool isDiv);
793 // Common helper for getting how wide LHS of shift is.
794 static Value *GetMaximumShiftAmount(Value *LHS, Value *RHS, bool RHSIsSigned);
795
796 // Used for shifting constraints for OpenCL, do mask for powers of 2, URem for
797 // non powers of two.
798 Value *ConstrainShiftValue(Value *LHS, Value *RHS, const Twine &Name);
799
800 Value *EmitDiv(const BinOpInfo &Ops);
801 Value *EmitRem(const BinOpInfo &Ops);
802 Value *EmitAdd(const BinOpInfo &Ops);
803 Value *EmitSub(const BinOpInfo &Ops);
804 Value *EmitShl(const BinOpInfo &Ops);
805 Value *EmitShr(const BinOpInfo &Ops);
806 Value *EmitAnd(const BinOpInfo &Ops) {
807 return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
808 }
809 Value *EmitXor(const BinOpInfo &Ops) {
810 return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
811 }
812 Value *EmitOr (const BinOpInfo &Ops) {
813 return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
814 }
815
816 // Helper functions for fixed point binary operations.
817 Value *EmitFixedPointBinOp(const BinOpInfo &Ops);
818
819 BinOpInfo EmitBinOps(const BinaryOperator *E,
820 QualType PromotionTy = QualType());
821
822 Value *EmitPromotedValue(Value *result, QualType PromotionType);
823 Value *EmitUnPromotedValue(Value *result, QualType ExprType);
824 Value *EmitPromoted(const Expr *E, QualType PromotionType);
825
826 LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
827 Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
828 Value *&Result);
829
830 Value *EmitCompoundAssign(const CompoundAssignOperator *E,
831 Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
832
833 QualType getPromotionType(QualType Ty) {
834 const auto &Ctx = CGF.getContext();
835 if (auto *CT = Ty->getAs<ComplexType>()) {
836 QualType ElementType = CT->getElementType();
837 if (ElementType.UseExcessPrecision(Ctx))
838 return Ctx.getComplexType(Ctx.FloatTy);
839 }
840
841 if (Ty.UseExcessPrecision(Ctx)) {
842 if (auto *VT = Ty->getAs<VectorType>()) {
843 unsigned NumElements = VT->getNumElements();
844 return Ctx.getVectorType(Ctx.FloatTy, NumElements, VT->getVectorKind());
845 }
846 return Ctx.FloatTy;
847 }
848
849 return QualType();
850 }
851
852 // Binary operators and binary compound assignment operators.
853#define HANDLEBINOP(OP) \
854 Value *VisitBin##OP(const BinaryOperator *E) { \
855 QualType promotionTy = getPromotionType(E->getType()); \
856 auto result = Emit##OP(EmitBinOps(E, promotionTy)); \
857 if (result && !promotionTy.isNull()) \
858 result = EmitUnPromotedValue(result, E->getType()); \
859 return result; \
860 } \
861 Value *VisitBin##OP##Assign(const CompoundAssignOperator *E) { \
862 return EmitCompoundAssign(E, &ScalarExprEmitter::Emit##OP); \
863 }
864 HANDLEBINOP(Mul)
865 HANDLEBINOP(Div)
866 HANDLEBINOP(Rem)
867 HANDLEBINOP(Add)
868 HANDLEBINOP(Sub)
869 HANDLEBINOP(Shl)
870 HANDLEBINOP(Shr)
872 HANDLEBINOP(Xor)
874#undef HANDLEBINOP
875
876 // Comparisons.
877 Value *EmitCompare(const BinaryOperator *E, llvm::CmpInst::Predicate UICmpOpc,
878 llvm::CmpInst::Predicate SICmpOpc,
879 llvm::CmpInst::Predicate FCmpOpc, bool IsSignaling);
880#define VISITCOMP(CODE, UI, SI, FP, SIG) \
881 Value *VisitBin##CODE(const BinaryOperator *E) { \
882 return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
883 llvm::FCmpInst::FP, SIG); }
884 VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT, true)
885 VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT, true)
886 VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE, true)
887 VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE, true)
888 VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ, false)
889 VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE, false)
890#undef VISITCOMP
891
892 Value *VisitBinAssign (const BinaryOperator *E);
893
894 Value *VisitBinLAnd (const BinaryOperator *E);
895 Value *VisitBinLOr (const BinaryOperator *E);
896 Value *VisitBinComma (const BinaryOperator *E);
897
898 Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
899 Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
900
901 Value *VisitCXXRewrittenBinaryOperator(CXXRewrittenBinaryOperator *E) {
902 return Visit(E->getSemanticForm());
903 }
904
905 // Other Operators.
906 Value *VisitBlockExpr(const BlockExpr *BE);
907 Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
908 Value *VisitChooseExpr(ChooseExpr *CE);
909 Value *VisitVAArgExpr(VAArgExpr *VE);
910 Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
911 return CGF.EmitObjCStringLiteral(E);
912 }
913 Value *VisitObjCBoxedExpr(ObjCBoxedExpr *E) {
914 return CGF.EmitObjCBoxedExpr(E);
915 }
916 Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
917 return CGF.EmitObjCArrayLiteral(E);
918 }
919 Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
920 return CGF.EmitObjCDictionaryLiteral(E);
921 }
922 Value *VisitAsTypeExpr(AsTypeExpr *CE);
923 Value *VisitAtomicExpr(AtomicExpr *AE);
924 Value *VisitPackIndexingExpr(PackIndexingExpr *E) {
925 return Visit(E->getSelectedExpr());
926 }
927};
928} // end anonymous namespace.
929
930//===----------------------------------------------------------------------===//
931// Utilities
932//===----------------------------------------------------------------------===//
933
934/// EmitConversionToBool - Convert the specified expression value to a
935/// boolean (i1) truth value. This is equivalent to "Val != 0".
936Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
937 assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
938
939 if (SrcType->isRealFloatingType())
940 return EmitFloatToBoolConversion(Src);
941
942 if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
943 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
944
945 assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
946 "Unknown scalar type to convert");
947
948 if (isa<llvm::IntegerType>(Src->getType()))
949 return EmitIntToBoolConversion(Src);
950
951 assert(isa<llvm::PointerType>(Src->getType()));
952 return EmitPointerToBoolConversion(Src, SrcType);
953}
954
955void ScalarExprEmitter::EmitFloatConversionCheck(
956 Value *OrigSrc, QualType OrigSrcType, Value *Src, QualType SrcType,
957 QualType DstType, llvm::Type *DstTy, SourceLocation Loc) {
958 assert(SrcType->isFloatingType() && "not a conversion from floating point");
959 if (!isa<llvm::IntegerType>(DstTy))
960 return;
961
962 CodeGenFunction::SanitizerScope SanScope(&CGF);
963 using llvm::APFloat;
964 using llvm::APSInt;
965
966 llvm::Value *Check = nullptr;
967 const llvm::fltSemantics &SrcSema =
968 CGF.getContext().getFloatTypeSemantics(OrigSrcType);
969
970 // Floating-point to integer. This has undefined behavior if the source is
971 // +-Inf, NaN, or doesn't fit into the destination type (after truncation
972 // to an integer).
973 unsigned Width = CGF.getContext().getIntWidth(DstType);
975
976 APSInt Min = APSInt::getMinValue(Width, Unsigned);
977 APFloat MinSrc(SrcSema, APFloat::uninitialized);
978 if (MinSrc.convertFromAPInt(Min, !Unsigned, APFloat::rmTowardZero) &
979 APFloat::opOverflow)
980 // Don't need an overflow check for lower bound. Just check for
981 // -Inf/NaN.
982 MinSrc = APFloat::getInf(SrcSema, true);
983 else
984 // Find the largest value which is too small to represent (before
985 // truncation toward zero).
986 MinSrc.subtract(APFloat(SrcSema, 1), APFloat::rmTowardNegative);
987
988 APSInt Max = APSInt::getMaxValue(Width, Unsigned);
989 APFloat MaxSrc(SrcSema, APFloat::uninitialized);
990 if (MaxSrc.convertFromAPInt(Max, !Unsigned, APFloat::rmTowardZero) &
991 APFloat::opOverflow)
992 // Don't need an overflow check for upper bound. Just check for
993 // +Inf/NaN.
994 MaxSrc = APFloat::getInf(SrcSema, false);
995 else
996 // Find the smallest value which is too large to represent (before
997 // truncation toward zero).
998 MaxSrc.add(APFloat(SrcSema, 1), APFloat::rmTowardPositive);
999
1000 // If we're converting from __half, convert the range to float to match
1001 // the type of src.
1002 if (OrigSrcType->isHalfType()) {
1003 const llvm::fltSemantics &Sema =
1004 CGF.getContext().getFloatTypeSemantics(SrcType);
1005 bool IsInexact;
1006 MinSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1007 MaxSrc.convert(Sema, APFloat::rmTowardZero, &IsInexact);
1008 }
1009
1010 llvm::Value *GE =
1011 Builder.CreateFCmpOGT(Src, llvm::ConstantFP::get(VMContext, MinSrc));
1012 llvm::Value *LE =
1013 Builder.CreateFCmpOLT(Src, llvm::ConstantFP::get(VMContext, MaxSrc));
1014 Check = Builder.CreateAnd(GE, LE);
1015
1016 llvm::Constant *StaticArgs[] = {CGF.EmitCheckSourceLocation(Loc),
1017 CGF.EmitCheckTypeDescriptor(OrigSrcType),
1018 CGF.EmitCheckTypeDescriptor(DstType)};
1019 CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
1020 SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
1021}
1022
1023// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1024// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1025static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1026 std::pair<llvm::Value *, SanitizerMask>>
1028 QualType DstType, CGBuilderTy &Builder) {
1029 llvm::Type *SrcTy = Src->getType();
1030 llvm::Type *DstTy = Dst->getType();
1031 (void)DstTy; // Only used in assert()
1032
1033 // This should be truncation of integral types.
1034 assert(Src != Dst);
1035 assert(SrcTy->getScalarSizeInBits() > Dst->getType()->getScalarSizeInBits());
1036 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1037 "non-integer llvm type");
1038
1039 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1040 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1041
1042 // If both (src and dst) types are unsigned, then it's an unsigned truncation.
1043 // Else, it is a signed truncation.
1044 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1045 SanitizerMask Mask;
1046 if (!SrcSigned && !DstSigned) {
1047 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1048 Mask = SanitizerKind::ImplicitUnsignedIntegerTruncation;
1049 } else {
1050 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1051 Mask = SanitizerKind::ImplicitSignedIntegerTruncation;
1052 }
1053
1054 llvm::Value *Check = nullptr;
1055 // 1. Extend the truncated value back to the same width as the Src.
1056 Check = Builder.CreateIntCast(Dst, SrcTy, DstSigned, "anyext");
1057 // 2. Equality-compare with the original source value
1058 Check = Builder.CreateICmpEQ(Check, Src, "truncheck");
1059 // If the comparison result is 'i1 false', then the truncation was lossy.
1060 return std::make_pair(Kind, std::make_pair(Check, Mask));
1061}
1062
1064 QualType SrcType, QualType DstType) {
1065 return SrcType->isIntegerType() && DstType->isIntegerType();
1066}
1067
1068void ScalarExprEmitter::EmitIntegerTruncationCheck(Value *Src, QualType SrcType,
1069 Value *Dst, QualType DstType,
1071 if (!CGF.SanOpts.hasOneOf(SanitizerKind::ImplicitIntegerTruncation))
1072 return;
1073
1074 // We only care about int->int conversions here.
1075 // We ignore conversions to/from pointer and/or bool.
1077 DstType))
1078 return;
1079
1080 unsigned SrcBits = Src->getType()->getScalarSizeInBits();
1081 unsigned DstBits = Dst->getType()->getScalarSizeInBits();
1082 // This must be truncation. Else we do not care.
1083 if (SrcBits <= DstBits)
1084 return;
1085
1086 assert(!DstType->isBooleanType() && "we should not get here with booleans.");
1087
1088 // If the integer sign change sanitizer is enabled,
1089 // and we are truncating from larger unsigned type to smaller signed type,
1090 // let that next sanitizer deal with it.
1091 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1092 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1093 if (CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange) &&
1094 (!SrcSigned && DstSigned))
1095 return;
1096
1097 CodeGenFunction::SanitizerScope SanScope(&CGF);
1098
1099 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1100 std::pair<llvm::Value *, SanitizerMask>>
1101 Check =
1102 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1103 // If the comparison result is 'i1 false', then the truncation was lossy.
1104
1105 // Do we care about this type of truncation?
1106 if (!CGF.SanOpts.has(Check.second.second))
1107 return;
1108
1109 llvm::Constant *StaticArgs[] = {
1111 CGF.EmitCheckTypeDescriptor(DstType),
1112 llvm::ConstantInt::get(Builder.getInt8Ty(), Check.first),
1113 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1114
1115 CGF.EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1116 {Src, Dst});
1117}
1118
1119static llvm::Value *EmitIsNegativeTestHelper(Value *V, QualType VType,
1120 const char *Name,
1121 CGBuilderTy &Builder) {
1122 bool VSigned = VType->isSignedIntegerOrEnumerationType();
1123 llvm::Type *VTy = V->getType();
1124 if (!VSigned) {
1125 // If the value is unsigned, then it is never negative.
1126 return llvm::ConstantInt::getFalse(VTy->getContext());
1127 }
1128 llvm::Constant *Zero = llvm::ConstantInt::get(VTy, 0);
1129 return Builder.CreateICmp(llvm::ICmpInst::ICMP_SLT, V, Zero,
1130 llvm::Twine(Name) + "." + V->getName() +
1131 ".negativitycheck");
1132}
1133
1134// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1135// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1136static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1137 std::pair<llvm::Value *, SanitizerMask>>
1139 QualType DstType, CGBuilderTy &Builder) {
1140 llvm::Type *SrcTy = Src->getType();
1141 llvm::Type *DstTy = Dst->getType();
1142
1143 assert(isa<llvm::IntegerType>(SrcTy) && isa<llvm::IntegerType>(DstTy) &&
1144 "non-integer llvm type");
1145
1146 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1147 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1148 (void)SrcSigned; // Only used in assert()
1149 (void)DstSigned; // Only used in assert()
1150 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1151 unsigned DstBits = DstTy->getScalarSizeInBits();
1152 (void)SrcBits; // Only used in assert()
1153 (void)DstBits; // Only used in assert()
1154
1155 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1156 "either the widths should be different, or the signednesses.");
1157
1158 // 1. Was the old Value negative?
1159 llvm::Value *SrcIsNegative =
1160 EmitIsNegativeTestHelper(Src, SrcType, "src", Builder);
1161 // 2. Is the new Value negative?
1162 llvm::Value *DstIsNegative =
1163 EmitIsNegativeTestHelper(Dst, DstType, "dst", Builder);
1164 // 3. Now, was the 'negativity status' preserved during the conversion?
1165 // NOTE: conversion from negative to zero is considered to change the sign.
1166 // (We want to get 'false' when the conversion changed the sign)
1167 // So we should just equality-compare the negativity statuses.
1168 llvm::Value *Check = nullptr;
1169 Check = Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "signchangecheck");
1170 // If the comparison result is 'false', then the conversion changed the sign.
1171 return std::make_pair(
1172 ScalarExprEmitter::ICCK_IntegerSignChange,
1173 std::make_pair(Check, SanitizerKind::ImplicitIntegerSignChange));
1174}
1175
1176void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
1177 Value *Dst, QualType DstType,
1179 if (!CGF.SanOpts.has(SanitizerKind::ImplicitIntegerSignChange))
1180 return;
1181
1182 llvm::Type *SrcTy = Src->getType();
1183 llvm::Type *DstTy = Dst->getType();
1184
1185 // We only care about int->int conversions here.
1186 // We ignore conversions to/from pointer and/or bool.
1188 DstType))
1189 return;
1190
1191 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1192 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1193 unsigned SrcBits = SrcTy->getScalarSizeInBits();
1194 unsigned DstBits = DstTy->getScalarSizeInBits();
1195
1196 // Now, we do not need to emit the check in *all* of the cases.
1197 // We can avoid emitting it in some obvious cases where it would have been
1198 // dropped by the opt passes (instcombine) always anyways.
1199 // If it's a cast between effectively the same type, no check.
1200 // NOTE: this is *not* equivalent to checking the canonical types.
1201 if (SrcSigned == DstSigned && SrcBits == DstBits)
1202 return;
1203 // At least one of the values needs to have signed type.
1204 // If both are unsigned, then obviously, neither of them can be negative.
1205 if (!SrcSigned && !DstSigned)
1206 return;
1207 // If the conversion is to *larger* *signed* type, then no check is needed.
1208 // Because either sign-extension happens (so the sign will remain),
1209 // or zero-extension will happen (the sign bit will be zero.)
1210 if ((DstBits > SrcBits) && DstSigned)
1211 return;
1212 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1213 (SrcBits > DstBits) && SrcSigned) {
1214 // If the signed integer truncation sanitizer is enabled,
1215 // and this is a truncation from signed type, then no check is needed.
1216 // Because here sign change check is interchangeable with truncation check.
1217 return;
1218 }
1219 // That's it. We can't rule out any more cases with the data we have.
1220
1221 CodeGenFunction::SanitizerScope SanScope(&CGF);
1222
1223 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1224 std::pair<llvm::Value *, SanitizerMask>>
1225 Check;
1226
1227 // Each of these checks needs to return 'false' when an issue was detected.
1228 ImplicitConversionCheckKind CheckKind;
1230 // So we can 'and' all the checks together, and still get 'false',
1231 // if at least one of the checks detected an issue.
1232
1233 Check = EmitIntegerSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1234 CheckKind = Check.first;
1235 Checks.emplace_back(Check.second);
1236
1237 if (CGF.SanOpts.has(SanitizerKind::ImplicitSignedIntegerTruncation) &&
1238 (SrcBits > DstBits) && !SrcSigned && DstSigned) {
1239 // If the signed integer truncation sanitizer was enabled,
1240 // and we are truncating from larger unsigned type to smaller signed type,
1241 // let's handle the case we skipped in that check.
1242 Check =
1243 EmitIntegerTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1244 CheckKind = ICCK_SignedIntegerTruncationOrSignChange;
1245 Checks.emplace_back(Check.second);
1246 // If the comparison result is 'i1 false', then the truncation was lossy.
1247 }
1248
1249 llvm::Constant *StaticArgs[] = {
1251 CGF.EmitCheckTypeDescriptor(DstType),
1252 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1253 llvm::ConstantInt::get(Builder.getInt32Ty(), 0)};
1254 // EmitCheck() will 'and' all the checks together.
1255 CGF.EmitCheck(Checks, SanitizerHandler::ImplicitConversion, StaticArgs,
1256 {Src, Dst});
1257}
1258
1259// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1260// Returns 'i1 false' when the truncation Src -> Dst was lossy.
1261static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1262 std::pair<llvm::Value *, SanitizerMask>>
1264 QualType DstType, CGBuilderTy &Builder) {
1265 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1266 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1267
1268 ScalarExprEmitter::ImplicitConversionCheckKind Kind;
1269 if (!SrcSigned && !DstSigned)
1270 Kind = ScalarExprEmitter::ICCK_UnsignedIntegerTruncation;
1271 else
1272 Kind = ScalarExprEmitter::ICCK_SignedIntegerTruncation;
1273
1274 llvm::Value *Check = nullptr;
1275 // 1. Extend the truncated value back to the same width as the Src.
1276 Check = Builder.CreateIntCast(Dst, Src->getType(), DstSigned, "bf.anyext");
1277 // 2. Equality-compare with the original source value
1278 Check = Builder.CreateICmpEQ(Check, Src, "bf.truncheck");
1279 // If the comparison result is 'i1 false', then the truncation was lossy.
1280
1281 return std::make_pair(
1282 Kind, std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1283}
1284
1285// Should be called within CodeGenFunction::SanitizerScope RAII scope.
1286// Returns 'i1 false' when the conversion Src -> Dst changed the sign.
1287static std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1288 std::pair<llvm::Value *, SanitizerMask>>
1290 QualType DstType, CGBuilderTy &Builder) {
1291 // 1. Was the old Value negative?
1292 llvm::Value *SrcIsNegative =
1293 EmitIsNegativeTestHelper(Src, SrcType, "bf.src", Builder);
1294 // 2. Is the new Value negative?
1295 llvm::Value *DstIsNegative =
1296 EmitIsNegativeTestHelper(Dst, DstType, "bf.dst", Builder);
1297 // 3. Now, was the 'negativity status' preserved during the conversion?
1298 // NOTE: conversion from negative to zero is considered to change the sign.
1299 // (We want to get 'false' when the conversion changed the sign)
1300 // So we should just equality-compare the negativity statuses.
1301 llvm::Value *Check = nullptr;
1302 Check =
1303 Builder.CreateICmpEQ(SrcIsNegative, DstIsNegative, "bf.signchangecheck");
1304 // If the comparison result is 'false', then the conversion changed the sign.
1305 return std::make_pair(
1306 ScalarExprEmitter::ICCK_IntegerSignChange,
1307 std::make_pair(Check, SanitizerKind::ImplicitBitfieldConversion));
1308}
1309
1310void CodeGenFunction::EmitBitfieldConversionCheck(Value *Src, QualType SrcType,
1311 Value *Dst, QualType DstType,
1312 const CGBitFieldInfo &Info,
1314
1315 if (!SanOpts.has(SanitizerKind::ImplicitBitfieldConversion))
1316 return;
1317
1318 // We only care about int->int conversions here.
1319 // We ignore conversions to/from pointer and/or bool.
1321 DstType))
1322 return;
1323
1324 if (DstType->isBooleanType() || SrcType->isBooleanType())
1325 return;
1326
1327 // This should be truncation of integral types.
1328 assert(isa<llvm::IntegerType>(Src->getType()) &&
1329 isa<llvm::IntegerType>(Dst->getType()) && "non-integer llvm type");
1330
1331 // TODO: Calculate src width to avoid emitting code
1332 // for unecessary cases.
1333 unsigned SrcBits = ConvertType(SrcType)->getScalarSizeInBits();
1334 unsigned DstBits = Info.Size;
1335
1336 bool SrcSigned = SrcType->isSignedIntegerOrEnumerationType();
1337 bool DstSigned = DstType->isSignedIntegerOrEnumerationType();
1338
1339 CodeGenFunction::SanitizerScope SanScope(this);
1340
1341 std::pair<ScalarExprEmitter::ImplicitConversionCheckKind,
1342 std::pair<llvm::Value *, SanitizerMask>>
1343 Check;
1344
1345 // Truncation
1346 bool EmitTruncation = DstBits < SrcBits;
1347 // If Dst is signed and Src unsigned, we want to be more specific
1348 // about the CheckKind we emit, in this case we want to emit
1349 // ICCK_SignedIntegerTruncationOrSignChange.
1350 bool EmitTruncationFromUnsignedToSigned =
1351 EmitTruncation && DstSigned && !SrcSigned;
1352 // Sign change
1353 bool SameTypeSameSize = SrcSigned == DstSigned && SrcBits == DstBits;
1354 bool BothUnsigned = !SrcSigned && !DstSigned;
1355 bool LargerSigned = (DstBits > SrcBits) && DstSigned;
1356 // We can avoid emitting sign change checks in some obvious cases
1357 // 1. If Src and Dst have the same signedness and size
1358 // 2. If both are unsigned sign check is unecessary!
1359 // 3. If Dst is signed and bigger than Src, either
1360 // sign-extension or zero-extension will make sure
1361 // the sign remains.
1362 bool EmitSignChange = !SameTypeSameSize && !BothUnsigned && !LargerSigned;
1363
1364 if (EmitTruncation)
1365 Check =
1366 EmitBitfieldTruncationCheckHelper(Src, SrcType, Dst, DstType, Builder);
1367 else if (EmitSignChange) {
1368 assert(((SrcBits != DstBits) || (SrcSigned != DstSigned)) &&
1369 "either the widths should be different, or the signednesses.");
1370 Check =
1371 EmitBitfieldSignChangeCheckHelper(Src, SrcType, Dst, DstType, Builder);
1372 } else
1373 return;
1374
1375 ScalarExprEmitter::ImplicitConversionCheckKind CheckKind = Check.first;
1376 if (EmitTruncationFromUnsignedToSigned)
1377 CheckKind = ScalarExprEmitter::ICCK_SignedIntegerTruncationOrSignChange;
1378
1379 llvm::Constant *StaticArgs[] = {
1381 EmitCheckTypeDescriptor(DstType),
1382 llvm::ConstantInt::get(Builder.getInt8Ty(), CheckKind),
1383 llvm::ConstantInt::get(Builder.getInt32Ty(), Info.Size)};
1384
1385 EmitCheck(Check.second, SanitizerHandler::ImplicitConversion, StaticArgs,
1386 {Src, Dst});
1387}
1388
1389Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
1390 QualType DstType, llvm::Type *SrcTy,
1391 llvm::Type *DstTy,
1392 ScalarConversionOpts Opts) {
1393 // The Element types determine the type of cast to perform.
1394 llvm::Type *SrcElementTy;
1395 llvm::Type *DstElementTy;
1396 QualType SrcElementType;
1397 QualType DstElementType;
1398 if (SrcType->isMatrixType() && DstType->isMatrixType()) {
1399 SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1400 DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1401 SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
1402 DstElementType = DstType->castAs<MatrixType>()->getElementType();
1403 } else {
1404 assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
1405 "cannot cast between matrix and non-matrix types");
1406 SrcElementTy = SrcTy;
1407 DstElementTy = DstTy;
1408 SrcElementType = SrcType;
1409 DstElementType = DstType;
1410 }
1411
1412 if (isa<llvm::IntegerType>(SrcElementTy)) {
1413 bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
1414 if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
1415 InputSigned = true;
1416 }
1417
1418 if (isa<llvm::IntegerType>(DstElementTy))
1419 return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1420 if (InputSigned)
1421 return Builder.CreateSIToFP(Src, DstTy, "conv");
1422 return Builder.CreateUIToFP(Src, DstTy, "conv");
1423 }
1424
1425 if (isa<llvm::IntegerType>(DstElementTy)) {
1426 assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
1427 bool IsSigned = DstElementType->isSignedIntegerOrEnumerationType();
1428
1429 // If we can't recognize overflow as undefined behavior, assume that
1430 // overflow saturates. This protects against normal optimizations if we are
1431 // compiling with non-standard FP semantics.
1432 if (!CGF.CGM.getCodeGenOpts().StrictFloatCastOverflow) {
1433 llvm::Intrinsic::ID IID =
1434 IsSigned ? llvm::Intrinsic::fptosi_sat : llvm::Intrinsic::fptoui_sat;
1435 return Builder.CreateCall(CGF.CGM.getIntrinsic(IID, {DstTy, SrcTy}), Src);
1436 }
1437
1438 if (IsSigned)
1439 return Builder.CreateFPToSI(Src, DstTy, "conv");
1440 return Builder.CreateFPToUI(Src, DstTy, "conv");
1441 }
1442
1443 if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
1444 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1445 return Builder.CreateFPExt(Src, DstTy, "conv");
1446}
1447
1448/// Emit a conversion from the specified type to the specified destination type,
1449/// both of which are LLVM scalar types.
1450Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
1451 QualType DstType,
1453 ScalarConversionOpts Opts) {
1454 // All conversions involving fixed point types should be handled by the
1455 // EmitFixedPoint family functions. This is done to prevent bloating up this
1456 // function more, and although fixed point numbers are represented by
1457 // integers, we do not want to follow any logic that assumes they should be
1458 // treated as integers.
1459 // TODO(leonardchan): When necessary, add another if statement checking for
1460 // conversions to fixed point types from other types.
1461 if (SrcType->isFixedPointType()) {
1462 if (DstType->isBooleanType())
1463 // It is important that we check this before checking if the dest type is
1464 // an integer because booleans are technically integer types.
1465 // We do not need to check the padding bit on unsigned types if unsigned
1466 // padding is enabled because overflow into this bit is undefined
1467 // behavior.
1468 return Builder.CreateIsNotNull(Src, "tobool");
1469 if (DstType->isFixedPointType() || DstType->isIntegerType() ||
1470 DstType->isRealFloatingType())
1471 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1472
1473 llvm_unreachable(
1474 "Unhandled scalar conversion from a fixed point type to another type.");
1475 } else if (DstType->isFixedPointType()) {
1476 if (SrcType->isIntegerType() || SrcType->isRealFloatingType())
1477 // This also includes converting booleans and enums to fixed point types.
1478 return EmitFixedPointConversion(Src, SrcType, DstType, Loc);
1479
1480 llvm_unreachable(
1481 "Unhandled scalar conversion to a fixed point type from another type.");
1482 }
1483
1484 QualType NoncanonicalSrcType = SrcType;
1485 QualType NoncanonicalDstType = DstType;
1486
1487 SrcType = CGF.getContext().getCanonicalType(SrcType);
1488 DstType = CGF.getContext().getCanonicalType(DstType);
1489 if (SrcType == DstType) return Src;
1490
1491 if (DstType->isVoidType()) return nullptr;
1492
1493 llvm::Value *OrigSrc = Src;
1494 QualType OrigSrcType = SrcType;
1495 llvm::Type *SrcTy = Src->getType();
1496
1497 // Handle conversions to bool first, they are special: comparisons against 0.
1498 if (DstType->isBooleanType())
1499 return EmitConversionToBool(Src, SrcType);
1500
1501 llvm::Type *DstTy = ConvertType(DstType);
1502
1503 // Cast from half through float if half isn't a native type.
1504 if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1505 // Cast to FP using the intrinsic if the half type itself isn't supported.
1506 if (DstTy->isFloatingPointTy()) {
1508 return Builder.CreateCall(
1509 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16, DstTy),
1510 Src);
1511 } else {
1512 // Cast to other types through float, using either the intrinsic or FPExt,
1513 // depending on whether the half type itself is supported
1514 // (as opposed to operations on half, available with NativeHalfType).
1516 Src = Builder.CreateCall(
1517 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
1518 CGF.CGM.FloatTy),
1519 Src);
1520 } else {
1521 Src = Builder.CreateFPExt(Src, CGF.CGM.FloatTy, "conv");
1522 }
1523 SrcType = CGF.getContext().FloatTy;
1524 SrcTy = CGF.FloatTy;
1525 }
1526 }
1527
1528 // Ignore conversions like int -> uint.
1529 if (SrcTy == DstTy) {
1530 if (Opts.EmitImplicitIntegerSignChangeChecks)
1531 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Src,
1532 NoncanonicalDstType, Loc);
1533
1534 return Src;
1535 }
1536
1537 // Handle pointer conversions next: pointers can only be converted to/from
1538 // other pointers and integers. Check for pointer types in terms of LLVM, as
1539 // some native types (like Obj-C id) may map to a pointer type.
1540 if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
1541 // The source value may be an integer, or a pointer.
1542 if (isa<llvm::PointerType>(SrcTy))
1543 return Src;
1544
1545 assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
1546 // First, convert to the correct width so that we control the kind of
1547 // extension.
1548 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
1549 bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
1550 llvm::Value* IntResult =
1551 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
1552 // Then, cast to pointer.
1553 return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
1554 }
1555
1556 if (isa<llvm::PointerType>(SrcTy)) {
1557 // Must be an ptr to int cast.
1558 assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
1559 return Builder.CreatePtrToInt(Src, DstTy, "conv");
1560 }
1561
1562 // A scalar can be splatted to an extended vector of the same element type
1563 if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
1564 // Sema should add casts to make sure that the source expression's type is
1565 // the same as the vector's element type (sans qualifiers)
1566 assert(DstType->castAs<ExtVectorType>()->getElementType().getTypePtr() ==
1567 SrcType.getTypePtr() &&
1568 "Splatted expr doesn't match with vector element type?");
1569
1570 // Splat the element across to all elements
1571 unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
1572 return Builder.CreateVectorSplat(NumElements, Src, "splat");
1573 }
1574
1575 if (SrcType->isMatrixType() && DstType->isMatrixType())
1576 return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1577
1578 if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
1579 // Allow bitcast from vector to integer/fp of the same size.
1580 llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
1581 llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
1582 if (SrcSize == DstSize)
1583 return Builder.CreateBitCast(Src, DstTy, "conv");
1584
1585 // Conversions between vectors of different sizes are not allowed except
1586 // when vectors of half are involved. Operations on storage-only half
1587 // vectors require promoting half vector operands to float vectors and
1588 // truncating the result, which is either an int or float vector, to a
1589 // short or half vector.
1590
1591 // Source and destination are both expected to be vectors.
1592 llvm::Type *SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
1593 llvm::Type *DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
1594 (void)DstElementTy;
1595
1596 assert(((SrcElementTy->isIntegerTy() &&
1597 DstElementTy->isIntegerTy()) ||
1598 (SrcElementTy->isFloatingPointTy() &&
1599 DstElementTy->isFloatingPointTy())) &&
1600 "unexpected conversion between a floating-point vector and an "
1601 "integer vector");
1602
1603 // Truncate an i32 vector to an i16 vector.
1604 if (SrcElementTy->isIntegerTy())
1605 return Builder.CreateIntCast(Src, DstTy, false, "conv");
1606
1607 // Truncate a float vector to a half vector.
1608 if (SrcSize > DstSize)
1609 return Builder.CreateFPTrunc(Src, DstTy, "conv");
1610
1611 // Promote a half vector to a float vector.
1612 return Builder.CreateFPExt(Src, DstTy, "conv");
1613 }
1614
1615 // Finally, we have the arithmetic types: real int/float.
1616 Value *Res = nullptr;
1617 llvm::Type *ResTy = DstTy;
1618
1619 // An overflowing conversion has undefined behavior if either the source type
1620 // or the destination type is a floating-point type. However, we consider the
1621 // range of representable values for all floating-point types to be
1622 // [-inf,+inf], so no overflow can ever happen when the destination type is a
1623 // floating-point type.
1624 if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
1625 OrigSrcType->isFloatingType())
1626 EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType, DstTy,
1627 Loc);
1628
1629 // Cast to half through float if half isn't a native type.
1630 if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
1631 // Make sure we cast in a single step if from another FP type.
1632 if (SrcTy->isFloatingPointTy()) {
1633 // Use the intrinsic if the half type itself isn't supported
1634 // (as opposed to operations on half, available with NativeHalfType).
1636 return Builder.CreateCall(
1637 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, SrcTy), Src);
1638 // If the half type is supported, just use an fptrunc.
1639 return Builder.CreateFPTrunc(Src, DstTy);
1640 }
1641 DstTy = CGF.FloatTy;
1642 }
1643
1644 Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
1645
1646 if (DstTy != ResTy) {
1648 assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
1649 Res = Builder.CreateCall(
1650 CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16, CGF.CGM.FloatTy),
1651 Res);
1652 } else {
1653 Res = Builder.CreateFPTrunc(Res, ResTy, "conv");
1654 }
1655 }
1656
1657 if (Opts.EmitImplicitIntegerTruncationChecks)
1658 EmitIntegerTruncationCheck(Src, NoncanonicalSrcType, Res,
1659 NoncanonicalDstType, Loc);
1660
1661 if (Opts.EmitImplicitIntegerSignChangeChecks)
1662 EmitIntegerSignChangeCheck(Src, NoncanonicalSrcType, Res,
1663 NoncanonicalDstType, Loc);
1664
1665 return Res;
1666}
1667
/// Convert \p Src between fixed-point, integer, and floating-point types
/// using llvm::FixedPointBuilder; at least one endpoint is a fixed-point
/// type (otherwise the generic scalar-conversion path is used).
// NOTE(review): the tail of this parameter list (the SourceLocation
// parameter and opening brace) was elided in this extraction -- confirm
// against the full source.
Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                   QualType DstTy,
  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
  llvm::Value *Result;
  // Floating-point source: scale/round into the destination's fixed-point
  // semantics.
  if (SrcTy->isRealFloatingType())
    Result = FPBuilder.CreateFloatingToFixed(Src,
        CGF.getContext().getFixedPointSemantics(DstTy));
  // Fixed-point source to a floating-point destination.
  // NOTE(review): the argument carrying the source fixed-point semantics
  // appears to be elided here -- confirm against the full source.
  else if (DstTy->isRealFloatingType())
    Result = FPBuilder.CreateFixedToFloating(Src,
        ConvertType(DstTy));
  else {
    // Both endpoints are fixed-point or integer: dispatch on which side is
    // the plain integer.
    auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
    auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);

    if (DstTy->isIntegerType())
      Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
                                              DstFPSema.getWidth(),
                                              DstFPSema.isSigned());
    else if (SrcTy->isIntegerType())
      Result =  FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
                                               DstFPSema);
    else
      Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
  }
  return Result;
}
1696
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
// NOTE(review): the parameter list of this definition (the complex
// real/imag value pair, source/destination QualTypes, and a source
// location) was elided in this extraction -- confirm against the full
// source.
Value *ScalarExprEmitter::EmitComplexToScalarConversion(
  // Get the source element type.
  SrcTy = SrcTy->castAs<ComplexType>()->getElementType();

  // Handle conversions to bool first, they are special: comparisons against 0.
  if (DstTy->isBooleanType()) {
    // Complex != 0 -> (Real != 0) | (Imag != 0)
    Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
    Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy, Loc);
    return Builder.CreateOr(Src.first, Src.second, "tobool");
  }

  // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
  // the imaginary part of the complex value is discarded and the value of the
  // real part is converted according to the conversion rules for the
  // corresponding real type.
  return EmitScalarConversion(Src.first, SrcTy, DstTy, Loc);
}
1719
1720Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
1721 return CGF.EmitFromMemory(CGF.CGM.EmitNullConstant(Ty), Ty);
1722}
1723
/// Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
/// operation). The check passes if all values in \p Checks (which are \c i1),
/// are \c true.
void ScalarExprEmitter::EmitBinOpCheck(
    ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
  assert(CGF.IsSanitizerScope);
  SanitizerHandler Check;
  // NOTE(review): the declarations of StaticData/DynamicData (the
  // handler-argument vectors populated below) were elided in this
  // extraction.

  BinaryOperatorKind Opcode = Info.Opcode;
  // NOTE(review): one or two lines appear to be elided here (presumably
  // normalizing compound-assignment opcodes) -- confirm against the full
  // source.

  // Every handler takes the source location of the operation first.
  StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
  const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
  if (UO && UO->getOpcode() == UO_Minus) {
    // A unary minus lowered to "0 - RHS": report as negation overflow, with
    // only the operand as dynamic data.
    Check = SanitizerHandler::NegateOverflow;
    StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
    DynamicData.push_back(Info.RHS);
  } else {
    if (BinaryOperator::isShiftOp(Opcode)) {
      // Shift LHS negative or too large, or RHS out of bounds.
      Check = SanitizerHandler::ShiftOutOfBounds;
      const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
      StaticData.push_back(
        CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
      StaticData.push_back(
        CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
    } else if (Opcode == BO_Div || Opcode == BO_Rem) {
      // Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
      Check = SanitizerHandler::DivremOverflow;
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    } else {
      // Arithmetic overflow (+, -, *).
      switch (Opcode) {
      case BO_Add: Check = SanitizerHandler::AddOverflow; break;
      case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
      case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
      default: llvm_unreachable("unexpected opcode for bin op check");
      }
      StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
    }
    // Binary cases pass both operands to the handler.
    DynamicData.push_back(Info.LHS);
    DynamicData.push_back(Info.RHS);
  }

  CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
1774
1775//===----------------------------------------------------------------------===//
1776// Visitor Methods
1777//===----------------------------------------------------------------------===//
1778
1779Value *ScalarExprEmitter::VisitExpr(Expr *E) {
1780 CGF.ErrorUnsupported(E, "scalar expression");
1781 if (E->getType()->isVoidType())
1782 return nullptr;
1783 return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
1784}
1785
/// Materialize the SYCL unique stable name of \p E as a global string
/// constant, then cast its address to the expression's pointer type.
Value *
ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
  ASTContext &Context = CGF.getContext();
  // NOTE(review): the initializer of AddrSpace (presumably the target's
  // default address space for globals) was elided in this extraction --
  // confirm against the full source.
  unsigned AddrSpace =
  llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
      E->ComputeName(Context), "__usn_str", AddrSpace);

  llvm::Type *ExprTy = ConvertType(E->getType());
  // The global may live in a different address space than the expression's
  // pointer type expects, hence the combined bitcast/addrspacecast.
  return Builder.CreatePointerBitCastOrAddrSpaceCast(GlobalConstStr, ExprTy,
                                                     "usn_addr_cast");
}
1798
1799Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
1800 // Vector Mask Case
1801 if (E->getNumSubExprs() == 2) {
1802 Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
1803 Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
1804 Value *Mask;
1805
1806 auto *LTy = cast<llvm::FixedVectorType>(LHS->getType());
1807 unsigned LHSElts = LTy->getNumElements();
1808
1809 Mask = RHS;
1810
1811 auto *MTy = cast<llvm::FixedVectorType>(Mask->getType());
1812
1813 // Mask off the high bits of each shuffle index.
1814 Value *MaskBits =
1815 llvm::ConstantInt::get(MTy, llvm::NextPowerOf2(LHSElts - 1) - 1);
1816 Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
1817
1818 // newv = undef
1819 // mask = mask & maskbits
1820 // for each elt
1821 // n = extract mask i
1822 // x = extract val n
1823 // newv = insert newv, x, i
1824 auto *RTy = llvm::FixedVectorType::get(LTy->getElementType(),
1825 MTy->getNumElements());
1826 Value* NewV = llvm::PoisonValue::get(RTy);
1827 for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
1828 Value *IIndx = llvm::ConstantInt::get(CGF.SizeTy, i);
1829 Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
1830
1831 Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
1832 NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
1833 }
1834 return NewV;
1835 }
1836
1837 Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
1838 Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
1839
1840 SmallVector<int, 32> Indices;
1841 for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
1842 llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
1843 // Check for -1 and output it as undef in the IR.
1844 if (Idx.isSigned() && Idx.isAllOnes())
1845 Indices.push_back(-1);
1846 else
1847 Indices.push_back(Idx.getZExtValue());
1848 }
1849
1850 return Builder.CreateShuffleVector(V1, V2, Indices, "shuffle");
1851}
1852
1853Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
1854 QualType SrcType = E->getSrcExpr()->getType(),
1855 DstType = E->getType();
1856
1857 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
1858
1859 SrcType = CGF.getContext().getCanonicalType(SrcType);
1860 DstType = CGF.getContext().getCanonicalType(DstType);
1861 if (SrcType == DstType) return Src;
1862
1863 assert(SrcType->isVectorType() &&
1864 "ConvertVector source type must be a vector");
1865 assert(DstType->isVectorType() &&
1866 "ConvertVector destination type must be a vector");
1867
1868 llvm::Type *SrcTy = Src->getType();
1869 llvm::Type *DstTy = ConvertType(DstType);
1870
1871 // Ignore conversions like int -> uint.
1872 if (SrcTy == DstTy)
1873 return Src;
1874
1875 QualType SrcEltType = SrcType->castAs<VectorType>()->getElementType(),
1876 DstEltType = DstType->castAs<VectorType>()->getElementType();
1877
1878 assert(SrcTy->isVectorTy() &&
1879 "ConvertVector source IR type must be a vector");
1880 assert(DstTy->isVectorTy() &&
1881 "ConvertVector destination IR type must be a vector");
1882
1883 llvm::Type *SrcEltTy = cast<llvm::VectorType>(SrcTy)->getElementType(),
1884 *DstEltTy = cast<llvm::VectorType>(DstTy)->getElementType();
1885
1886 if (DstEltType->isBooleanType()) {
1887 assert((SrcEltTy->isFloatingPointTy() ||
1888 isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
1889
1890 llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
1891 if (SrcEltTy->isFloatingPointTy()) {
1892 return Builder.CreateFCmpUNE(Src, Zero, "tobool");
1893 } else {
1894 return Builder.CreateICmpNE(Src, Zero, "tobool");
1895 }
1896 }
1897
1898 // We have the arithmetic types: real int/float.
1899 Value *Res = nullptr;
1900
1901 if (isa<llvm::IntegerType>(SrcEltTy)) {
1902 bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
1903 if (isa<llvm::IntegerType>(DstEltTy))
1904 Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
1905 else if (InputSigned)
1906 Res = Builder.CreateSIToFP(Src, DstTy, "conv");
1907 else
1908 Res = Builder.CreateUIToFP(Src, DstTy, "conv");
1909 } else if (isa<llvm::IntegerType>(DstEltTy)) {
1910 assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
1911 if (DstEltType->isSignedIntegerOrEnumerationType())
1912 Res = Builder.CreateFPToSI(Src, DstTy, "conv");
1913 else
1914 Res = Builder.CreateFPToUI(Src, DstTy, "conv");
1915 } else {
1916 assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
1917 "Unknown real conversion");
1918 if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
1919 Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
1920 else
1921 Res = Builder.CreateFPExt(Src, DstTy, "conv");
1922 }
1923
1924 return Res;
1925}
1926
/// Emit a member access as a scalar, folding to a constant when possible.
/// The base is still emitted (ignored) to preserve its side effects.
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
  if (CodeGenFunction::ConstantEmission Constant = CGF.tryEmitAsConstant(E)) {
    CGF.EmitIgnoredExpr(E->getBase());
    return CGF.emitScalarConstant(Constant, E);
  } else {
    // NOTE(review): the declaration of Result and the integer-evaluation
    // condition guarding this branch were elided in this extraction --
    // confirm against the full source.
      llvm::APSInt Value = Result.Val.getInt();
      CGF.EmitIgnoredExpr(E->getBase());
      return Builder.getInt(Value);
    }
  }

  // Not constant-foldable: load the member through its lvalue.
  return EmitLoadOfLValue(E);
}
1942
/// Emit an array/vector subscript as a scalar rvalue.
Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
  TestAndClearIgnoreResultAssign();

  // Emit subscript expressions in rvalue context's. For most cases, this just
  // loads the lvalue formed by the subscript expr. However, we have to be
  // careful, because the base of a vector subscript is occasionally an rvalue,
  // so we can't get it as an lvalue.
  // NOTE(review): the second half of this condition was elided in this
  // extraction -- confirm against the full source.
  if (!E->getBase()->getType()->isVectorType() &&
    return EmitLoadOfLValue(E);

  // Handle the vector case. The base must be a vector, the index must be an
  // integer value.
  Value *Base = Visit(E->getBase());
  Value *Idx = Visit(E->getIdx());
  QualType IdxTy = E->getIdx()->getType();

  // Optionally guard the element extraction with -fsanitize=array-bounds.
  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);

  return Builder.CreateExtractElement(Base, Idx, "vecext");
}
1965
1966Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
1967 TestAndClearIgnoreResultAssign();
1968
1969 // Handle the vector case. The base must be a vector, the index must be an
1970 // integer value.
1971 Value *RowIdx = Visit(E->getRowIdx());
1972 Value *ColumnIdx = Visit(E->getColumnIdx());
1973
1974 const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
1975 unsigned NumRows = MatrixTy->getNumRows();
1976 llvm::MatrixBuilder MB(Builder);
1977 Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
1978 if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
1979 MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
1980
1981 Value *Matrix = Visit(E->getBase());
1982
1983 // TODO: Should we emit bounds checks with SanitizerKind::ArrayBounds?
1984 return Builder.CreateExtractElement(Matrix, Idx, "matrixext");
1985}
1986
1987static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
1988 unsigned Off) {
1989 int MV = SVI->getMaskValue(Idx);
1990 if (MV == -1)
1991 return -1;
1992 return Off + MV;
1993}
1994
1995static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty) {
1996 assert(llvm::ConstantInt::isValueValidForType(I32Ty, C->getZExtValue()) &&
1997 "Index operand too large for shufflevector mask!");
1998 return C->getZExtValue();
1999}
2000
/// Emit an init-list as a scalar or vector value. Scalars use the first (or
/// a null) initializer; fixed vectors are assembled element-by-element,
/// folding swizzle (ExtVectorElementExpr) sources into shufflevectors.
Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
  bool Ignore = TestAndClearIgnoreResultAssign();
  (void)Ignore;
  assert (Ignore == false && "init list ignored");
  unsigned NumInitElements = E->getNumInits();

  if (E->hadArrayRangeDesignator())
    CGF.ErrorUnsupported(E, "GNU array range designator extension");

  llvm::VectorType *VType =
    dyn_cast<llvm::VectorType>(ConvertType(E->getType()));

  if (!VType) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the scalar.
      return EmitNullValue(E->getType());
    }
    // We have a scalar in braces. Just use the first element.
    return Visit(E->getInit(0));
  }

  // Scalable vectors: only empty ({}) or whole-vector initialization is
  // representable.
  if (isa<llvm::ScalableVectorType>(VType)) {
    if (NumInitElements == 0) {
      // C++11 value-initialization for the vector.
      return EmitNullValue(E->getType());
    }

    if (NumInitElements == 1) {
      Expr *InitVector = E->getInit(0);

      // Initialize from another scalable vector of the same type.
      if (InitVector->getType() == E->getType())
        return Visit(InitVector);
    }

    llvm_unreachable("Unexpected initialization of a scalable vector!");
  }

  unsigned ResElts = cast<llvm::FixedVectorType>(VType)->getNumElements();

  // Loop over initializers collecting the Value for each, and remembering
  // whether the source was swizzle (ExtVectorElementExpr). This will allow
  // us to fold the shuffle for the swizzle into the shuffle for the vector
  // initializer, since LLVM optimizers generally do not want to touch
  // shuffles.
  unsigned CurIdx = 0;
  bool VIsPoisonShuffle = false;
  llvm::Value *V = llvm::PoisonValue::get(VType);
  for (unsigned i = 0; i != NumInitElements; ++i) {
    Expr *IE = E->getInit(i);
    Value *Init = Visit(IE);
    // NOTE(review): the declaration of Args (the per-iteration shuffle-mask
    // scratch vector used below) was elided in this extraction -- confirm
    // against the full source.

    llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());

    // Handle scalar elements. If the scalar initializer is actually one
    // element of a different vector of the same width, use shuffle instead of
    // extract+insert.
    if (!VVT) {
      if (isa<ExtVectorElementExpr>(IE)) {
        llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);

        if (cast<llvm::FixedVectorType>(EI->getVectorOperandType())
                ->getNumElements() == ResElts) {
          llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
          Value *LHS = nullptr, *RHS = nullptr;
          if (CurIdx == 0) {
            // insert into poison -> shuffle (src, poison)
            // shufflemask must use an i32
            Args.push_back(getAsInt32(C, CGF.Int32Ty));
            Args.resize(ResElts, -1);

            LHS = EI->getVectorOperand();
            RHS = V;
            VIsPoisonShuffle = true;
          } else if (VIsPoisonShuffle) {
            // insert into poison shuffle && size match -> shuffle (v, src)
            llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
            for (unsigned j = 0; j != CurIdx; ++j)
              Args.push_back(getMaskElt(SVV, j, 0));
            Args.push_back(ResElts + C->getZExtValue());
            Args.resize(ResElts, -1);

            LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
            RHS = EI->getVectorOperand();
            VIsPoisonShuffle = false;
          }
          if (!Args.empty()) {
            V = Builder.CreateShuffleVector(LHS, RHS, Args);
            ++CurIdx;
            continue;
          }
        }
      }
      // Generic scalar element: plain insertelement at the current slot.
      V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
                                      "vecinit");
      VIsPoisonShuffle = false;
      ++CurIdx;
      continue;
    }

    unsigned InitElts = cast<llvm::FixedVectorType>(VVT)->getNumElements();

    // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
    // input is the same width as the vector being constructed, generate an
    // optimized shuffle of the swizzle input into the result.
    unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
    if (isa<ExtVectorElementExpr>(IE)) {
      llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
      Value *SVOp = SVI->getOperand(0);
      auto *OpTy = cast<llvm::FixedVectorType>(SVOp->getType());

      if (OpTy->getNumElements() == ResElts) {
        for (unsigned j = 0; j != CurIdx; ++j) {
          // If the current vector initializer is a shuffle with poison, merge
          // this shuffle directly into it.
          if (VIsPoisonShuffle) {
            Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0));
          } else {
            Args.push_back(j);
          }
        }
        for (unsigned j = 0, je = InitElts; j != je; ++j)
          Args.push_back(getMaskElt(SVI, j, Offset));
        Args.resize(ResElts, -1);

        if (VIsPoisonShuffle)
          V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);

        Init = SVOp;
      }
    }

    // Extend init to result vector length, and then shuffle its contribution
    // to the vector initializer into V.
    if (Args.empty()) {
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j);
      Args.resize(ResElts, -1);
      Init = Builder.CreateShuffleVector(Init, Args, "vext");

      Args.clear();
      for (unsigned j = 0; j != CurIdx; ++j)
        Args.push_back(j);
      for (unsigned j = 0; j != InitElts; ++j)
        Args.push_back(j + Offset);
      Args.resize(ResElts, -1);
    }

    // If V is poison, make sure it ends up on the RHS of the shuffle to aid
    // merging subsequent shuffles into this one.
    if (CurIdx == 0)
      std::swap(V, Init);
    V = Builder.CreateShuffleVector(V, Init, Args, "vecinit");
    VIsPoisonShuffle = isa<llvm::PoisonValue>(Init);
    CurIdx += InitElts;
  }

  // FIXME: evaluate codegen vs. shuffling against constant null vector.
  // Emit remaining default initializers.
  llvm::Type *EltTy = VType->getElementType();

  // Emit remaining default initializers
  for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
    Value *Idx = Builder.getInt32(CurIdx);
    llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
    V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
  }
  return V;
}
2171
  // NOTE(review): the enclosing function's signature (elided in this
  // extraction) appears to take `const CastExpr *CE` and return a bool
  // saying whether the cast's operand may require a null check -- confirm
  // against the full source.
  const Expr *E = CE->getSubExpr();

  // Unchecked derived-to-base adjustments never need a null check.
  if (CE->getCastKind() == CK_UncheckedDerivedToBase)
    return false;

  if (isa<CXXThisExpr>(E->IgnoreParens())) {
    // We always assume that 'this' is never null.
    return false;
  }

  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
    // And that glvalue casts are never null.
    if (ICE->isGLValue())
      return false;
  }

  // Anything else may be null.
  return true;
}
2191
2192// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
2193// have to handle a more broad range of conversions than explicit casts, as they
2194// handle things like function to ptr-to-function decay etc.
2195Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
2196 Expr *E = CE->getSubExpr();
2197 QualType DestTy = CE->getType();
2198 CastKind Kind = CE->getCastKind();
2199 CodeGenFunction::CGFPOptionsRAII FPOptions(CGF, CE);
2200
2201 // These cases are generally not written to ignore the result of
2202 // evaluating their sub-expressions, so we clear this now.
2203 bool Ignored = TestAndClearIgnoreResultAssign();
2204
2205 // Since almost all cast kinds apply to scalars, this switch doesn't have
2206 // a default case, so the compiler will warn on a missing case. The cases
2207 // are in the same order as in the CastKind enum.
2208 switch (Kind) {
2209 case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
2210 case CK_BuiltinFnToFnPtr:
2211 llvm_unreachable("builtin functions are handled elsewhere");
2212
2213 case CK_LValueBitCast:
2214 case CK_ObjCObjectLValueCast: {
2215 Address Addr = EmitLValue(E).getAddress(CGF);
2216 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2217 LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
2218 return EmitLoadOfLValue(LV, CE->getExprLoc());
2219 }
2220
2221 case CK_LValueToRValueBitCast: {
2222 LValue SourceLVal = CGF.EmitLValue(E);
2223 Address Addr = SourceLVal.getAddress(CGF).withElementType(
2224 CGF.ConvertTypeForMem(DestTy));
2225 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2227 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2228 }
2229
2230 case CK_CPointerToObjCPointerCast:
2231 case CK_BlockPointerToObjCPointerCast:
2232 case CK_AnyPointerToBlockPointerCast:
2233 case CK_BitCast: {
2234 Value *Src = Visit(const_cast<Expr*>(E));
2235 llvm::Type *SrcTy = Src->getType();
2236 llvm::Type *DstTy = ConvertType(DestTy);
2237 assert(
2238 (!SrcTy->isPtrOrPtrVectorTy() || !DstTy->isPtrOrPtrVectorTy() ||
2239 SrcTy->getPointerAddressSpace() == DstTy->getPointerAddressSpace()) &&
2240 "Address-space cast must be used to convert address spaces");
2241
2242 if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
2243 if (auto *PT = DestTy->getAs<PointerType>()) {
2245 PT->getPointeeType(),
2246 Address(Src,
2249 CGF.getPointerAlign()),
2250 /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
2251 CE->getBeginLoc());
2252 }
2253 }
2254
2255 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2256 const QualType SrcType = E->getType();
2257
2258 if (SrcType.mayBeNotDynamicClass() && DestTy.mayBeDynamicClass()) {
2259 // Casting to pointer that could carry dynamic information (provided by
2260 // invariant.group) requires launder.
2261 Src = Builder.CreateLaunderInvariantGroup(Src);
2262 } else if (SrcType.mayBeDynamicClass() && DestTy.mayBeNotDynamicClass()) {
2263 // Casting to pointer that does not carry dynamic information (provided
2264 // by invariant.group) requires stripping it. Note that we don't do it
2265 // if the source could not be dynamic type and destination could be
2266 // dynamic because dynamic information is already laundered. It is
2267 // because launder(strip(src)) == launder(src), so there is no need to
2268 // add extra strip before launder.
2269 Src = Builder.CreateStripInvariantGroup(Src);
2270 }
2271 }
2272
2273 // Update heapallocsite metadata when there is an explicit pointer cast.
2274 if (auto *CI = dyn_cast<llvm::CallBase>(Src)) {
2275 if (CI->getMetadata("heapallocsite") && isa<ExplicitCastExpr>(CE) &&
2276 !isa<CastExpr>(E)) {
2277 QualType PointeeType = DestTy->getPointeeType();
2278 if (!PointeeType.isNull())
2279 CGF.getDebugInfo()->addHeapAllocSiteMetadata(CI, PointeeType,
2280 CE->getExprLoc());
2281 }
2282 }
2283
2284 // If Src is a fixed vector and Dst is a scalable vector, and both have the
2285 // same element type, use the llvm.vector.insert intrinsic to perform the
2286 // bitcast.
2287 if (auto *FixedSrcTy = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
2288 if (auto *ScalableDstTy = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
2289 // If we are casting a fixed i8 vector to a scalable i1 predicate
2290 // vector, use a vector insert and bitcast the result.
2291 if (ScalableDstTy->getElementType()->isIntegerTy(1) &&
2292 ScalableDstTy->getElementCount().isKnownMultipleOf(8) &&
2293 FixedSrcTy->getElementType()->isIntegerTy(8)) {
2294 ScalableDstTy = llvm::ScalableVectorType::get(
2295 FixedSrcTy->getElementType(),
2296 ScalableDstTy->getElementCount().getKnownMinValue() / 8);
2297 }
2298 if (FixedSrcTy->getElementType() == ScalableDstTy->getElementType()) {
2299 llvm::Value *UndefVec = llvm::UndefValue::get(ScalableDstTy);
2300 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2301 llvm::Value *Result = Builder.CreateInsertVector(
2302 ScalableDstTy, UndefVec, Src, Zero, "cast.scalable");
2303 if (Result->getType() != DstTy)
2304 Result = Builder.CreateBitCast(Result, DstTy);
2305 return Result;
2306 }
2307 }
2308 }
2309
2310 // If Src is a scalable vector and Dst is a fixed vector, and both have the
2311 // same element type, use the llvm.vector.extract intrinsic to perform the
2312 // bitcast.
2313 if (auto *ScalableSrcTy = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
2314 if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(DstTy)) {
2315 // If we are casting a scalable i1 predicate vector to a fixed i8
2316 // vector, bitcast the source and use a vector extract.
2317 if (ScalableSrcTy->getElementType()->isIntegerTy(1) &&
2318 ScalableSrcTy->getElementCount().isKnownMultipleOf(8) &&
2319 FixedDstTy->getElementType()->isIntegerTy(8)) {
2320 ScalableSrcTy = llvm::ScalableVectorType::get(
2321 FixedDstTy->getElementType(),
2322 ScalableSrcTy->getElementCount().getKnownMinValue() / 8);
2323 Src = Builder.CreateBitCast(Src, ScalableSrcTy);
2324 }
2325 if (ScalableSrcTy->getElementType() == FixedDstTy->getElementType()) {
2326 llvm::Value *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
2327 return Builder.CreateExtractVector(DstTy, Src, Zero, "cast.fixed");
2328 }
2329 }
2330 }
2331
2332 // Perform VLAT <-> VLST bitcast through memory.
2333 // TODO: since the llvm.vector.{insert,extract} intrinsics
2334 // require the element types of the vectors to be the same, we
2335 // need to keep this around for bitcasts between VLAT <-> VLST where
2336 // the element types of the vectors are not the same, until we figure
2337 // out a better way of doing these casts.
2338 if ((isa<llvm::FixedVectorType>(SrcTy) &&
2339 isa<llvm::ScalableVectorType>(DstTy)) ||
2340 (isa<llvm::ScalableVectorType>(SrcTy) &&
2341 isa<llvm::FixedVectorType>(DstTy))) {
2342 Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
2343 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
2344 CGF.EmitStoreOfScalar(Src, LV);
2345 Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
2346 LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
2348 return EmitLoadOfLValue(DestLV, CE->getExprLoc());
2349 }
2350 return Builder.CreateBitCast(Src, DstTy);
2351 }
2352 case CK_AddressSpaceConversion: {
2354 if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
2355 Result.Val.isNullPointer()) {
2356 // If E has side effect, it is emitted even if its final result is a
2357 // null pointer. In that case, a DCE pass should be able to
2358 // eliminate the useless instructions emitted during translating E.
2359 if (Result.HasSideEffects)
2360 Visit(E);
2361 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
2362 ConvertType(DestTy)), DestTy);
2363 }
2364 // Since target may map different address spaces in AST to the same address
2365 // space, an address space conversion may end up as a bitcast.
2367 CGF, Visit(E), E->getType()->getPointeeType().getAddressSpace(),
2368 DestTy->getPointeeType().getAddressSpace(), ConvertType(DestTy));
2369 }
2370 case CK_AtomicToNonAtomic:
2371 case CK_NonAtomicToAtomic:
2372 case CK_UserDefinedConversion:
2373 return Visit(const_cast<Expr*>(E));
2374
2375 case CK_NoOp: {
2376 return CE->changesVolatileQualification() ? EmitLoadOfLValue(CE)
2377 : Visit(const_cast<Expr *>(E));
2378 }
2379
2380 case CK_BaseToDerived: {
2381 const CXXRecordDecl *DerivedClassDecl = DestTy->getPointeeCXXRecordDecl();
2382 assert(DerivedClassDecl && "BaseToDerived arg isn't a C++ object pointer!");
2383
2385 Address Derived =
2386 CGF.GetAddressOfDerivedClass(Base, DerivedClassDecl,
2387 CE->path_begin(), CE->path_end(),
2389
2390 // C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
2391 // performed and the object is not of the derived type.
2392 if (CGF.sanitizePerformTypeCheck())
2394 Derived, DestTy->getPointeeType());
2395
2396 if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
2397 CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
2398 /*MayBeNull=*/true,
2400 CE->getBeginLoc());
2401
2402 return CGF.getAsNaturalPointerTo(Derived, CE->getType()->getPointeeType());
2403 }
2404 case CK_UncheckedDerivedToBase:
2405 case CK_DerivedToBase: {
2406 // The EmitPointerWithAlignment path does this fine; just discard
2407 // the alignment.
2409 CE->getType()->getPointeeType());
2410 }
2411
2412 case CK_Dynamic: {
2414 const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
2415 return CGF.EmitDynamicCast(V, DCE);
2416 }
2417
2418 case CK_ArrayToPointerDecay:
2420 CE->getType()->getPointeeType());
2421 case CK_FunctionToPointerDecay:
2422 return EmitLValue(E).getPointer(CGF);
2423
2424 case CK_NullToPointer:
2425 if (MustVisitNullValue(E))
2426 CGF.EmitIgnoredExpr(E);
2427
2428 return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
2429 DestTy);
2430
2431 case CK_NullToMemberPointer: {
2432 if (MustVisitNullValue(E))
2433 CGF.EmitIgnoredExpr(E);
2434
2435 const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
2436 return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
2437 }
2438
2439 case CK_ReinterpretMemberPointer:
2440 case CK_BaseToDerivedMemberPointer:
2441 case CK_DerivedToBaseMemberPointer: {
2442 Value *Src = Visit(E);
2443
2444 // Note that the AST doesn't distinguish between checked and
2445 // unchecked member pointer conversions, so we always have to
2446 // implement checked conversions here. This is inefficient when
2447 // actual control flow may be required in order to perform the
2448 // check, which it is for data member pointers (but not member
2449 // function pointers on Itanium and ARM).
2450 return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
2451 }
2452
2453 case CK_ARCProduceObject:
2454 return CGF.EmitARCRetainScalarExpr(E);
2455 case CK_ARCConsumeObject:
2456 return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
2457 case CK_ARCReclaimReturnedObject:
2458 return CGF.EmitARCReclaimReturnedObject(E, /*allowUnsafe*/ Ignored);
2459 case CK_ARCExtendBlockObject:
2460 return CGF.EmitARCExtendBlockObject(E);
2461
2462 case CK_CopyAndAutoreleaseBlockObject:
2463 return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
2464
2465 case CK_FloatingRealToComplex:
2466 case CK_FloatingComplexCast:
2467 case CK_IntegralRealToComplex:
2468 case CK_IntegralComplexCast:
2469 case CK_IntegralComplexToFloatingComplex:
2470 case CK_FloatingComplexToIntegralComplex:
2471 case CK_ConstructorConversion:
2472 case CK_ToUnion:
2473 case CK_HLSLArrayRValue:
2474 llvm_unreachable("scalar cast to non-scalar value");
2475
2476 case CK_LValueToRValue:
2477 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
2478 assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
2479 return Visit(const_cast<Expr*>(E));
2480
2481 case CK_IntegralToPointer: {
2482 Value *Src = Visit(const_cast<Expr*>(E));
2483
2484 // First, convert to the correct width so that we control the kind of
2485 // extension.
2486 auto DestLLVMTy = ConvertType(DestTy);
2487 llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
2488 bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
2489 llvm::Value* IntResult =
2490 Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
2491
2492 auto *IntToPtr = Builder.CreateIntToPtr(IntResult, DestLLVMTy);
2493
2494 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2495 // Going from integer to pointer that could be dynamic requires reloading
2496 // dynamic information from invariant.group.
2497 if (DestTy.mayBeDynamicClass())
2498 IntToPtr = Builder.CreateLaunderInvariantGroup(IntToPtr);
2499 }
2500 return IntToPtr;
2501 }
2502 case CK_PointerToIntegral: {
2503 assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
2504 auto *PtrExpr = Visit(E);
2505
2506 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
2507 const QualType SrcType = E->getType();
2508
2509 // Casting to integer requires stripping dynamic information as it does
2510 // not carries it.
2511 if (SrcType.mayBeDynamicClass())
2512 PtrExpr = Builder.CreateStripInvariantGroup(PtrExpr);
2513 }
2514
2515 return Builder.CreatePtrToInt(PtrExpr, ConvertType(DestTy));
2516 }
2517 case CK_ToVoid: {
2518 CGF.EmitIgnoredExpr(E);
2519 return nullptr;
2520 }
2521 case CK_MatrixCast: {
2522 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2523 CE->getExprLoc());
2524 }
2525 case CK_VectorSplat: {
2526 llvm::Type *DstTy = ConvertType(DestTy);
2527 Value *Elt = Visit(const_cast<Expr *>(E));
2528 // Splat the element across to all elements
2529 llvm::ElementCount NumElements =
2530 cast<llvm::VectorType>(DstTy)->getElementCount();
2531 return Builder.CreateVectorSplat(NumElements, Elt, "splat");
2532 }
2533
2534 case CK_FixedPointCast:
2535 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2536 CE->getExprLoc());
2537
2538 case CK_FixedPointToBoolean:
2539 assert(E->getType()->isFixedPointType() &&
2540 "Expected src type to be fixed point type");
2541 assert(DestTy->isBooleanType() && "Expected dest type to be boolean type");
2542 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2543 CE->getExprLoc());
2544
2545 case CK_FixedPointToIntegral:
2546 assert(E->getType()->isFixedPointType() &&
2547 "Expected src type to be fixed point type");
2548 assert(DestTy->isIntegerType() && "Expected dest type to be an integer");
2549 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2550 CE->getExprLoc());
2551
2552 case CK_IntegralToFixedPoint:
2553 assert(E->getType()->isIntegerType() &&
2554 "Expected src type to be an integer");
2555 assert(DestTy->isFixedPointType() &&
2556 "Expected dest type to be fixed point type");
2557 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2558 CE->getExprLoc());
2559
2560 case CK_IntegralCast: {
2561 if (E->getType()->isExtVectorType() && DestTy->isExtVectorType()) {
2562 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2563 return Builder.CreateIntCast(Visit(E), ConvertType(DestTy),
2565 "conv");
2566 }
2567 ScalarConversionOpts Opts;
2568 if (auto *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
2569 if (!ICE->isPartOfExplicitCast())
2570 Opts = ScalarConversionOpts(CGF.SanOpts);
2571 }
2572 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2573 CE->getExprLoc(), Opts);
2574 }
2575 case CK_IntegralToFloating: {
2576 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2577 // TODO: Support constrained FP intrinsics.
2578 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2579 if (SrcElTy->isSignedIntegerOrEnumerationType())
2580 return Builder.CreateSIToFP(Visit(E), ConvertType(DestTy), "conv");
2581 return Builder.CreateUIToFP(Visit(E), ConvertType(DestTy), "conv");
2582 }
2583 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2584 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2585 CE->getExprLoc());
2586 }
2587 case CK_FloatingToIntegral: {
2588 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2589 // TODO: Support constrained FP intrinsics.
2590 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2591 if (DstElTy->isSignedIntegerOrEnumerationType())
2592 return Builder.CreateFPToSI(Visit(E), ConvertType(DestTy), "conv");
2593 return Builder.CreateFPToUI(Visit(E), ConvertType(DestTy), "conv");
2594 }
2595 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2596 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2597 CE->getExprLoc());
2598 }
2599 case CK_FloatingCast: {
2600 if (E->getType()->isVectorType() && DestTy->isVectorType()) {
2601 // TODO: Support constrained FP intrinsics.
2602 QualType SrcElTy = E->getType()->castAs<VectorType>()->getElementType();
2603 QualType DstElTy = DestTy->castAs<VectorType>()->getElementType();
2604 if (DstElTy->castAs<BuiltinType>()->getKind() <
2605 SrcElTy->castAs<BuiltinType>()->getKind())
2606 return Builder.CreateFPTrunc(Visit(E), ConvertType(DestTy), "conv");
2607 return Builder.CreateFPExt(Visit(E), ConvertType(DestTy), "conv");
2608 }
2609 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2610 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2611 CE->getExprLoc());
2612 }
2613 case CK_FixedPointToFloating:
2614 case CK_FloatingToFixedPoint: {
2615 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2616 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2617 CE->getExprLoc());
2618 }
2619 case CK_BooleanToSignedIntegral: {
2620 ScalarConversionOpts Opts;
2621 Opts.TreatBooleanAsSigned = true;
2622 return EmitScalarConversion(Visit(E), E->getType(), DestTy,
2623 CE->getExprLoc(), Opts);
2624 }
2625 case CK_IntegralToBoolean:
2626 return EmitIntToBoolConversion(Visit(E));
2627 case CK_PointerToBoolean:
2628 return EmitPointerToBoolConversion(Visit(E), E->getType());
2629 case CK_FloatingToBoolean: {
2630 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, CE);
2631 return EmitFloatToBoolConversion(Visit(E));
2632 }
2633 case CK_MemberPointerToBoolean: {
2634 llvm::Value *MemPtr = Visit(E);
2635 const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
2636 return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
2637 }
2638
2639 case CK_FloatingComplexToReal:
2640 case CK_IntegralComplexToReal:
2641 return CGF.EmitComplexExpr(E, false, true).first;
2642
2643 case CK_FloatingComplexToBoolean:
2644 case CK_IntegralComplexToBoolean: {
2646
2647 // TODO: kill this function off, inline appropriate case here
2648 return EmitComplexToScalarConversion(V, E->getType(), DestTy,
2649 CE->getExprLoc());
2650 }
2651
2652 case CK_ZeroToOCLOpaqueType: {
2653 assert((DestTy->isEventT() || DestTy->isQueueT() ||
2654 DestTy->isOCLIntelSubgroupAVCType()) &&
2655 "CK_ZeroToOCLEvent cast on non-event type");
2656 return llvm::Constant::getNullValue(ConvertType(DestTy));
2657 }
2658
2659 case CK_IntToOCLSampler:
2660 return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
2661
2662 case CK_HLSLVectorTruncation: {
2663 assert(DestTy->isVectorType() && "Expected dest type to be vector type");
2664 Value *Vec = Visit(const_cast<Expr *>(E));
2666 unsigned NumElts = DestTy->castAs<VectorType>()->getNumElements();
2667 for (unsigned I = 0; I != NumElts; ++I)
2668 Mask.push_back(I);
2669
2670 return Builder.CreateShuffleVector(Vec, Mask, "trunc");
2671 }
2672
2673 } // end of switch
2674
2675 llvm_unreachable("unknown scalar cast");
2676}
2677
2678Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
2679 CodeGenFunction::StmtExprEvaluation eval(CGF);
2680 Address RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
2681 !E->getType()->isVoidType());
2682 if (!RetAlloca.isValid())
2683 return nullptr;
2684 return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
2685 E->getExprLoc());
2686}
2687
2688Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
2689 CodeGenFunction::RunCleanupsScope Scope(CGF);
2690 Value *V = Visit(E->getSubExpr());
2691 // Defend against dominance problems caused by jumps out of expression
2692 // evaluation through the shared cleanup block.
2693 Scope.ForceCleanup({&V});
2694 return V;
2695}
2696
2697//===----------------------------------------------------------------------===//
2698// Unary Operators
2699//===----------------------------------------------------------------------===//
2700
2702                                           llvm::Value *InVal, bool IsInc,
2703                                           FPOptions FPFeatures) {
  // Package an increment/decrement into a BinOpInfo so it can be lowered by
  // the shared binary-operator paths (e.g. the overflow-checked add/sub).
2704  BinOpInfo BinOp;
2705  BinOp.LHS = InVal;
  // The amount is always the unsigned constant 1; the direction (++ vs --)
  // is carried by the opcode chosen below.
2706  BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
2707  BinOp.Ty = E->getType();
2708  BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
2709  BinOp.FPFeatures = FPFeatures;
2710  BinOp.E = E;
2711  return BinOp;
2712}
2713
// Emit InVal +/- 1 for a signed-integer ++/--, honoring the language-level
// signed-overflow behavior mode and the signed-integer-overflow sanitizer.
// NOTE(review): the switch's case labels are elided in this listing; each of
// the three arms corresponds to one SignedOverflowBehavior setting -- confirm
// against upstream.
2714llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
2715    const UnaryOperator *E, llvm::Value *InVal, bool IsInc) {
  // +1 or -1 as a signed constant of the operand's type.
2716  llvm::Value *Amount =
2717      llvm::ConstantInt::get(InVal->getType(), IsInc ? 1 : -1, true);
2718  StringRef Name = IsInc ? "inc" : "dec";
2719  switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
  // Plain (wrapping) add unless the sanitizer wants an explicit check.
2721    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2722      return Builder.CreateAdd(InVal, Amount, Name);
2723    [[fallthrough]];
  // nsw add (overflow is UB) unless the sanitizer wants an explicit check.
2725    if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
2726      return Builder.CreateNSWAdd(InVal, Amount, Name);
2727    [[fallthrough]];
  // Expressions that provably cannot overflow still get a plain nsw add;
  // everything else goes through the overflow-checked binop path.
2729    if (!E->canOverflow())
2730      return Builder.CreateNSWAdd(InVal, Amount, Name);
2731    return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2732        E, InVal, IsInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2733  }
2734  llvm_unreachable("Unknown SignedOverflowBehaviorTy");
2735}
2736
2737namespace {
2738/// Handles check and update for lastprivate conditional variables.
2739class OMPLastprivateConditionalUpdateRAII {
2740private:
  // CodeGen context used to query LangOpts and emit the runtime check.
2741  CodeGenFunction &CGF;
  // The ++/-- whose operand may be a lastprivate conditional variable.
2742  const UnaryOperator *E;
2743
2744public:
2745  OMPLastprivateConditionalUpdateRAII(CodeGenFunction &CGF,
2746                                      const UnaryOperator *E)
2747      : CGF(CGF), E(E) {}
  // On scope exit (after the inc/dec has been emitted), and only under
  // OpenMP, run the lastprivate-conditional check on the subexpression.
  // NOTE(review): the callee line is elided in this listing -- presumably a
  // call into the OpenMP runtime's lastprivate-conditional check; verify
  // against upstream.
2748  ~OMPLastprivateConditionalUpdateRAII() {
2749    if (CGF.getLangOpts().OpenMP)
2751          CGF, E->getSubExpr());
2752  }
2753};
2754} // namespace
2755
/// Emit a scalar pre/post increment or decrement (++ / --) of the lvalue LV.
///
/// \param E     the unary operator being emitted.
/// \param LV    the already-emitted lvalue of the operand.
/// \param isInc true for ++, false for --.
/// \param isPre true for the prefix forms; prefix returns the updated value,
///              postfix returns the value originally loaded.
///
/// Handles _Atomic operands (atomicrmw fast paths or a load/op/cmpxchg
/// loop), booleans, integers (with sanitizer and overflow handling),
/// pointers (including VLA element types and function pointers), vectors,
/// real floating point (including half via conversion), fixed-point types,
/// and Objective-C object pointers.
///
/// NOTE(review): several guard lines are elided in this doxygen listing
/// (e.g. the isSignedOverflowDefined() checks before the plain-GEP arms and
/// the useFP16ConversionIntrinsics() checks in the half-float paths) --
/// confirm details against upstream before relying on them.
2756llvm::Value *
2757ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
2758                                           bool isInc, bool isPre) {
2759  OMPLastprivateConditionalUpdateRAII OMPRegion(CGF, E);
2760  QualType type = E->getSubExpr()->getType();
2761  llvm::PHINode *atomicPHI = nullptr;
2762  llvm::Value *value;
2763  llvm::Value *input;
  // Previous/SrcType feed the bitfield-conversion sanitizer check at the end.
2764  llvm::Value *Previous = nullptr;
2765  QualType SrcType = E->getType();
2766
2767  int amount = (isInc ? 1 : -1);
2768  bool isSubtraction = !isInc;
2769
  // _Atomic operand: try the cheap atomicrmw forms first, falling back to a
  // compare-exchange loop for everything else.
2770  if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
2771    type = atomicTy->getValueType();
2772    if (isInc && type->isBooleanType()) {
2773      llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
2774      if (isPre) {
2775        Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
2776            ->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
2777        return Builder.getTrue();
2778      }
2779      // For atomic bool increment, we just store true and return it for
2780      // preincrement, do an atomic swap with true for postincrement
2781      return Builder.CreateAtomicRMW(
2782          llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
2783          llvm::AtomicOrdering::SequentiallyConsistent);
2784    }
2785    // Special case for atomic increment / decrement on integers, emit
2786    // atomicrmw instructions. We skip this if we want to be doing overflow
2787    // checking, and fall into the slow path with the atomic cmpxchg loop.
2788    if (!type->isBooleanType() && type->isIntegerType() &&
2789        !(type->isUnsignedIntegerType() &&
2790          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
2791        CGF.getLangOpts().getSignedOverflowBehavior() !=
2793      llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
2794        llvm::AtomicRMWInst::Sub;
2795      llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
2796        llvm::Instruction::Sub;
2797      llvm::Value *amt = CGF.EmitToMemory(
2798          llvm::ConstantInt::get(ConvertType(type), 1, true), type);
      // atomicrmw yields the OLD value; recompute the new one for prefix.
2799      llvm::Value *old =
2800          Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
2801                                  llvm::AtomicOrdering::SequentiallyConsistent);
2802      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2803    }
2804    // Special case for atomic increment/decrement on floats
2805    if (type->isFloatingType()) {
2806      llvm::AtomicRMWInst::BinOp aop =
2807          isInc ? llvm::AtomicRMWInst::FAdd : llvm::AtomicRMWInst::FSub;
2808      llvm::Instruction::BinaryOps op =
2809          isInc ? llvm::Instruction::FAdd : llvm::Instruction::FSub;
2810      llvm::Value *amt = llvm::ConstantFP::get(
2811          VMContext, llvm::APFloat(static_cast<float>(1.0)));
2812      llvm::Value *old =
2813          Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
2814                                  llvm::AtomicOrdering::SequentiallyConsistent);
2815      return isPre ? Builder.CreateBinOp(op, old, amt) : old;
2816    }
2817    value = EmitLoadOfLValue(LV, E->getExprLoc());
2818    input = value;
2819    // For every other atomic operation, we need to emit a load-op-cmpxchg loop
2820    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
2821    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
2822    value = CGF.EmitToMemory(value, type);
2823    Builder.CreateBr(opBB);
2824    Builder.SetInsertPoint(opBB);
    // The PHI's second incoming edge (the retry from a failed cmpxchg) is
    // added at the bottom of the function, in the `if (atomicPHI)` block.
2825    atomicPHI = Builder.CreatePHI(value->getType(), 2);
2826    atomicPHI->addIncoming(value, startBB);
2827    value = atomicPHI;
2828  } else {
2829    value = EmitLoadOfLValue(LV, E->getExprLoc());
2830    input = value;
2831  }
2832
2833  // Special case of integer increment that we have to check first: bool++.
2834  // Due to promotion rules, we get:
2835  //   bool++ -> bool = bool + 1
2836  //          -> bool = (int)bool + 1
2837  //          -> bool = ((int)bool + 1 != 0)
2838  // An interesting aspect of this is that increment is always true.
2839  // Decrement does not have this property.
2840  if (isInc && type->isBooleanType()) {
2841    value = Builder.getTrue();
2842
2843  // Most common case by far: integer increment.
2844  } else if (type->isIntegerType()) {
2845    QualType promotedType;
2846    bool canPerformLossyDemotionCheck = false;
2848      promotedType = CGF.getContext().getPromotedIntegerType(type);
2849      assert(promotedType != type && "Shouldn't promote to the same type.");
2850      canPerformLossyDemotionCheck = true;
2851      canPerformLossyDemotionCheck &=
2853          CGF.getContext().getCanonicalType(promotedType);
2854      canPerformLossyDemotionCheck &=
2856              type, promotedType);
2857      assert((!canPerformLossyDemotionCheck ||
2858              type->isSignedIntegerOrEnumerationType() ||
2859              promotedType->isSignedIntegerOrEnumerationType() ||
2860              ConvertType(type)->getScalarSizeInBits() ==
2861                  ConvertType(promotedType)->getScalarSizeInBits()) &&
2862             "The following check expects that if we do promotion to different "
2863             "underlying canonical type, at least one of the types (either "
2864             "base or promoted) will be signed, or the bitwidths will match.");
2865    }
2866    if (CGF.SanOpts.hasOneOf(
2867            SanitizerKind::ImplicitIntegerArithmeticValueChange |
2868            SanitizerKind::ImplicitBitfieldConversion) &&
2869        canPerformLossyDemotionCheck) {
2870      // While `x += 1` (for `x` with width less than int) is modeled as
2871      // promotion+arithmetics+demotion, and we can catch lossy demotion with
2872      // ease; inc/dec with width less than int can't overflow because of
2873      // promotion rules, so we omit promotion+demotion, which means that we can
2874      // not catch lossy "demotion". Because we still want to catch these cases
2875      // when the sanitizer is enabled, we perform the promotion, then perform
2876      // the increment/decrement in the wider type, and finally
2877      // perform the demotion. This will catch lossy demotions.
2878
2879      // We have a special case for bitfields defined using all the bits of the
2880      // type. In this case we need to do the same trick as for the integer
2881      // sanitizer checks, i.e., promotion -> increment/decrement -> demotion.
2882
2883      value = EmitScalarConversion(value, type, promotedType, E->getExprLoc());
2884      Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2885      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2886      // Do pass non-default ScalarConversionOpts so that sanitizer check is
2887      // emitted if LV is not a bitfield, otherwise the bitfield sanitizer
2888      // checks will take care of the conversion.
2889      ScalarConversionOpts Opts;
2890      if (!LV.isBitField())
2891        Opts = ScalarConversionOpts(CGF.SanOpts);
2892      else if (CGF.SanOpts.has(SanitizerKind::ImplicitBitfieldConversion)) {
2893        Previous = value;
2894        SrcType = promotedType;
2895      }
2896
2897      value = EmitScalarConversion(value, promotedType, type, E->getExprLoc(),
2898                                   Opts);
2899
2900      // Note that signed integer inc/dec with width less than int can't
2901      // overflow because of promotion rules; we're just eliding a few steps
2902      // here.
2903    } else if (E->canOverflow() && type->isSignedIntegerOrEnumerationType()) {
2904      value = EmitIncDecConsiderOverflowBehavior(E, value, isInc);
2905    } else if (E->canOverflow() && type->isUnsignedIntegerType() &&
2906               CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
2907      value = EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(
2908          E, value, isInc, E->getFPFeaturesInEffect(CGF.getLangOpts())));
2909    } else {
2910      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount, true);
2911      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2912    }
2913
2914  // Next most common: pointer increment.
2915  } else if (const PointerType *ptr = type->getAs<PointerType>()) {
2916    QualType type = ptr->getPointeeType();
2917
2918    // VLA types don't have constant size.
2919    if (const VariableArrayType *vla
2921      llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
2922      if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
2923      llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
2925        value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
2926      else
2927        value = CGF.EmitCheckedInBoundsGEP(
2928            elemTy, value, numElts, /*SignedIndices=*/false, isSubtraction,
2929            E->getExprLoc(), "vla.inc");
2930
2931    // Arithmetic on function pointers (!) is just +-1.
2932    } else if (type->isFunctionType()) {
2933      llvm::Value *amt = Builder.getInt32(amount);
2934
2936        value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
2937      else
2938        value =
2939            CGF.EmitCheckedInBoundsGEP(CGF.Int8Ty, value, amt,
2940                                       /*SignedIndices=*/false, isSubtraction,
2941                                       E->getExprLoc(), "incdec.funcptr");
2942
2943    // For everything else, we can just do a simple increment.
2944    } else {
2945      llvm::Value *amt = Builder.getInt32(amount);
2946      llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
2948        value = Builder.CreateGEP(elemTy, value, amt, "incdec.ptr");
2949      else
2950        value = CGF.EmitCheckedInBoundsGEP(
2951            elemTy, value, amt, /*SignedIndices=*/false, isSubtraction,
2952            E->getExprLoc(), "incdec.ptr");
2953    }
2954
2955  // Vector increment/decrement.
2956  } else if (type->isVectorType()) {
2957    if (type->hasIntegerRepresentation()) {
2958      llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
2959
2960      value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
2961    } else {
2962      value = Builder.CreateFAdd(
2963                  value,
2964                  llvm::ConstantFP::get(value->getType(), amount),
2965                  isInc ? "inc" : "dec");
2966    }
2967
2968  // Floating point.
2969  } else if (type->isRealFloatingType()) {
2970    // Add the inc/dec to the real part.
2971    llvm::Value *amt;
2972    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
2973
2974    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
2975      // Another special case: half FP increment should be done via float
2977        value = Builder.CreateCall(
2978            CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
2979                                 CGF.CGM.FloatTy),
2980            input, "incdec.conv");
2981      } else {
2982        value = Builder.CreateFPExt(input, CGF.CGM.FloatTy, "incdec.conv");
2983      }
2984    }
2985
    // Build the +/-1 constant in the semantics of the value being adjusted.
2986    if (value->getType()->isFloatTy())
2987      amt = llvm::ConstantFP::get(VMContext,
2988                                  llvm::APFloat(static_cast<float>(amount)));
2989    else if (value->getType()->isDoubleTy())
2990      amt = llvm::ConstantFP::get(VMContext,
2991                                  llvm::APFloat(static_cast<double>(amount)));
2992    else {
2993      // Remaining types are Half, Bfloat16, LongDouble, __ibm128 or __float128.
2994      // Convert from float.
2995      llvm::APFloat F(static_cast<float>(amount));
2996      bool ignored;
2997      const llvm::fltSemantics *FS;
2998      // Don't use getFloatTypeSemantics because Half isn't
2999      // necessarily represented using the "half" LLVM type.
3000      if (value->getType()->isFP128Ty())
3001        FS = &CGF.getTarget().getFloat128Format();
3002      else if (value->getType()->isHalfTy())
3003        FS = &CGF.getTarget().getHalfFormat();
3004      else if (value->getType()->isBFloatTy())
3005        FS = &CGF.getTarget().getBFloat16Format();
3006      else if (value->getType()->isPPC_FP128Ty())
3007        FS = &CGF.getTarget().getIbm128Format();
3008      else
3009        FS = &CGF.getTarget().getLongDoubleFormat();
3010      F.convert(*FS, llvm::APFloat::rmTowardZero, &ignored);
3011      amt = llvm::ConstantFP::get(VMContext, F);
3012    }
3013    value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
3014
    // If we widened half to float above, narrow the result back down.
3015    if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
3017      value = Builder.CreateCall(
3018          CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
3019                               CGF.CGM.FloatTy),
3020          value, "incdec.conv");
3021    } else {
3022      value = Builder.CreateFPTrunc(value, input->getType(), "incdec.conv");
3023    }
3024  }
3025
3026  // Fixed-point types.
3027  } else if (type->isFixedPointType()) {
3028    // Fixed-point types are tricky. In some cases, it isn't possible to
3029    // represent a 1 or a -1 in the type at all. Piggyback off of
3030    // EmitFixedPointBinOp to avoid having to reimplement saturation.
3031    BinOpInfo Info;
3032    Info.E = E;
3033    Info.Ty = E->getType();
3034    Info.Opcode = isInc ? BO_Add : BO_Sub;
3035    Info.LHS = value;
3036    Info.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
3037    // If the type is signed, it's better to represent this as +(-1) or -(-1),
3038    // since -1 is guaranteed to be representable.
3039    if (type->isSignedFixedPointType()) {
3040      Info.Opcode = isInc ? BO_Sub : BO_Add;
3041      Info.RHS = Builder.CreateNeg(Info.RHS);
3042    }
3043    // Now, convert from our invented integer literal to the type of the unary
3044    // op. This will upscale and saturate if necessary. This value can become
3045    // undef in some cases.
3046    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
3047    auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
3048    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
3049    value = EmitFixedPointBinOp(Info);
3050
3051  // Objective-C pointer types.
3052  } else {
3053    const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
3054
3056    if (!isInc) size = -size;
3057    llvm::Value *sizeValue =
3058        llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
3059
3061      value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
3062    else
3063      value = CGF.EmitCheckedInBoundsGEP(
3064          CGF.Int8Ty, value, sizeValue, /*SignedIndices=*/false, isSubtraction,
3065          E->getExprLoc(), "incdec.objptr");
3066    value = Builder.CreateBitCast(value, input->getType());
3067  }
3068
  // Atomic slow path: try to commit the computed value with a cmpxchg; on
  // failure, loop back to the op block with the freshly observed value.
3069  if (atomicPHI) {
3070    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
3071    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
3072    auto Pair = CGF.EmitAtomicCompareExchange(
3073        LV, RValue::get(atomicPHI), RValue::get(value), E->getExprLoc());
3074    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), type);
3075    llvm::Value *success = Pair.second;
3076    atomicPHI->addIncoming(old, curBlock);
3077    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
3078    Builder.SetInsertPoint(contBB);
3079    return isPre ? value : input;
3080  }
3081
3082  // Store the updated result through the lvalue.
3083  if (LV.isBitField()) {
3084    Value *Src = Previous ? Previous : value;
3085    CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
3086    CGF.EmitBitfieldConversionCheck(Src, SrcType, value, E->getType(),
3087                                    LV.getBitFieldInfo(), E->getExprLoc());
3088  } else
3089    CGF.EmitStoreThroughLValue(RValue::get(value), LV);
3090
3091  // If this is a postinc, return the value read from memory, otherwise use the
3092  // updated value.
3093  return isPre ? value : input;
3094}
3095
3096
3097Value *ScalarExprEmitter::VisitUnaryPlus(const UnaryOperator *E,
3098 QualType PromotionType) {
3099 QualType promotionTy = PromotionType.isNull()
3100 ? getPromotionType(E->getSubExpr()->getType())
3101 : PromotionType;
3102 Value *result = VisitPlus(E, promotionTy);
3103 if (result && !promotionTy.isNull())
3104 result = EmitUnPromotedValue(result, E->getType());
3105 return result;
3106}
3107
3108Value *ScalarExprEmitter::VisitPlus(const UnaryOperator *E,
3109 QualType PromotionType) {
3110 // This differs from gcc, though, most likely due to a bug in gcc.
3111 TestAndClearIgnoreResultAssign();
3112 if (!PromotionType.isNull())
3113 return CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3114 return Visit(E->getSubExpr());
3115}
3116
3117Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E,
3118 QualType PromotionType) {
3119 QualType promotionTy = PromotionType.isNull()
3120 ? getPromotionType(E->getSubExpr()->getType())
3121 : PromotionType;
3122 Value *result = VisitMinus(E, promotionTy);
3123 if (result && !promotionTy.isNull())
3124 result = EmitUnPromotedValue(result, E->getType());
3125 return result;
3126}
3127
3128Value *ScalarExprEmitter::VisitMinus(const UnaryOperator *E,
3129 QualType PromotionType) {
3130 TestAndClearIgnoreResultAssign();
3131 Value *Op;
3132 if (!PromotionType.isNull())
3133 Op = CGF.EmitPromotedScalarExpr(E->getSubExpr(), PromotionType);
3134 else
3135 Op = Visit(E->getSubExpr());
3136
3137 // Generate a unary FNeg for FP ops.
3138 if (Op->getType()->isFPOrFPVectorTy())
3139 return Builder.CreateFNeg(Op, "fneg");
3140
3141 // Emit unary minus with EmitSub so we handle overflow cases etc.
3142 BinOpInfo BinOp;
3143 BinOp.RHS = Op;
3144 BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
3145 BinOp.Ty = E->getType();
3146 BinOp.Opcode = BO_Sub;
3147 BinOp.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3148 BinOp.E = E;
3149 return EmitSub(BinOp);
3150}
3151
3152Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
3153 TestAndClearIgnoreResultAssign();
3154 Value *Op = Visit(E->getSubExpr());
3155 return Builder.CreateNot(Op, "not");
3156}
3157
// Emit logical not (!x). For suitable vector types this is a lane-wise
// compare-against-zero; otherwise evaluate the operand as a bool and invert.
// NOTE(review): the right-hand side of the vector-kind comparison is elided
// in this listing -- confirm which VectorKind is matched against upstream.
3158Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
3159  // Perform vector logical not on comparison with zero vector.
3160  if (E->getType()->isVectorType() &&
3161      E->getType()->castAs<VectorType>()->getVectorKind() ==
3163    Value *Oper = Visit(E->getSubExpr());
3164    Value *Zero = llvm::Constant::getNullValue(Oper->getType());
3165    Value *Result;
3166    if (Oper->getType()->isFPOrFPVectorTy()) {
      // Scope the FP environment (exceptions/rounding) to this compare.
3167      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
3168          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
3169      Result = Builder.CreateFCmp(llvm::CmpInst::FCMP_OEQ, Oper, Zero, "cmp");
3170    } else
3171      Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
    // Sign-extend the i1 lanes to the full vector element width.
3172    return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
3173  }
3174
3175  // Compare operand to zero.
3176  Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
3177
3178  // Invert value.
3179  // TODO: Could dynamically modify easy computations here. For example, if
3180  // the operand is an icmp ne, turn into icmp eq.
3181  BoolVal = Builder.CreateNot(BoolVal, "lnot");
3182
3183  // ZExt result to the expr type.
3184  return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
3185}
3186
// Emit __builtin_offsetof. First try full constant folding; otherwise walk
// the offsetof components (array subscripts, fields, base classes) and
// accumulate the byte offset as an integer of the result type.
// NOTE(review): one case label is elided in this listing between Field and
// Base -- presumably the dependent-identifier component; verify upstream.
3187Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
3188  // Try folding the offsetof to a constant.
3189  Expr::EvalResult EVResult;
3190  if (E->EvaluateAsInt(EVResult, CGF.getContext())) {
3191    llvm::APSInt Value = EVResult.Val.getInt();
3192    return Builder.getInt(Value);
3193  }
3194
3195  // Loop over the components of the offsetof to compute the value.
3196  unsigned n = E->getNumComponents();
3197  llvm::Type* ResultType = ConvertType(E->getType());
3198  llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
  // CurrentType tracks the type we are "inside" after each component.
3199  QualType CurrentType = E->getTypeSourceInfo()->getType();
3200  for (unsigned i = 0; i != n; ++i) {
3201    OffsetOfNode ON = E->getComponent(i);
3202    llvm::Value *Offset = nullptr;
3203    switch (ON.getKind()) {
3204    case OffsetOfNode::Array: {
3205      // Compute the index
3206      Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
3207      llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
3208      bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
3209      Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
3210
3211      // Save the element type
3212      CurrentType =
3213          CGF.getContext().getAsArrayType(CurrentType)->getElementType();
3214
3215      // Compute the element size
3216      llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
3217          CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
3218
3219      // Multiply out to compute the result
3220      Offset = Builder.CreateMul(Idx, ElemSize);
3221      break;
3222    }
3223
3224    case OffsetOfNode::Field: {
3225      FieldDecl *MemberDecl = ON.getField();
3226      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3227      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3228
3229      // Compute the index of the field in its parent.
3230      unsigned i = 0;
3231      // FIXME: It would be nice if we didn't have to loop here!
3232      for (RecordDecl::field_iterator Field = RD->field_begin(),
3233                                      FieldEnd = RD->field_end();
3234           Field != FieldEnd; ++Field, ++i) {
3235        if (*Field == MemberDecl)
3236          break;
3237      }
3238      assert(i < RL.getFieldCount() && "offsetof field in wrong type");
3239
3240      // Compute the offset to the field
3241      int64_t OffsetInt = RL.getFieldOffset(i) /
3242                          CGF.getContext().getCharWidth();
3243      Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
3244
3245      // Save the element type.
3246      CurrentType = MemberDecl->getType();
3247      break;
3248    }
3249
3251      llvm_unreachable("dependent __builtin_offsetof");
3252
3253    case OffsetOfNode::Base: {
3254      if (ON.getBase()->isVirtual()) {
        // Virtual-base offsets are not statically known; report and skip.
3255        CGF.ErrorUnsupported(E, "virtual base in offsetof");
3256        continue;
3257      }
3258
3259      RecordDecl *RD = CurrentType->castAs<RecordType>()->getDecl();
3260      const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
3261
3262      // Save the element type.
3263      CurrentType = ON.getBase()->getType();
3264
3265      // Compute the offset to the base.
3266      auto *BaseRT = CurrentType->castAs<RecordType>();
3267      auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
3268      CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
3269      Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
3270      break;
3271    }
3272    }
3273    Result = Builder.CreateAdd(Result, Offset);
3274  }
3275  return Result;
3276}
3277
3278/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
3279/// argument of the sizeof expression as an integer.
// Also handles __builtin_omp_required_simd_align and __builtin_vectorelements.
// Only sizeof on a VLA requires runtime computation; everything else constant-
// folds. NOTE(review): two spans are elided in this listing (the evaluation of
// a VLA sizeof's expression argument, and the middle of the SIMD-align
// computation) -- confirm details against upstream.
3280Value *
3281ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
3282                              const UnaryExprOrTypeTraitExpr *E) {
3283  QualType TypeToSize = E->getTypeOfArgument();
3284  if (auto Kind = E->getKind();
3285      Kind == UETT_SizeOf || Kind == UETT_DataSizeOf) {
3286    if (const VariableArrayType *VAT =
3287            CGF.getContext().getAsVariableArrayType(TypeToSize)) {
3288      if (E->isArgumentType()) {
3289        // sizeof(type) - make sure to emit the VLA size.
3290        CGF.EmitVariablyModifiedType(TypeToSize);
3291      } else {
3292        // C99 6.5.3.4p2: If the argument is an expression of type
3293        // VLA, it is evaluated.
3295      }
3296
3297      auto VlaSize = CGF.getVLASize(VAT);
3298      llvm::Value *size = VlaSize.NumElts;
3299
3300      // Scale the number of non-VLA elements by the non-VLA element size.
3301      CharUnits eltSize = CGF.getContext().getTypeSizeInChars(VlaSize.Type);
3302      if (!eltSize.isOne())
3303        size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), size);
3304
3305      return size;
3306    }
3307  } else if (E->getKind() == UETT_OpenMPRequiredSimdAlign) {
3308    auto Alignment =
3309        CGF.getContext()
3312            .getQuantity();
3313    return llvm::ConstantInt::get(CGF.SizeTy, Alignment);
3314  } else if (E->getKind() == UETT_VectorElements) {
    // __builtin_vectorelements: element count, runtime-scaled for scalable
    // vectors.
3315    auto *VecTy = cast<llvm::VectorType>(ConvertType(E->getTypeOfArgument()));
3316    return Builder.CreateElementCount(CGF.SizeTy, VecTy->getElementCount());
3317  }
3318
3319  // If this isn't sizeof(vla), the result must be constant; use the constant
3320  // folding logic so we don't have to duplicate it here.
3321  return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
3322}
3323
3324Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E,
3325 QualType PromotionType) {
3326 QualType promotionTy = PromotionType.isNull()
3327 ? getPromotionType(E->getSubExpr()->getType())
3328 : PromotionType;
3329 Value *result = VisitReal(E, promotionTy);
3330 if (result && !promotionTy.isNull())
3331 result = EmitUnPromotedValue(result, E->getType());
3332 return result;
3333}
3334
// VisitReal - Emit the real component (__real) of the operand. When
// PromotionType is non-null the result is produced in that (wider) type;
// the caller (VisitUnaryReal) is responsible for un-promoting it.
Value *ScalarExprEmitter::VisitReal(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (E->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration/initialization of 'result' (a
        // complex pair load ignoring the imaginary half) appears elided in
        // this view of the file; confirm against the full source.
            Op, /*IgnoreReal*/ IgnoreResultAssign, /*IgnoreImag*/ true);
        if (result.first)
          result.first = CGF.EmitPromotedValue(result, PromotionType).first;
        return result.first;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, false, true).first;
  }

  // __real on a non-complex scalar is the value itself; emit it (promoted
  // if requested).
  if (!PromotionType.isNull())
    return CGF.EmitPromotedScalarExpr(Op, PromotionType);
  return Visit(Op);
}
3362
3363Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E,
3364 QualType PromotionType) {
3365 QualType promotionTy = PromotionType.isNull()
3366 ? getPromotionType(E->getSubExpr()->getType())
3367 : PromotionType;
3368 Value *result = VisitImag(E, promotionTy);
3369 if (result && !promotionTy.isNull())
3370 result = EmitUnPromotedValue(result, E->getType());
3371 return result;
3372}
3373
// VisitImag - Emit the imaginary component (__imag) of the operand. When
// PromotionType is non-null the result is produced in that (wider) type;
// the caller (VisitUnaryImag) is responsible for un-promoting it.
Value *ScalarExprEmitter::VisitImag(const UnaryOperator *E,
                                    QualType PromotionType) {
  Expr *Op = E->getSubExpr();
  if (Op->getType()->isAnyComplexType()) {
    // If it's an l-value, load through the appropriate subobject l-value.
    // Note that we have to ask E because Op might be an l-value that
    // this won't work for, e.g. an Obj-C property.
    if (Op->isGLValue()) {
      if (!PromotionType.isNull()) {
        // NOTE(review): the declaration/initialization of 'result' (a
        // complex pair load ignoring the real half) appears elided in this
        // view of the file; confirm against the full source.
            Op, /*IgnoreReal*/ true, /*IgnoreImag*/ IgnoreResultAssign);
        if (result.second)
          result.second = CGF.EmitPromotedValue(result, PromotionType).second;
        return result.second;
      } else {
        return CGF.EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc())
            .getScalarVal();
      }
    }
    // Otherwise, calculate and project.
    return CGF.EmitComplexExpr(Op, true, false).second;
  }

  // __imag on a scalar returns zero. Emit the subexpr to ensure side
  // effects are evaluated, but not the actual value.
  if (Op->isGLValue())
    CGF.EmitLValue(Op);
  else if (!PromotionType.isNull())
    CGF.EmitPromotedScalarExpr(Op, PromotionType);
  else
    CGF.EmitScalarExpr(Op, true);
  if (!PromotionType.isNull())
    return llvm::Constant::getNullValue(ConvertType(PromotionType));
  return llvm::Constant::getNullValue(ConvertType(E->getType()));
}
3409
3410//===----------------------------------------------------------------------===//
3411// Binary Operators
3412//===----------------------------------------------------------------------===//
3413
3414Value *ScalarExprEmitter::EmitPromotedValue(Value *result,
3415 QualType PromotionType) {
3416 return CGF.Builder.CreateFPExt(result, ConvertType(PromotionType), "ext");
3417}
3418
3419Value *ScalarExprEmitter::EmitUnPromotedValue(Value *result,
3420 QualType ExprType) {
3421 return CGF.Builder.CreateFPTrunc(result, ConvertType(ExprType), "unpromotion");
3422}
3423
3424Value *ScalarExprEmitter::EmitPromoted(const Expr *E, QualType PromotionType) {
3425 E = E->IgnoreParens();
3426 if (auto BO = dyn_cast<BinaryOperator>(E)) {
3427 switch (BO->getOpcode()) {
3428#define HANDLE_BINOP(OP) \
3429 case BO_##OP: \
3430 return Emit##OP(EmitBinOps(BO, PromotionType));
3431 HANDLE_BINOP(Add)
3432 HANDLE_BINOP(Sub)
3433 HANDLE_BINOP(Mul)
3434 HANDLE_BINOP(Div)
3435#undef HANDLE_BINOP
3436 default:
3437 break;
3438 }
3439 } else if (auto UO = dyn_cast<UnaryOperator>(E)) {
3440 switch (UO->getOpcode()) {
3441 case UO_Imag:
3442 return VisitImag(UO, PromotionType);
3443 case UO_Real:
3444 return VisitReal(UO, PromotionType);
3445 case UO_Minus:
3446 return VisitMinus(UO, PromotionType);
3447 case UO_Plus:
3448 return VisitPlus(UO, PromotionType);
3449 default:
3450 break;
3451 }
3452 }
3453 auto result = Visit(const_cast<Expr *>(E));
3454 if (result) {
3455 if (!PromotionType.isNull())
3456 return EmitPromotedValue(result, PromotionType);
3457 else
3458 return EmitUnPromotedValue(result, E->getType());
3459 }
3460 return result;
3461}
3462
3463BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E,
3464 QualType PromotionType) {
3465 TestAndClearIgnoreResultAssign();
3466 BinOpInfo Result;
3467 Result.LHS = CGF.EmitPromotedScalarExpr(E->getLHS(), PromotionType);
3468 Result.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionType);
3469 if (!PromotionType.isNull())
3470 Result.Ty = PromotionType;
3471 else
3472 Result.Ty = E->getType();
3473 Result.Opcode = E->getOpcode();
3474 Result.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
3475 Result.E = E;
3476 return Result;
3477}
3478
// Emit a compound assignment (+=, -=, ...) as an l-value: evaluate the RHS,
// load the LHS, apply *Func (the binary-op emitter), convert and store the
// result back, and return the LHS l-value. 'Result' is set to the computed
// value. Handles promoted (e.g. excess-precision) computation types and
// lowers eligible atomic compound assignments to atomicrmw or a cmpxchg
// retry loop.
LValue ScalarExprEmitter::EmitCompoundAssignLValue(
    const CompoundAssignOperator *E,
    Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
    Value *&Result) {
  QualType LHSTy = E->getLHS()->getType();
  BinOpInfo OpInfo;

  // NOTE(review): an early-out (presumably for complex computation result
  // types) appears elided in this view of the file; confirm against the
  // full source.

  // Emit the RHS first. __block variables need to have the rhs evaluated
  // first, plus this should improve codegen a little.

  // The computation is carried out in PromotionTypeCR (the possibly-promoted
  // computation result type); the result is converted back to LHSTy below.
  QualType PromotionTypeCR;
  PromotionTypeCR = getPromotionType(E->getComputationResultType());
  if (PromotionTypeCR.isNull())
    PromotionTypeCR = E->getComputationResultType();
  QualType PromotionTypeLHS = getPromotionType(E->getComputationLHSType());
  QualType PromotionTypeRHS = getPromotionType(E->getRHS()->getType());
  if (!PromotionTypeRHS.isNull())
    OpInfo.RHS = CGF.EmitPromotedScalarExpr(E->getRHS(), PromotionTypeRHS);
  else
    OpInfo.RHS = Visit(E->getRHS());
  OpInfo.Ty = PromotionTypeCR;
  OpInfo.Opcode = E->getOpcode();
  OpInfo.FPFeatures = E->getFPFeaturesInEffect(CGF.getLangOpts());
  OpInfo.E = E;
  // Load/convert the LHS.
  LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);

  llvm::PHINode *atomicPHI = nullptr;
  if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
    QualType type = atomicTy->getValueType();
    // Fast path: integer operations with a matching atomicrmw instruction
    // can be emitted directly, unless a sanitizer or trapping overflow mode
    // needs to observe the intermediate result.
    if (!type->isBooleanType() && type->isIntegerType() &&
        !(type->isUnsignedIntegerType() &&
          CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
        CGF.getLangOpts().getSignedOverflowBehavior() !=
      // NOTE(review): the right-hand side of this comparison (presumably
      // LangOptions::SOB_Trapping) appears elided in this view of the file.
      llvm::AtomicRMWInst::BinOp AtomicOp = llvm::AtomicRMWInst::BAD_BINOP;
      llvm::Instruction::BinaryOps Op;
      switch (OpInfo.Opcode) {
      // We don't have atomicrmw operands for *, %, /, <<, >>
      case BO_MulAssign: case BO_DivAssign:
      case BO_RemAssign:
      case BO_ShlAssign:
      case BO_ShrAssign:
        break;
      case BO_AddAssign:
        AtomicOp = llvm::AtomicRMWInst::Add;
        Op = llvm::Instruction::Add;
        break;
      case BO_SubAssign:
        AtomicOp = llvm::AtomicRMWInst::Sub;
        Op = llvm::Instruction::Sub;
        break;
      case BO_AndAssign:
        AtomicOp = llvm::AtomicRMWInst::And;
        Op = llvm::Instruction::And;
        break;
      case BO_XorAssign:
        AtomicOp = llvm::AtomicRMWInst::Xor;
        Op = llvm::Instruction::Xor;
        break;
      case BO_OrAssign:
        AtomicOp = llvm::AtomicRMWInst::Or;
        Op = llvm::Instruction::Or;
        break;
      default:
        llvm_unreachable("Invalid compound assignment type");
      }
      if (AtomicOp != llvm::AtomicRMWInst::BAD_BINOP) {
        // Convert the RHS to the LHS type's memory representation so it can
        // be used directly as the atomicrmw operand.
        llvm::Value *Amt = CGF.EmitToMemory(
            EmitScalarConversion(OpInfo.RHS, E->getRHS()->getType(), LHSTy,
                                 E->getExprLoc()),
            LHSTy);
        Value *OldVal = Builder.CreateAtomicRMW(
            AtomicOp, LHSLV.getAddress(CGF), Amt,
            llvm::AtomicOrdering::SequentiallyConsistent);

        // Since operation is atomic, the result type is guaranteed to be the
        // same as the input in LLVM terms.
        Result = Builder.CreateBinOp(Op, OldVal, Amt);
        return LHSLV;
      }
    }
    // Slow path: compare-exchange loop. Load the old value, compute in a
    // dedicated block headed by a PHI, and retry (below) until the cmpxchg
    // succeeds.
    // FIXME: For floating point types, we should be saving and restoring the
    // floating point environment in the loop.
    llvm::BasicBlock *startBB = Builder.GetInsertBlock();
    llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
    OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
    Builder.CreateBr(opBB);
    Builder.SetInsertPoint(opBB);
    atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
    atomicPHI->addIncoming(OpInfo.LHS, startBB);
    OpInfo.LHS = atomicPHI;
  }
  else
    OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, OpInfo.FPFeatures);
  // NOTE(review): a declaration of 'Loc' (used below, presumably
  // E->getExprLoc()) appears elided in this view of the file.
  if (!PromotionTypeLHS.isNull())
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, PromotionTypeLHS,
                                      E->getExprLoc());
  else
    // NOTE(review): the trailing arguments of this conversion call appear
    // elided in this view of the file.
    OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,

  // Expand the binary operator.
  Result = (this->*Func)(OpInfo);

  // Convert the result back to the LHS type,
  // potentially with Implicit Conversion sanitizer check.
  // If LHSLV is a bitfield, use default ScalarConversionOpts
  // to avoid emit any implicit integer checks.
  Value *Previous = nullptr;
  if (LHSLV.isBitField()) {
    Previous = Result;
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc);
  } else
    Result = EmitScalarConversion(Result, PromotionTypeCR, LHSTy, Loc,
                                  ScalarConversionOpts(CGF.SanOpts));

  if (atomicPHI) {
    // Close the cmpxchg retry loop: attempt the store; on failure, feed the
    // freshly-observed value back into the PHI and recompute.
    llvm::BasicBlock *curBlock = Builder.GetInsertBlock();
    llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
    auto Pair = CGF.EmitAtomicCompareExchange(
        LHSLV, RValue::get(atomicPHI), RValue::get(Result), E->getExprLoc());
    llvm::Value *old = CGF.EmitToMemory(Pair.first.getScalarVal(), LHSTy);
    llvm::Value *success = Pair.second;
    atomicPHI->addIncoming(old, curBlock);
    Builder.CreateCondBr(success, contBB, atomicPHI->getParent());
    Builder.SetInsertPoint(contBB);
    return LHSLV;
  }

  // Store the result value into the LHS lvalue. Bit-fields are handled
  // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
  // 'An assignment expression has the value of the left operand after the
  // assignment...'.
  if (LHSLV.isBitField()) {
    Value *Src = Previous ? Previous : Result;
    QualType SrcType = E->getRHS()->getType();
    QualType DstType = E->getLHS()->getType();
    // NOTE(review): the bitfield store (and any guarding sanitizer check)
    // preceding this call appears elided in this view of the file.
    CGF.EmitBitfieldConversionCheck(Src, SrcType, Result, DstType,
                                    LHSLV.getBitFieldInfo(), E->getExprLoc());
  } else
    // NOTE(review): the non-bitfield store statement appears elided in this
    // view of the file.

  if (CGF.getLangOpts().OpenMP)
    // NOTE(review): the OpenMP runtime call taking these arguments appears
    // elided in this view of the file.
        E->getLHS());
  return LHSLV;
}
3635
3636Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
3637 Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
3638 bool Ignore = TestAndClearIgnoreResultAssign();
3639 Value *RHS = nullptr;
3640 LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
3641
3642 // If the result is clearly ignored, return now.
3643 if (Ignore)
3644 return nullptr;
3645
3646 // The result of an assignment in C is the assigned r-value.
3647 if (!CGF.getLangOpts().CPlusPlus)
3648 return RHS;
3649
3650 // If the lvalue is non-volatile, return the computed value of the assignment.
3651 if (!LHS.isVolatileQualified())
3652 return RHS;
3653
3654 // Otherwise, reload the value.
3655 return EmitLoadOfLValue(LHS, E->getExprLoc());
3656}
3657
// Emit ubsan checks for integer '/' or '%': divide-by-zero and, for signed
// types, the INT_MIN / -1 overflow case. 'Zero' is a zero constant of the
// operand type; 'isDiv' distinguishes division from remainder.
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
    const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
  // NOTE(review): the declaration of 'Checks' (a small vector of
  // (condition, sanitizer-kind) pairs) appears elided in this view of the
  // file; confirm against the full source.

  if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
    Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
                                    SanitizerKind::IntegerDivideByZero));
  }

  const auto *BO = cast<BinaryOperator>(Ops.E);
  if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
      Ops.Ty->hasSignedIntegerRepresentation() &&
      !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS()) &&
      Ops.mayHaveIntegerOverflow()) {
    llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());

    // Signed division can only overflow for INT_MIN / -1; check that at
    // least one operand differs from that pair.
    llvm::Value *IntMin =
        Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
    llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);

    llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
    llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
    llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
    Checks.push_back(
        std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
  }

  if (Checks.size() > 0)
    EmitBinOpCheck(Checks, Ops);
}
3688
3689Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
3690 {
3691 CodeGenFunction::SanitizerScope SanScope(&CGF);
3692 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3693 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3694 Ops.Ty->isIntegerType() &&
3695 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3696 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3697 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
3698 } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
3699 Ops.Ty->isRealFloatingType() &&
3700 Ops.mayHaveFloatDivisionByZero()) {
3701 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3702 llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
3703 EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
3704 Ops);
3705 }
3706 }
3707
3708 if (Ops.Ty->isConstantMatrixType()) {
3709 llvm::MatrixBuilder MB(Builder);
3710 // We need to check the types of the operands of the operator to get the
3711 // correct matrix dimensions.
3712 auto *BO = cast<BinaryOperator>(Ops.E);
3713 (void)BO;
3714 assert(
3715 isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
3716 "first operand must be a matrix");
3717 assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
3718 "second operand must be an arithmetic type");
3719 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3720 return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
3721 Ops.Ty->hasUnsignedIntegerRepresentation());
3722 }
3723
3724 if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
3725 llvm::Value *Val;
3726 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
3727 Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
3728 CGF.SetDivFPAccuracy(Val);
3729 return Val;
3730 }
3731 else if (Ops.isFixedPointOp())
3732 return EmitFixedPointBinOp(Ops);
3733 else if (Ops.Ty->hasUnsignedIntegerRepresentation())
3734 return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
3735 else
3736 return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
3737}
3738
3739Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
3740 // Rem in C can't be a floating point type: C99 6.5.5p2.
3741 if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
3742 CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
3743 Ops.Ty->isIntegerType() &&
3744 (Ops.mayHaveIntegerDivisionByZero() || Ops.mayHaveIntegerOverflow())) {
3745 CodeGenFunction::SanitizerScope SanScope(&CGF);
3746 llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
3747 EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
3748 }
3749
3750 if (Ops.Ty->hasUnsignedIntegerRepresentation())
3751 return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
3752 else
3753 return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
3754}
3755
// Emit Ops with an overflow check (used for -ftrapv and the integer
// overflow sanitizers). The result is computed with the
// llvm.*.with.overflow intrinsics; on overflow it either traps, calls the
// sanitizer runtime, or calls a user-specified overflow handler whose
// return value replaces the result.
Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
  unsigned IID;
  unsigned OpID = 0; // Encodes the operation (and signedness) for the handler.
  SanitizerHandler OverflowKind;

  bool isSigned = Ops.Ty->isSignedIntegerOrEnumerationType();
  switch (Ops.Opcode) {
  case BO_Add:
  case BO_AddAssign:
    OpID = 1;
    IID = isSigned ? llvm::Intrinsic::sadd_with_overflow :
                     llvm::Intrinsic::uadd_with_overflow;
    OverflowKind = SanitizerHandler::AddOverflow;
    break;
  case BO_Sub:
  case BO_SubAssign:
    OpID = 2;
    IID = isSigned ? llvm::Intrinsic::ssub_with_overflow :
                     llvm::Intrinsic::usub_with_overflow;
    OverflowKind = SanitizerHandler::SubOverflow;
    break;
  case BO_Mul:
  case BO_MulAssign:
    OpID = 3;
    IID = isSigned ? llvm::Intrinsic::smul_with_overflow :
                     llvm::Intrinsic::umul_with_overflow;
    OverflowKind = SanitizerHandler::MulOverflow;
    break;
  default:
    llvm_unreachable("Unsupported operation for overflow detection");
  }
  // Pack the signedness into the low bit of OpID.
  OpID <<= 1;
  if (isSigned)
    OpID |= 1;

  CodeGenFunction::SanitizerScope SanScope(&CGF);
  llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);

  llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);

  // The intrinsic returns {result, did-overflow}.
  Value *resultAndOverflow = Builder.CreateCall(intrinsic, {Ops.LHS, Ops.RHS});
  Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
  Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);

  // Handle overflow with llvm.trap if no custom handler has been specified.
  const std::string *handlerName =
  // NOTE(review): the initializer (presumably the overflow-handler name from
  // the language options) appears elided in this view of the file.
  if (handlerName->empty()) {
    // If the signed-integer-overflow sanitizer is enabled, emit a call to its
    // runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
    if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
      llvm::Value *NotOverflow = Builder.CreateNot(overflow);
      SanitizerMask Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
                                    : SanitizerKind::UnsignedIntegerOverflow;
      EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
    } else
      CGF.EmitTrapCheck(Builder.CreateNot(overflow), OverflowKind);
    return result;
  }

  // Branch in case of overflow.
  llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
  llvm::BasicBlock *continueBB =
      CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
  llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);

  Builder.CreateCondBr(overflow, overflowBB, continueBB);

  // If an overflow handler is set, then we want to call it and then use its
  // result, if it returns.
  Builder.SetInsertPoint(overflowBB);

  // Get the overflow handler.
  llvm::Type *Int8Ty = CGF.Int8Ty;
  llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
  llvm::FunctionType *handlerTy =
      llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
  llvm::FunctionCallee handler =
      CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);

  // Sign extend the args to 64-bit, so that we can use the same handler for
  // all types of overflow.
  llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
  llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);

  // Call the handler with the two arguments, the operation, and the size of
  // the result.
  llvm::Value *handlerArgs[] = {
    lhs,
    rhs,
    Builder.getInt8(OpID),
    Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth())
  };
  llvm::Value *handlerResult =
    CGF.EmitNounwindRuntimeCall(handler, handlerArgs);

  // Truncate the result back to the desired size.
  handlerResult = Builder.CreateTrunc(handlerResult, opTy);
  Builder.CreateBr(continueBB);

  // Merge the non-overflow result with the handler's result.
  Builder.SetInsertPoint(continueBB);
  llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
  phi->addIncoming(result, initialBB);
  phi->addIncoming(handlerResult, overflowBB);

  return phi;
}
3863
/// Emit pointer + index arithmetic.
// NOTE(review): the opening of this function's signature (it returns a
// Value* and takes a CodeGenFunction reference as its first parameter)
// appears elided in this view of the file; confirm against the full source.
                                     const BinOpInfo &op,
                                     bool isSubtraction) {
  // Must have binary (not unary) expr here. Unary pointer
  // increment/decrement doesn't use this path.
  const BinaryOperator *expr = cast<BinaryOperator>(op.E);

  Value *pointer = op.LHS;
  Expr *pointerOperand = expr->getLHS();
  Value *index = op.RHS;
  Expr *indexOperand = expr->getRHS();

  // In a subtraction, the LHS is always the pointer.
  if (!isSubtraction && !pointer->getType()->isPointerTy()) {
    std::swap(pointer, index);
    std::swap(pointerOperand, indexOperand);
  }

  bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();

  unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
  auto &DL = CGF.CGM.getDataLayout();
  auto PtrTy = cast<llvm::PointerType>(pointer->getType());

  // Some versions of glibc and gcc use idioms (particularly in their malloc
  // routines) that add a pointer-sized integer (known to be a pointer value)
  // to a null pointer in order to cast the value back to an integer or as
  // part of a pointer alignment algorithm. This is undefined behavior, but
  // we'd like to be able to compile programs that use it.
  //
  // Normally, we'd generate a GEP with a null-pointer base here in response
  // to that code, but it's also UB to dereference a pointer created that
  // way. Instead (as an acknowledged hack to tolerate the idiom) we will
  // generate a direct cast of the integer value to a pointer.
  //
  // The idiom (p = nullptr + N) is not met if any of the following are true:
  //
  // The operation is subtraction.
  // The index is not pointer-sized.
  // The pointer type is not byte-sized.
  //
  // NOTE(review): the 'if (...' head of the null-pointer-idiom check taking
  // these arguments appears elided in this view of the file.
      op.Opcode,
      expr->getLHS(),
      expr->getRHS()))
    return CGF.Builder.CreateIntToPtr(index, pointer->getType());

  if (width != DL.getIndexTypeSizeInBits(PtrTy)) {
    // Zero-extend or sign-extend the pointer value according to
    // whether the index is signed or not.
    index = CGF.Builder.CreateIntCast(index, DL.getIndexType(PtrTy), isSigned,
                                      "idx.ext");
  }

  // If this is subtraction, negate the index.
  if (isSubtraction)
    index = CGF.Builder.CreateNeg(index, "idx.neg");

  if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
    CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
                        /*Accessed*/ false);

  // NOTE(review): the declaration whose initializer continues below
  // (presumably 'const PointerType *pointerType') appears elided in this
  // view of the file.
    = pointerOperand->getType()->getAs<PointerType>();
  if (!pointerType) {
    // Not a plain pointer type: scale the index by the pointee size by hand
    // and GEP over bytes.
    QualType objectType = pointerOperand->getType()
        // NOTE(review): an intermediate type cast in this chain appears
        // elided in this view of the file.
                            ->getPointeeType();
    llvm::Value *objectSize
      = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));

    index = CGF.Builder.CreateMul(index, objectSize);

    Value *result =
        CGF.Builder.CreateGEP(CGF.Int8Ty, pointer, index, "add.ptr");
    return CGF.Builder.CreateBitCast(result, pointer->getType());
  }

  QualType elementType = pointerType->getPointeeType();
  if (const VariableArrayType *vla
        = CGF.getContext().getAsVariableArrayType(elementType)) {
    // The element count here is the total number of non-VLA elements.
    llvm::Value *numElements = CGF.getVLASize(vla).NumElts;

    // Effectively, the multiply by the VLA size is part of the GEP.
    // GEP indexes are signed, and scaling an index isn't permitted to
    // signed-overflow, so we use the same semantics for our explicit
    // multiply. We suppress this if overflow is not undefined behavior.
    llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
    // NOTE(review): the condition choosing between the plain and the
    // checked/NSW branches below appears elided in this view of the file.
      index = CGF.Builder.CreateMul(index, numElements, "vla.index");
      pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
    } else {
      index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
      pointer = CGF.EmitCheckedInBoundsGEP(
          elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
          "add.ptr");
    }
    return pointer;
  }

  // Explicitly handle GNU void* and function pointer arithmetic extensions. The
  // GNU void* casts amount to no-ops since our void* type is i8*, but this is
  // future proof.
  llvm::Type *elemTy;
  if (elementType->isVoidType() || elementType->isFunctionType())
    elemTy = CGF.Int8Ty;
  else
    elemTy = CGF.ConvertTypeForMem(elementType);

  // NOTE(review): the condition guarding the plain (non-checked) GEP below
  // appears elided in this view of the file.
    return CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");

  return CGF.EmitCheckedInBoundsGEP(
      elemTy, pointer, index, isSigned, isSubtraction, op.E->getExprLoc(),
      "add.ptr");
}
3982
3983// Construct an fmuladd intrinsic to represent a fused mul-add of MulOp and
3984// Addend. Use negMul and negAdd to negate the first operand of the Mul or
3985// the add operand respectively. This allows fmuladd to represent a*b-c, or
3986// c-a*b. Patterns in LLVM should catch the negated forms and translate them to
3987// efficient operations.
3988static Value* buildFMulAdd(llvm::Instruction *MulOp, Value *Addend,
3989 const CodeGenFunction &CGF, CGBuilderTy &Builder,
3990 bool negMul, bool negAdd) {
3991 Value *MulOp0 = MulOp->getOperand(0);
3992 Value *MulOp1 = MulOp->getOperand(1);
3993 if (negMul)
3994 MulOp0 = Builder.CreateFNeg(MulOp0, "neg");
3995 if (negAdd)
3996 Addend = Builder.CreateFNeg(Addend, "neg");
3997
3998 Value *FMulAdd = nullptr;
3999 if (Builder.getIsFPConstrained()) {
4000 assert(isa<llvm::ConstrainedFPIntrinsic>(MulOp) &&
4001 "Only constrained operation should be created when Builder is in FP "
4002 "constrained mode");
4003 FMulAdd = Builder.CreateConstrainedFPCall(
4004 CGF.CGM.getIntrinsic(llvm::Intrinsic::experimental_constrained_fmuladd,
4005 Addend->getType()),
4006 {MulOp0, MulOp1, Addend});
4007 } else {
4008 FMulAdd = Builder.CreateCall(
4009 CGF.CGM.getIntrinsic(llvm::Intrinsic::fmuladd, Addend->getType()),
4010 {MulOp0, MulOp1, Addend});
4011 }
4012 MulOp->eraseFromParent();
4013
4014 return FMulAdd;
4015}
4016
// Check whether it would be legal to emit an fmuladd intrinsic call to
// represent op and if so, build the fmuladd.
//
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
//
// Returns nullptr when no fmuladd can be formed; the caller then emits a
// plain fadd/fsub.
static Value* tryEmitFMulAdd(const BinOpInfo &op,
                             const CodeGenFunction &CGF, CGBuilderTy &Builder,
                             bool isSub=false) {

  assert((op.Opcode == BO_Add || op.Opcode == BO_AddAssign ||
          op.Opcode == BO_Sub || op.Opcode == BO_SubAssign) &&
         "Only fadd/fsub can be the root of an fmuladd.");

  // Check whether this op is marked as fusable.
  if (!op.FPFeatures.allowFPContractWithinStatement())
    return nullptr;

  Value *LHS = op.LHS;
  Value *RHS = op.RHS;

  // Peek through fneg to look for fmul. Make sure fneg has no users, and that
  // it is the only use of its operand.
  bool NegLHS = false;
  if (auto *LHSUnOp = dyn_cast<llvm::UnaryOperator>(LHS)) {
    if (LHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        LHSUnOp->use_empty() && LHSUnOp->getOperand(0)->hasOneUse()) {
      LHS = LHSUnOp->getOperand(0);
      NegLHS = true;
    }
  }

  bool NegRHS = false;
  if (auto *RHSUnOp = dyn_cast<llvm::UnaryOperator>(RHS)) {
    if (RHSUnOp->getOpcode() == llvm::Instruction::FNeg &&
        RHSUnOp->use_empty() && RHSUnOp->getOperand(0)->hasOneUse()) {
      RHS = RHSUnOp->getOperand(0);
      NegRHS = true;
    }
  }

  // We have a potentially fusable op. Look for a mul on one of the operands.
  // Also, make sure that the mul result isn't used directly. In that case,
  // there's no point creating a muladd operation.
  if (auto *LHSBinOp = dyn_cast<llvm::BinaryOperator>(LHS)) {
    if (LHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      // (Erase happens before buildFMulAdd deletes the fmul; order matters
      // since the fneg uses the fmul's result.)
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::BinaryOperator>(RHS)) {
    if (RHSBinOp->getOpcode() == llvm::Instruction::FMul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      // A negated fmul on the RHS flips the subtraction sense (c - (-a*b)).
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  // Same matching for the constrained-FP multiply intrinsic.
  if (auto *LHSBinOp = dyn_cast<llvm::CallBase>(LHS)) {
    if (LHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (LHSBinOp->use_empty() || NegLHS)) {
      // If we looked through fneg, erase it.
      if (NegLHS)
        cast<llvm::Instruction>(op.LHS)->eraseFromParent();
      return buildFMulAdd(LHSBinOp, op.RHS, CGF, Builder, NegLHS, isSub);
    }
  }
  if (auto *RHSBinOp = dyn_cast<llvm::CallBase>(RHS)) {
    if (RHSBinOp->getIntrinsicID() ==
            llvm::Intrinsic::experimental_constrained_fmul &&
        (RHSBinOp->use_empty() || NegRHS)) {
      // If we looked through fneg, erase it.
      if (NegRHS)
        cast<llvm::Instruction>(op.RHS)->eraseFromParent();
      return buildFMulAdd(RHSBinOp, op.LHS, CGF, Builder, isSub ^ NegRHS, false);
    }
  }

  return nullptr;
}
4103
// Emit an addition: pointer arithmetic, signed/unsigned integer (with the
// configured overflow behavior and sanitizers), matrix, floating point
// (attempting fmuladd contraction), and fixed point are each handled.
Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
  // Pointer + integer (in either operand order) goes through the
  // pointer-arithmetic path.
  if (op.LHS->getType()->isPointerTy() ||
      op.RHS->getType()->isPointerTy())
    // NOTE(review): the call forwarding to the pointer-arithmetic emitter
    // appears elided in this view of the file.

  if (op.Ty->isSignedIntegerOrEnumerationType()) {
    // NOTE(review): the case labels of this switch (the signed-overflow
    // behavior enumerators) appear elided in this view of the file; the
    // fallthrough structure below is from the original.
    switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
      if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      [[fallthrough]];
      if (CanElideOverflowCheck(CGF.getContext(), op))
        return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
      return EmitOverflowCheckedBinOp(op);
    }
  }

  // For vector and matrix adds, try to fold into a fmuladd.
  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    // Try to form an fmuladd.
    if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
      return FMulAdd;
  }

  if (op.Ty->isConstantMatrixType()) {
    llvm::MatrixBuilder MB(Builder);
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return MB.CreateAdd(op.LHS, op.RHS);
  }

  // Unsigned overflow sanitizer check (when not provably unnecessary).
  if (op.Ty->isUnsignedIntegerType() &&
      CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
      !CanElideOverflowCheck(CGF.getContext(), op))
    return EmitOverflowCheckedBinOp(op);

  if (op.LHS->getType()->isFPOrFPVectorTy()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
    return Builder.CreateFAdd(op.LHS, op.RHS, "add");
  }

  if (op.isFixedPointOp())
    return EmitFixedPointBinOp(op);

  return Builder.CreateAdd(op.LHS, op.RHS, "add");
}
4155
4156/// The resulting value must be calculated with exact precision, so the operands
4157/// may not be the same type.
4158Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
4159 using llvm::APSInt;
4160 using llvm::ConstantInt;
4161
4162 // This is either a binary operation where at least one of the operands is
4163 // a fixed-point type, or a unary operation where the operand is a fixed-point
4164 // type. The result type of a binary operation is determined by
4165 // Sema::handleFixedPointConversions().
4166 QualType ResultTy = op.Ty;
4167 QualType LHSTy, RHSTy;
4168 if (const auto *BinOp = dyn_cast<BinaryOperator>(op.E)) {
4169 RHSTy = BinOp->getRHS()->getType();
4170 if (const auto *CAO = dyn_cast<CompoundAssignOperator>(BinOp)) {
4171 // For compound assignment, the effective type of the LHS at this point
4172 // is the computation LHS type, not the actual LHS type, and the final
4173 // result type is not the type of the expression but rather the
4174 // computation result type.
4175 LHSTy = CAO->getComputationLHSType();
4176 ResultTy = CAO->getComputationResultType();
4177 } else
4178 LHSTy = BinOp->getLHS()->getType();
4179 } else if (const auto *UnOp = dyn_cast<UnaryOperator>(op.E)) {
4180 LHSTy = UnOp->getSubExpr()->getType();
4181 RHSTy = UnOp->getSubExpr()->getType();
4182 }
4183 ASTContext &Ctx = CGF.getContext();
4184 Value *LHS = op.LHS;
4185 Value *RHS = op.RHS;
4186
4187 auto LHSFixedSema = Ctx.getFixedPointSemantics(LHSTy);
4188 auto RHSFixedSema = Ctx.getFixedPointSemantics(RHSTy);
4189 auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
4190 auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
4191
4192 // Perform the actual operation.
4193 Value *Result;
4194 llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
4195 switch (op.Opcode) {
4196 case BO_AddAssign:
4197 case BO_Add:
4198 Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
4199 break;
4200 case BO_SubAssign:
4201 case BO_Sub:
4202 Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
4203 break;
4204 case BO_MulAssign:
4205 case BO_Mul:
4206 Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
4207 break;
4208 case BO_DivAssign:
4209 case BO_Div:
4210 Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
4211 break;
4212 case BO_ShlAssign:
4213 case BO_Shl:
4214 Result = FPBuilder.CreateShl(LHS, LHSFixedSema, RHS);
4215 break;
4216 case BO_ShrAssign:
4217 case BO_Shr:
4218 Result = FPBuilder.CreateShr(LHS, LHSFixedSema, RHS);
4219 break;
4220 case BO_LT:
4221 return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4222 case BO_GT:
4223 return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
4224 case BO_LE:
4225 return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4226 case BO_GE:
4227 return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4228 case BO_EQ:
4229 // For equality operations, we assume any padding bits on unsigned types are
4230 // zero'd out. They could be overwritten through non-saturating operations
4231 // that cause overflow, but this leads to undefined behavior.
4232 return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
4233 case BO_NE:
4234 return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
4235 case BO_Cmp:
4236 case BO_LAnd:
4237 case BO_LOr:
4238 llvm_unreachable("Found unimplemented fixed point binary operation");
4239 case BO_PtrMemD:
4240 case BO_PtrMemI:
4241 case BO_Rem:
4242 case BO_Xor:
4243 case BO_And:
4244 case BO_Or:
4245 case BO_Assign:
4246 case BO_RemAssign:
4247 case BO_AndAssign:
4248 case BO_XorAssign:
4249 case BO_OrAssign:
4250 case BO_Comma:
4251 llvm_unreachable("Found unsupported binary operation for fixed point types.");
4252 }
4253
4254 bool IsShift = BinaryOperator::isShiftOp(op.Opcode) ||
4256 // Convert to the result type.
4257 return FPBuilder.CreateFixedToFixed(Result, IsShift ? LHSFixedSema
4258 : CommonFixedSema,
4259 ResultFixedSema);
4260}
4261
4262Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
4263 // The LHS is always a pointer if either side is.
4264 if (!op.LHS->getType()->isPointerTy()) {
4265 if (op.Ty->isSignedIntegerOrEnumerationType()) {
4266 switch (CGF.getLangOpts().getSignedOverflowBehavior()) {
4268 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4269 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4270 [[fallthrough]];
4272 if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
4273 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4274 [[fallthrough]];
4276 if (CanElideOverflowCheck(CGF.getContext(), op))
4277 return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
4278 return EmitOverflowCheckedBinOp(op);
4279 }
4280 }
4281
4282 // For vector and matrix subs, try to fold into a fmuladd.
4283 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4284 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4285 // Try to form an fmuladd.
4286 if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
4287 return FMulAdd;
4288 }
4289
4290 if (op.Ty->isConstantMatrixType()) {
4291 llvm::MatrixBuilder MB(Builder);
4292 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4293 return MB.CreateSub(op.LHS, op.RHS);
4294 }
4295
4296 if (op.Ty->isUnsignedIntegerType() &&
4297 CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
4298 !CanElideOverflowCheck(CGF.getContext(), op))
4299 return EmitOverflowCheckedBinOp(op);
4300
4301 if (op.LHS->getType()->isFPOrFPVectorTy()) {
4302 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
4303 return Builder.CreateFSub(op.LHS, op.RHS, "sub");
4304 }
4305
4306 if (op.isFixedPointOp())
4307 return EmitFixedPointBinOp(op);
4308
4309 return Builder.CreateSub(op.LHS, op.RHS, "sub");
4310 }
4311
4312 // If the RHS is not a pointer, then we have normal pointer
4313 // arithmetic.
4314 if (!op.RHS->getType()->isPointerTy())
4316
4317 // Otherwise, this is a pointer subtraction.
4318
4319 // Do the raw subtraction part.
4320 llvm::Value *LHS
4321 = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
4322 llvm::Value *RHS
4323 = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
4324 Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
4325
4326 // Okay, figure out the element size.
4327 const BinaryOperator *expr = cast<BinaryOperator>(op.E);
4328 QualType elementType = expr->getLHS()->getType()->getPointeeType();
4329
4330 llvm::Value *divisor = nullptr;
4331
4332 // For a variable-length array, this is going to be non-constant.
4333 if (const VariableArrayType *vla
4334 = CGF.getContext().getAsVariableArrayType(elementType)) {
4335 auto VlaSize = CGF.getVLASize(vla);
4336 elementType = VlaSize.Type;
4337 divisor = VlaSize.NumElts;
4338
4339 // Scale the number of non-VLA elements by the non-VLA element size.
4340 CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
4341 if (!eltSize.isOne())
4342 divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
4343
4344 // For everything elese, we can just compute it, safe in the
4345 // assumption that Sema won't let anything through that we can't
4346 // safely compute the size of.
4347 } else {
4348 CharUnits elementSize;
4349 // Handle GCC extension for pointer arithmetic on void* and
4350 // function pointer types.
4351 if (elementType->isVoidType() || elementType->isFunctionType())
4352 elementSize = CharUnits::One();
4353 else
4354 elementSize = CGF.getContext().getTypeSizeInChars(elementType);
4355
4356 // Don't even emit the divide for element size of 1.
4357 if (elementSize.isOne())
4358 return diffInChars;
4359
4360 divisor = CGF.CGM.getSize(elementSize);
4361 }
4362
4363 // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
4364 // pointer difference in C is only defined in the case where both operands
4365 // are pointing to elements of an array.
4366 return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
4367}
4368
4369Value *ScalarExprEmitter::GetMaximumShiftAmount(Value *LHS, Value *RHS,
4370 bool RHSIsSigned) {
4371 llvm::IntegerType *Ty;
4372 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4373 Ty = cast<llvm::IntegerType>(VT->getElementType());
4374 else
4375 Ty = cast<llvm::IntegerType>(LHS->getType());
4376 // For a given type of LHS the maximum shift amount is width(LHS)-1, however
4377 // it can occur that width(LHS)-1 > range(RHS). Since there is no check for
4378 // this in ConstantInt::get, this results in the value getting truncated.
4379 // Constrain the return value to be max(RHS) in this case.
4380 llvm::Type *RHSTy = RHS->getType();
4381 llvm::APInt RHSMax =
4382 RHSIsSigned ? llvm::APInt::getSignedMaxValue(RHSTy->getScalarSizeInBits())
4383 : llvm::APInt::getMaxValue(RHSTy->getScalarSizeInBits());
4384 if (RHSMax.ult(Ty->getBitWidth()))
4385 return llvm::ConstantInt::get(RHSTy, RHSMax);
4386 return llvm::ConstantInt::get(RHSTy, Ty->getBitWidth() - 1);
4387}
4388
4389Value *ScalarExprEmitter::ConstrainShiftValue(Value *LHS, Value *RHS,
4390 const Twine &Name) {
4391 llvm::IntegerType *Ty;
4392 if (auto *VT = dyn_cast<llvm::VectorType>(LHS->getType()))
4393 Ty = cast<llvm::IntegerType>(VT->getElementType());
4394 else
4395 Ty = cast<llvm::IntegerType>(LHS->getType());
4396
4397 if (llvm::isPowerOf2_64(Ty->getBitWidth()))
4398 return Builder.CreateAnd(RHS, GetMaximumShiftAmount(LHS, RHS, false), Name);
4399
4400 return Builder.CreateURem(
4401 RHS, llvm::ConstantInt::get(RHS->getType(), Ty->getBitWidth()), Name);
4402}
4403
4404Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
4405 // TODO: This misses out on the sanitizer check below.
4406 if (Ops.isFixedPointOp())
4407 return EmitFixedPointBinOp(Ops);
4408
4409 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4410 // RHS to the same size as the LHS.
4411 Value *RHS = Ops.RHS;
4412 if (Ops.LHS->getType() != RHS->getType())
4413 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4414
4415 bool SanitizeSignedBase = CGF.SanOpts.has(SanitizerKind::ShiftBase) &&
4416 Ops.Ty->hasSignedIntegerRepresentation() &&
4418 !CGF.getLangOpts().CPlusPlus20;
4419 bool SanitizeUnsignedBase =
4420 CGF.SanOpts.has(SanitizerKind::UnsignedShiftBase) &&
4421 Ops.Ty->hasUnsignedIntegerRepresentation();
4422 bool SanitizeBase = SanitizeSignedBase || SanitizeUnsignedBase;
4423 bool SanitizeExponent = CGF.SanOpts.has(SanitizerKind::ShiftExponent);
4424 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4425 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4426 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shl.mask");
4427 else if ((SanitizeBase || SanitizeExponent) &&
4428 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4429 CodeGenFunction::SanitizerScope SanScope(&CGF);
4431 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4432 llvm::Value *WidthMinusOne =
4433 GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned);
4434 llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
4435
4436 if (SanitizeExponent) {
4437 Checks.push_back(
4438 std::make_pair(ValidExponent, SanitizerKind::ShiftExponent));
4439 }
4440
4441 if (SanitizeBase) {
4442 // Check whether we are shifting any non-zero bits off the top of the
4443 // integer. We only emit this check if exponent is valid - otherwise
4444 // instructions below will have undefined behavior themselves.
4445 llvm::BasicBlock *Orig = Builder.GetInsertBlock();
4446 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4447 llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
4448 Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
4449 llvm::Value *PromotedWidthMinusOne =
4450 (RHS == Ops.RHS) ? WidthMinusOne
4451 : GetMaximumShiftAmount(Ops.LHS, RHS, RHSIsSigned);
4452 CGF.EmitBlock(CheckShiftBase);
4453 llvm::Value *BitsShiftedOff = Builder.CreateLShr(
4454 Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
4455 /*NUW*/ true, /*NSW*/ true),
4456 "shl.check");
4457 if (SanitizeUnsignedBase || CGF.getLangOpts().CPlusPlus) {
4458 // In C99, we are not permitted to shift a 1 bit into the sign bit.
4459 // Under C++11's rules, shifting a 1 bit into the sign bit is
4460 // OK, but shifting a 1 bit out of it is not. (C89 and C++03 don't
4461 // define signed left shifts, so we use the C99 and C++11 rules there).
4462 // Unsigned shifts can always shift into the top bit.
4463 llvm::Value *One = llvm::ConstantInt::get(BitsShiftedOff->getType(), 1);
4464 BitsShiftedOff = Builder.CreateLShr(BitsShiftedOff, One);
4465 }
4466 llvm::Value *Zero = llvm::ConstantInt::get(BitsShiftedOff->getType(), 0);
4467 llvm::Value *ValidBase = Builder.CreateICmpEQ(BitsShiftedOff, Zero);
4468 CGF.EmitBlock(Cont);
4469 llvm::PHINode *BaseCheck = Builder.CreatePHI(ValidBase->getType(), 2);
4470 BaseCheck->addIncoming(Builder.getTrue(), Orig);
4471 BaseCheck->addIncoming(ValidBase, CheckShiftBase);
4472 Checks.push_back(std::make_pair(
4473 BaseCheck, SanitizeSignedBase ? SanitizerKind::ShiftBase
4474 : SanitizerKind::UnsignedShiftBase));
4475 }
4476
4477 assert(!Checks.empty());
4478 EmitBinOpCheck(Checks, Ops);
4479 }
4480
4481 return Builder.CreateShl(Ops.LHS, RHS, "shl");
4482}
4483
4484Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
4485 // TODO: This misses out on the sanitizer check below.
4486 if (Ops.isFixedPointOp())
4487 return EmitFixedPointBinOp(Ops);
4488
4489 // LLVM requires the LHS and RHS to be the same type: promote or truncate the
4490 // RHS to the same size as the LHS.
4491 Value *RHS = Ops.RHS;
4492 if (Ops.LHS->getType() != RHS->getType())
4493 RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
4494
4495 // OpenCL 6.3j: shift values are effectively % word size of LHS.
4496 if (CGF.getLangOpts().OpenCL || CGF.getLangOpts().HLSL)
4497 RHS = ConstrainShiftValue(Ops.LHS, RHS, "shr.mask");
4498 else if (CGF.SanOpts.has(SanitizerKind::ShiftExponent) &&
4499 isa<llvm::IntegerType>(Ops.LHS->getType())) {
4500 CodeGenFunction::SanitizerScope SanScope(&CGF);
4501 bool RHSIsSigned = Ops.rhsHasSignedIntegerRepresentation();
4502 llvm::Value *Valid = Builder.CreateICmpULE(
4503 Ops.RHS, GetMaximumShiftAmount(Ops.LHS, Ops.RHS, RHSIsSigned));
4504 EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::ShiftExponent), Ops);
4505 }
4506
4507 if (Ops.Ty->hasUnsignedIntegerRepresentation())
4508 return Builder.CreateLShr(Ops.LHS, RHS, "shr");
4509 return Builder.CreateAShr(Ops.LHS, RHS, "shr");
4510}
4511
4513// return corresponding comparison intrinsic for given vector type
4514static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
4515 BuiltinType::Kind ElemKind) {
4516 switch (ElemKind) {
4517 default: llvm_unreachable("unexpected element type");
4518 case BuiltinType::Char_U:
4519 case BuiltinType::UChar:
4520 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4521 llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
4522 case BuiltinType::Char_S:
4523 case BuiltinType::SChar:
4524 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
4525 llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
4526 case BuiltinType::UShort:
4527 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4528 llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
4529 case BuiltinType::Short:
4530 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
4531 llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
4532 case BuiltinType::UInt:
4533 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4534 llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
4535 case BuiltinType::Int:
4536 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
4537 llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
4538 case BuiltinType::ULong:
4539 case BuiltinType::ULongLong:
4540 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4541 llvm::Intrinsic::ppc_altivec_vcmpgtud_p;
4542 case BuiltinType::Long:
4543 case BuiltinType::LongLong:
4544 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequd_p :
4545 llvm::Intrinsic::ppc_altivec_vcmpgtsd_p;
4546 case BuiltinType::Float:
4547 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
4548 llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
4549 case BuiltinType::Double:
4550 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_vsx_xvcmpeqdp_p :
4551 llvm::Intrinsic::ppc_vsx_xvcmpgtdp_p;
4552 case BuiltinType::UInt128:
4553 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4554 : llvm::Intrinsic::ppc_altivec_vcmpgtuq_p;
4555 case BuiltinType::Int128:
4556 return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequq_p
4557 : llvm::Intrinsic::ppc_altivec_vcmpgtsq_p;
4558 }
4559}
4560
4561Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,
4562 llvm::CmpInst::Predicate UICmpOpc,
4563 llvm::CmpInst::Predicate SICmpOpc,
4564 llvm::CmpInst::Predicate FCmpOpc,
4565 bool IsSignaling) {
4566 TestAndClearIgnoreResultAssign();
4567 Value *Result;
4568 QualType LHSTy = E->getLHS()->getType();
4569 QualType RHSTy = E->getRHS()->getType();
4570 if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
4571 assert(E->getOpcode() == BO_EQ ||
4572 E->getOpcode() == BO_NE);
4573 Value *LHS = CGF.EmitScalarExpr(E->getLHS());
4574 Value *RHS = CGF.EmitScalarExpr(E->getRHS());
4576 CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
4577 } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
4578 BinOpInfo BOInfo = EmitBinOps(E);
4579 Value *LHS = BOInfo.LHS;
4580 Value *RHS = BOInfo.RHS;
4581
4582 // If AltiVec, the comparison results in a numeric type, so we use
4583 // intrinsics comparing vectors and giving 0 or 1 as a result
4584 if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
4585 // constants for mapping CR6 register bits to predicate result
4586 enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
4587
4588 llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
4589
4590 // in several cases vector arguments order will be reversed
4591 Value *FirstVecArg = LHS,
4592 *SecondVecArg = RHS;
4593
4594 QualType ElTy = LHSTy->castAs<VectorType>()->getElementType();
4595 BuiltinType::Kind ElementKind = ElTy->castAs<BuiltinType>()->getKind();
4596
4597 switch(E->getOpcode()) {
4598 default: llvm_unreachable("is not a comparison operation");
4599 case BO_EQ:
4600 CR6 = CR6_LT;
4601 ID = GetIntrinsic(VCMPEQ, ElementKind);
4602 break;
4603 case BO_NE:
4604 CR6 = CR6_EQ;
4605 ID = GetIntrinsic(VCMPEQ, ElementKind);
4606 break;
4607 case BO_LT:
4608 CR6 = CR6_LT;
4609 ID = GetIntrinsic(VCMPGT, ElementKind);
4610 std::swap(FirstVecArg, SecondVecArg);
4611 break;
4612 case BO_GT:
4613 CR6 = CR6_LT;
4614 ID = GetIntrinsic(VCMPGT, ElementKind);
4615 break;
4616 case BO_LE:
4617 if (ElementKind == BuiltinType::Float) {
4618 CR6 = CR6_LT;
4619 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4620 std::swap(FirstVecArg, SecondVecArg);
4621 }
4622 else {
4623 CR6 = CR6_EQ;
4624 ID = GetIntrinsic(VCMPGT, ElementKind);
4625 }
4626 break;
4627 case BO_GE:
4628 if (ElementKind == BuiltinType::Float) {
4629 CR6 = CR6_LT;
4630 ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
4631 }
4632 else {
4633 CR6 = CR6_EQ;
4634 ID = GetIntrinsic(VCMPGT, ElementKind);
4635 std::swap(FirstVecArg, SecondVecArg);
4636 }
4637 break;
4638 }
4639
4640 Value *CR6Param = Builder.getInt32(CR6);
4641 llvm::Function *F = CGF.CGM.getIntrinsic(ID);
4642 Result = Builder.CreateCall(F, {CR6Param, FirstVecArg, SecondVecArg});
4643
4644 // The result type of intrinsic may not be same as E->getType().
4645 // If E->getType() is not BoolTy, EmitScalarConversion will do the
4646 // conversion work. If E->getType() is BoolTy, EmitScalarConversion will
4647 // do nothing, if ResultTy is not i1 at the same time, it will cause
4648 // crash later.
4649 llvm::IntegerType *ResultTy = cast<llvm::IntegerType>(Result->getType());
4650 if (ResultTy->getBitWidth() > 1 &&
4651 E->getType() == CGF.getContext().BoolTy)
4652 Result = Builder.CreateTrunc(Result, Builder.getInt1Ty());
4653 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4654 E->getExprLoc());
4655 }
4656
4657 if (BOInfo.isFixedPointOp()) {
4658 Result = EmitFixedPointBinOp(BOInfo);
4659 } else if (LHS->getType()->isFPOrFPVectorTy()) {
4660 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, BOInfo.FPFeatures);
4661 if (!IsSignaling)
4662 Result = Builder.CreateFCmp(FCmpOpc, LHS, RHS, "cmp");
4663 else
4664 Result = Builder.CreateFCmpS(FCmpOpc, LHS, RHS, "cmp");
4665 } else if (LHSTy->hasSignedIntegerRepresentation()) {
4666 Result = Builder.CreateICmp(SICmpOpc, LHS, RHS, "cmp");
4667 } else {
4668 // Unsigned integers and pointers.
4669
4670 if (CGF.CGM.getCodeGenOpts().StrictVTablePointers &&
4671 !isa<llvm::ConstantPointerNull>(LHS) &&
4672 !isa<llvm::ConstantPointerNull>(RHS)) {
4673
4674 // Dynamic information is required to be stripped for comparisons,
4675 // because it could leak the dynamic information. Based on comparisons
4676 // of pointers to dynamic objects, the optimizer can replace one pointer
4677 // with another, which might be incorrect in presence of invariant
4678 // groups. Comparison with null is safe because null does not carry any
4679 // dynamic information.
4680 if (LHSTy.mayBeDynamicClass())
4681 LHS = Builder.CreateStripInvariantGroup(LHS);
4682 if (RHSTy.mayBeDynamicClass())
4683 RHS = Builder.CreateStripInvariantGroup(RHS);
4684 }
4685
4686 Result = Builder.CreateICmp(UICmpOpc, LHS, RHS, "cmp");
4687 }
4688
4689 // If this is a vector comparison, sign extend the result to the appropriate
4690 // vector integer type and return it (don't convert to bool).
4691 if (LHSTy->isVectorType())
4692 return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
4693
4694 } else {
4695 // Complex Comparison: can only be an equality comparison.
4697 QualType CETy;
4698 if (auto *CTy = LHSTy->getAs<ComplexType>()) {
4699 LHS = CGF.EmitComplexExpr(E->getLHS());
4700 CETy = CTy->getElementType();
4701 } else {
4702 LHS.first = Visit(E->getLHS());
4703 LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
4704 CETy = LHSTy;
4705 }
4706 if (auto *CTy = RHSTy->getAs<ComplexType>()) {
4707 RHS = CGF.EmitComplexExpr(E->getRHS());
4708 assert(CGF.getContext().hasSameUnqualifiedType(CETy,
4709 CTy->getElementType()) &&
4710 "The element types must always match.");
4711 (void)CTy;
4712 } else {
4713 RHS.first = Visit(E->getRHS());
4714 RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
4715 assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
4716 "The element types must always match.");
4717 }
4718
4719 Value *ResultR, *ResultI;
4720 if (CETy->isRealFloatingType()) {
4721 // As complex comparisons can only be equality comparisons, they
4722 // are never signaling comparisons.
4723 ResultR = Builder.CreateFCmp(FCmpOpc, LHS.first, RHS.first, "cmp.r");
4724 ResultI = Builder.CreateFCmp(FCmpOpc, LHS.second, RHS.second, "cmp.i");
4725 } else {
4726 // Complex comparisons can only be equality comparisons. As such, signed
4727 // and unsigned opcodes are the same.
4728 ResultR = Builder.CreateICmp(UICmpOpc, LHS.first, RHS.first, "cmp.r");
4729 ResultI = Builder.CreateICmp(UICmpOpc, LHS.second, RHS.second, "cmp.i");
4730 }
4731
4732 if (E->getOpcode() == BO_EQ) {
4733 Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
4734 } else {
4735 assert(E->getOpcode() == BO_NE &&
4736 "Complex comparison other than == or != ?");
4737 Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
4738 }
4739 }
4740
4741 return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType(),
4742 E->getExprLoc());
4743}
4744
4746 const BinaryOperator *E, Value **Previous, QualType *SrcType) {
4747 // In case we have the integer or bitfield sanitizer checks enabled
4748 // we want to get the expression before scalar conversion.
4749 if (auto *ICE = dyn_cast<ImplicitCastExpr>(E->getRHS())) {
4750 CastKind Kind = ICE->getCastKind();
4751 if (Kind == CK_IntegralCast || Kind == CK_LValueToRValue) {
4752 *SrcType = ICE->getSubExpr()->getType();
4753 *Previous = EmitScalarExpr(ICE->getSubExpr());
4754 // Pass default ScalarConversionOpts to avoid emitting
4755 // integer sanitizer checks as E refers to bitfield.
4756 return EmitScalarConversion(*Previous, *SrcType, ICE->getType(),
4757 ICE->getExprLoc());
4758 }
4759 }
4760 return EmitScalarExpr(E->getRHS());
4761}
4762
4763Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
4764 bool Ignore = TestAndClearIgnoreResultAssign();
4765
4766 Value *RHS;
4767 LValue LHS;
4768
4769 switch (E->getLHS()->getType().getObjCLifetime()) {
4771 std::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
4772 break;
4773
4775 std::tie(LHS, RHS) = CGF.EmitARCStoreAutoreleasing(E);
4776 break;
4777
4779 std::tie(LHS, RHS) = CGF.EmitARCStoreUnsafeUnretained(E, Ignore);
4780 break;
4781
4783 RHS = Visit(E->getRHS());
4784 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4785 RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
4786 break;
4787
4789 // __block variables need to have the rhs evaluated first, plus
4790 // this should improve codegen just a little.
4791 Value *Previous = nullptr;
4792 QualType SrcType = E->getRHS()->getType();
4793 // Check if LHS is a bitfield, if RHS contains an implicit cast expression
4794 // we want to extract that value and potentially (if the bitfield sanitizer
4795 // is enabled) use it to check for an implicit conversion.
4796 if (E->getLHS()->refersToBitField())
4797 RHS = CGF.EmitWithOriginalRHSBitfieldAssignment(E, &Previous, &SrcType);
4798 else
4799 RHS = Visit(E->getRHS());
4800
4801 LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
4802
4803 // Store the value into the LHS. Bit-fields are handled specially
4804 // because the result is altered by the store, i.e., [C99 6.5.16p1]
4805 // 'An assignment expression has the value of the left operand after
4806 // the assignment...'.
4807 if (LHS.isBitField()) {
4808 CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
4809 // If the expression contained an implicit conversion, make sure
4810 // to use the value before the scalar conversion.
4811 Value *Src = Previous ? Previous : RHS;
4812 QualType DstType = E->getLHS()->getType();
4813 CGF.EmitBitfieldConversionCheck(Src, SrcType, RHS, DstType,
4814 LHS.getBitFieldInfo(), E->getExprLoc());
4815 } else {
4816 CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
4817 CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
4818 }
4819 }
4820
4821 // If the result is clearly ignored, return now.
4822 if (Ignore)
4823 return nullptr;
4824
4825 // The result of an assignment in C is the assigned r-value.
4826 if (!CGF.getLangOpts().CPlusPlus)
4827 return RHS;
4828
4829 // If the lvalue is non-volatile, return the computed value of the assignment.
4830 if (!LHS.isVolatileQualified())
4831 return RHS;
4832
4833 // Otherwise, reload the value.
4834 return EmitLoadOfLValue(LHS, E->getExprLoc());
4835}
4836
4837Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
4838 // Perform vector logical and on comparisons with zero vectors.
4839 if (E->getType()->isVectorType()) {
4841
4842 Value *LHS = Visit(E->getLHS());
4843 Value *RHS = Visit(E->getRHS());
4844 Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
4845 if (LHS->getType()->isFPOrFPVectorTy()) {
4846 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
4847 CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
4848 LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
4849 RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
4850 } else {
4851 LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
4852 RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
4853 }
4854 Value *And = Builder.CreateAnd(LHS, RHS);
4855 return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
4856 }
4857
4858 bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
4859 llvm::Type *ResTy = ConvertType(E->getType());
4860
4861 // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
4862 // If we have 1 && X, just emit X without inserting the control flow.
4863 bool LHSCondVal;
4864 if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
4865 if (LHSCondVal) { // If we have 1 && X, just emit X.
4867
4868 // If the top of the logical operator nest, reset the MCDC temp to 0.
4869 if (CGF.MCDCLogOpStack.empty())
4871
4872 CGF.MCDCLogOpStack.push_back(E);
4873
4874 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4875
4876 // If we're generating for profiling or coverage, generate a branch to a
4877 // block that increments the RHS counter needed to track branch condition
4878 // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
4879 // "FalseBlock" after the increment is done.
4880 if (InstrumentRegions &&
4882 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4883 llvm::BasicBlock *FBlock = CGF.createBasicBlock("land.end");
4884 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4885 Builder.CreateCondBr(RHSCond, RHSBlockCnt, FBlock);
4886 CGF.EmitBlock(RHSBlockCnt);
4888 CGF.EmitBranch(FBlock);
4889 CGF.EmitBlock(FBlock);
4890 }
4891
4892 CGF.MCDCLogOpStack.pop_back();
4893 // If the top of the logical operator nest, update the MCDC bitmap.
4894 if (CGF.MCDCLogOpStack.empty())
4896
4897 // ZExt result to int or bool.
4898 return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
4899 }
4900
4901 // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
4902 if (!CGF.ContainsLabel(E->getRHS()))
4903 return llvm::Constant::getNullValue(ResTy);
4904 }
4905
4906 // If the top of the logical operator nest, reset the MCDC temp to 0.
4907 if (CGF.MCDCLogOpStack.empty())
4909
4910 CGF.MCDCLogOpStack.push_back(E);
4911
4912 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
4913 llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
4914
4915 CodeGenFunction::ConditionalEvaluation eval(CGF);
4916
4917 // Branch on the LHS first. If it is false, go to the failure (cont) block.
4918 CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock,
4919 CGF.getProfileCount(E->getRHS()));
4920
4921 // Any edges into the ContBlock are now from an (indeterminate number of)
4922 // edges from this first condition. All of these values will be false. Start
4923 // setting up the PHI node in the Cont Block for this.
4924 llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
4925 "", ContBlock);
4926 for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
4927 PI != PE; ++PI)
4928 PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
4929
4930 eval.begin(CGF);
4931 CGF.EmitBlock(RHSBlock);
4933 Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
4934 eval.end(CGF);
4935
4936 // Reaquire the RHS block, as there may be subblocks inserted.
4937 RHSBlock = Builder.GetInsertBlock();
4938
4939 // If we're generating for profiling or coverage, generate a branch on the
4940 // RHS to a block that increments the RHS true counter needed to track branch
4941 // condition coverage.
4942 if (InstrumentRegions &&
4944 CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
4945 llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("land.rhscnt");
4946 Builder.CreateCondBr(RHSCond, RHSBlockCnt, ContBlock);
4947 CGF.EmitBlock(RHSBlockCnt);
4949 CGF.EmitBranch(ContBlock);
4950 PN->addIncoming(RHSCond, RHSBlockCnt);
4951 }
4952
4953 // Emit an unconditional branch from this block to ContBlock.
4954 {
4955 // There is no need to emit line number for unconditional branch.
4956 auto NL = ApplyDebugLocation::CreateEmpty(CGF);
4957 CGF.EmitBlock(ContBlock);
4958 }
4959 // Insert an entry into the phi node for the edge with the value of RHSCond.
4960 PN->addIncoming(RHSCond, RHSBlock);
4961
4962 CGF.MCDCLogOpStack.pop_back();
4963 // If the top of the logical operator nest, update the MCDC bitmap.
4964 if (CGF.MCDCLogOpStack.empty())
4966
4967 // Artificial location to preserve the scope information
4968 {
4970 PN->setDebugLoc(Builder.getCurrentDebugLocation());
4971 }
4972
4973 // ZExt result to int.
4974 return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
4975}
4976
Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
  // NOTE(review): this listing appears to have several statements elided
  // (profile-counter and MC/DC bitmap updates); the surviving code is kept
  // byte-for-byte and only comments were added. Confirm against upstream.

  // Perform vector logical or on comparisons with zero vectors.
  if (E->getType()->isVectorType()) {

    Value *LHS = Visit(E->getLHS());
    Value *RHS = Visit(E->getRHS());
    Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
    if (LHS->getType()->isFPOrFPVectorTy()) {
      // FCMP_UNE (unordered !=) so NaN elements count as "true"; FP compares
      // honor the FP environment currently in effect for this expression.
      CodeGenFunction::CGFPOptionsRAII FPOptsRAII(
          CGF, E->getFPFeaturesInEffect(CGF.getLangOpts()));
      LHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, LHS, Zero, "cmp");
      RHS = Builder.CreateFCmp(llvm::CmpInst::FCMP_UNE, RHS, Zero, "cmp");
    } else {
      LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
      RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
    }
    // Element-wise OR of the two i1 masks, then sign-extend so "true" lanes
    // become all-ones in the vector result type.
    Value *Or = Builder.CreateOr(LHS, RHS);
    return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
  }

  bool InstrumentRegions = CGF.CGM.getCodeGenOpts().hasProfileClangInstr();
  llvm::Type *ResTy = ConvertType(E->getType());

  // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
  // If we have 0 || X, just emit X without inserting the control flow.
  bool LHSCondVal;
  if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
    if (!LHSCondVal) { // If we have 0 || X, just emit X.

      // If the top of the logical operator nest, reset the MCDC temp to 0.
      if (CGF.MCDCLogOpStack.empty())

      CGF.MCDCLogOpStack.push_back(E);

      Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

      // If we're generating for profiling or coverage, generate a branch to a
      // block that increments the RHS counter need to track branch condition
      // coverage. In this case, use "FBlock" as both the final "TrueBlock" and
      // "FalseBlock" after the increment is done.
      if (InstrumentRegions &&
          CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
      llvm::BasicBlock *FBlock = CGF.createBasicBlock("lor.end");
      llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
      Builder.CreateCondBr(RHSCond, FBlock, RHSBlockCnt);
      CGF.EmitBlock(RHSBlockCnt);
      CGF.EmitBranch(FBlock);
      CGF.EmitBlock(FBlock);
      }

      CGF.MCDCLogOpStack.pop_back();
      // If the top of the logical operator nest, update the MCDC bitmap.
      if (CGF.MCDCLogOpStack.empty())

      // ZExt result to int or bool.
      return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
    }

    // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
    if (!CGF.ContainsLabel(E->getRHS()))
      return llvm::ConstantInt::get(ResTy, 1);
  }

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())

  CGF.MCDCLogOpStack.push_back(E);

  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");

  CodeGenFunction::ConditionalEvaluation eval(CGF);

  // Branch on the LHS first. If it is true, go to the success (cont) block.
  CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock,
                           CGF.getProfileCount(E->getRHS()));

  // Any edges into the ContBlock are now from an (indeterminate number of)
  // edges from this first condition. All of these values will be true. Start
  // setting up the PHI node in the Cont Block for this.
  llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
                                            "", ContBlock);
  for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
       PI != PE; ++PI)
    PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);

  // The RHS is evaluated only when the LHS was false; the conditional
  // evaluation scope ensures cleanups are handled correctly for that.
  eval.begin(CGF);

  // Emit the RHS condition as a bool value.
  CGF.EmitBlock(RHSBlock);
  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());

  eval.end(CGF);

  // Reacquire the RHS block, as there may be subblocks inserted.
  RHSBlock = Builder.GetInsertBlock();

  // If we're generating for profiling or coverage, generate a branch on the
  // RHS to a block that increments the RHS true counter needed to track branch
  // condition coverage.
  if (InstrumentRegions &&
      CGF.maybeUpdateMCDCCondBitmap(E->getRHS(), RHSCond);
  llvm::BasicBlock *RHSBlockCnt = CGF.createBasicBlock("lor.rhscnt");
  Builder.CreateCondBr(RHSCond, ContBlock, RHSBlockCnt);
  CGF.EmitBlock(RHSBlockCnt);
  CGF.EmitBranch(ContBlock);
  PN->addIncoming(RHSCond, RHSBlockCnt);
  }

  // Emit an unconditional branch from this block to ContBlock. Insert an entry
  // into the phi node for the edge with the value of RHSCond.
  CGF.EmitBlock(ContBlock);
  PN->addIncoming(RHSCond, RHSBlock);

  CGF.MCDCLogOpStack.pop_back();
  // If the top of the logical operator nest, update the MCDC bitmap.
  if (CGF.MCDCLogOpStack.empty())

  // ZExt result to int.
  return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
}
5110
5111Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
5112 CGF.EmitIgnoredExpr(E->getLHS());
5113 CGF.EnsureInsertPoint();
5114 return Visit(E->getRHS());
5115}
5116
5117//===----------------------------------------------------------------------===//
5118// Other Operators
5119//===----------------------------------------------------------------------===//
5120
/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
/// expression is cheap enough and side-effect-free enough to evaluate
/// unconditionally instead of conditionally. This is used to convert control
/// flow into selects in some cases.
/// NOTE(review): the first line of the signature (the `const Expr *E`
/// parameter) appears truncated in this extract; confirm against upstream.
                                   CodeGenFunction &CGF) {
  // Anything that is an integer or floating point constant is fine.
  return E->IgnoreParens()->isEvaluatable(CGF.getContext());

  // Even non-volatile automatic variables can't be evaluated unconditionally.
  // Referencing a thread_local may cause non-trivial initialization work to
  // occur. If we're inside a lambda and one of the variables is from the scope
  // outside the lambda, that function may have returned already. Reading its
  // locals is a bad idea. Also, these reads may introduce races there didn't
  // exist in the source-level program.
}
5137
5138
Value *ScalarExprEmitter::
VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
  // NOTE(review): several statements (profile-counter guards and the
  // single-byte-coverage handling) appear elided from this extract; the
  // remaining code is kept byte-for-byte with comments added only.
  TestAndClearIgnoreResultAssign();

  // Bind the common expression if necessary.
  CodeGenFunction::OpaqueValueMapping binding(CGF, E);

  Expr *condExpr = E->getCond();
  Expr *lhsExpr = E->getTrueExpr();
  Expr *rhsExpr = E->getFalseExpr();

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm.
  bool CondExprBool;
  if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
    Expr *live = lhsExpr, *dead = rhsExpr;
    if (!CondExprBool) std::swap(live, dead);

    // If the dead side doesn't have labels we need, just emit the Live part.
    if (!CGF.ContainsLabel(dead)) {
      if (CondExprBool) {
        CGF.incrementProfileCounter(lhsExpr);
        CGF.incrementProfileCounter(rhsExpr);
      }
      }
      Value *Result = Visit(live);

      // If the live part is a throw expression, it acts like it has a void
      // type, so evaluating it returns a null Value*. However, a conditional
      // with non-void type must return a non-null Value*.
      if (!Result && !E->getType()->isVoidType())
        Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));

      return Result;
    }
  }

  // OpenCL: If the condition is a vector, we can treat this condition like
  // the select function.
  if ((CGF.getLangOpts().OpenCL && condExpr->getType()->isVectorType()) ||
      condExpr->getType()->isExtVectorType()) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *condType = ConvertType(condExpr->getType());
    auto *vecTy = cast<llvm::FixedVectorType>(condType);

    unsigned numElem = vecTy->getNumElements();
    llvm::Type *elemType = vecTy->getElementType();

    // OpenCL select() semantics: a lane is "true" when its sign bit (MSB) is
    // set, hence the signed-less-than-zero test below.
    llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
    llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
    llvm::Value *tmp = Builder.CreateSExt(
        TestMSB, llvm::FixedVectorType::get(elemType, numElem), "sext");
    llvm::Value *tmp2 = Builder.CreateNot(tmp);

    // Cast float to int to perform ANDs if necessary.
    llvm::Value *RHSTmp = RHS;
    llvm::Value *LHSTmp = LHS;
    bool wasCast = false;
    llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
    if (rhsVTy->getElementType()->isFloatingPointTy()) {
      RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
      LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
      wasCast = true;
    }

    // Branch-free blend: (RHS & ~mask) | (LHS & mask).
    llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
    llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
    llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
    if (wasCast)
      tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());

    return tmp5;
  }

  // Non-OpenCL vector (or SVE VLS) condition: element-wise select against a
  // zero-compared mask.
  if (condExpr->getType()->isVectorType() ||
      condExpr->getType()->isSveVLSBuiltinType()) {

    llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);

    llvm::Type *CondType = ConvertType(condExpr->getType());
    auto *VecTy = cast<llvm::VectorType>(CondType);
    llvm::Value *ZeroVec = llvm::Constant::getNullValue(VecTy);

    CondV = Builder.CreateICmpNE(CondV, ZeroVec, "vector_cond");
    return Builder.CreateSelect(CondV, LHS, RHS, "vector_select");
  }

  // If this is a really simple expression (like x ? 4 : 5), emit this as a
  // select instead of as control flow. We can only do this if it is cheap and
  // safe to evaluate the LHS and RHS unconditionally.
  // NOTE(review): the second operand of this && (presumably the same check on
  // rhsExpr) appears elided in this extract.
  if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
    llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
    llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);

      CGF.incrementProfileCounter(lhsExpr);
      CGF.incrementProfileCounter(rhsExpr);
    } else
      CGF.incrementProfileCounter(E, StepV);

    llvm::Value *LHS = Visit(lhsExpr);
    llvm::Value *RHS = Visit(rhsExpr);
    if (!LHS) {
      // If the conditional has void type, make sure we return a null Value*.
      assert(!RHS && "LHS and RHS types must match");
      return nullptr;
    }
    return Builder.CreateSelect(CondV, LHS, RHS, "cond");
  }

  // General case: emit real control flow with a PHI at the join point.

  // If the top of the logical operator nest, reset the MCDC temp to 0.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeResetMCDCCondBitmap(condExpr);

  llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
  llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");

  CodeGenFunction::ConditionalEvaluation eval(CGF);
  CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock,
                           CGF.getProfileCount(lhsExpr));

  CGF.EmitBlock(LHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(lhsExpr);
  else

  eval.begin(CGF);
  Value *LHS = Visit(lhsExpr);
  eval.end(CGF);

  LHSBlock = Builder.GetInsertBlock();
  Builder.CreateBr(ContBlock);

  CGF.EmitBlock(RHSBlock);

  // If the top of the logical operator nest, update the MCDC bitmap for the
  // ConditionalOperator prior to visiting its LHS and RHS blocks, since they
  // may also contain a boolean expression.
  if (CGF.MCDCLogOpStack.empty())
    CGF.maybeUpdateMCDCTestVectorBitmap(condExpr);

    CGF.incrementProfileCounter(rhsExpr);

  eval.begin(CGF);
  Value *RHS = Visit(rhsExpr);
  eval.end(CGF);

  RHSBlock = Builder.GetInsertBlock();
  CGF.EmitBlock(ContBlock);

  // If the LHS or RHS is a throw expression, it will be legitimately null.
  if (!LHS)
    return RHS;
  if (!RHS)
    return LHS;

  // Create a PHI node for the real part.
  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
  PN->addIncoming(LHS, LHSBlock);
  PN->addIncoming(RHS, RHSBlock);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.

  return PN;
}
5329
5330Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
5331 return Visit(E->getChosenSubExpr());
5332}
5333
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
  QualType Ty = VE->getType();

  // Variably-modified types need work done before the va_arg itself.
  // NOTE(review): the statement guarded by this `if` appears elided in this
  // extract; confirm against upstream.
  if (Ty->isVariablyModifiedType())

  Address ArgValue = Address::invalid();
  Address ArgPtr = CGF.EmitVAArg(VE, ArgValue);

  llvm::Type *ArgTy = ConvertType(VE->getType());

  // If EmitVAArg fails, emit an error.
  if (!ArgPtr.isValid()) {
    CGF.ErrorUnsupported(VE, "va_arg expression");
    return llvm::UndefValue::get(ArgTy);
  }

  // FIXME Volatility.
  llvm::Value *Val = Builder.CreateLoad(ArgPtr);

  // If EmitVAArg promoted the type, we must truncate it.
  if (ArgTy != Val->getType()) {
    // A promoted pointer comes back as an integer; convert it back with
    // inttoptr rather than a (invalid) trunc.
    if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
      Val = Builder.CreateIntToPtr(Val, ArgTy);
    else
      Val = Builder.CreateTrunc(Val, ArgTy);
  }

  return Val;
}
5364
5365Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
5366 return CGF.EmitBlockLiteral(block);
5367}
5368
// Convert a vec3 to vec4, or vice versa.
// The mask {0, 1, 2, -1} keeps the first three lanes; taking its 3-element
// prefix drops the padding lane (vec4 -> vec3), while the full 4-element mask
// adds an undef fourth lane (vec3 -> vec4).
// NOTE(review): the first line of this helper's signature appears truncated
// in this extract; confirm against upstream.
                                  Value *Src, unsigned NumElementsDst) {
  static constexpr int Mask[] = {0, 1, 2, -1};
  return Builder.CreateShuffleVector(Src, llvm::ArrayRef(Mask, NumElementsDst));
}
5375
// Create cast instructions for converting LLVM value \p Src to LLVM type \p
// DstTy. \p Src has the same size as \p DstTy. Both are single value types
// but could be scalar or vectors of different lengths, and either can be
// pointer.
// There are 4 cases:
// 1. non-pointer -> non-pointer  : needs 1 bitcast
// 2. pointer -> pointer          : needs 1 bitcast or addrspacecast
// 3. pointer -> non-pointer
//   a) pointer -> intptr_t       : needs 1 ptrtoint
//   b) pointer -> non-intptr_t   : needs 1 ptrtoint then 1 bitcast
// 4. non-pointer -> pointer
//   a) intptr_t -> pointer       : needs 1 inttoptr
//   b) non-intptr_t -> pointer   : needs 1 bitcast then 1 inttoptr
// Note: for cases 3b and 4b two casts are required since LLVM casts do not
// allow casting directly between pointer types and non-integer non-pointer
// types.
// NOTE(review): the first line of this helper's signature appears truncated
// in this extract; confirm against upstream.
                                          const llvm::DataLayout &DL,
                                          Value *Src, llvm::Type *DstTy,
                                          StringRef Name = "") {
  auto SrcTy = Src->getType();

  // Case 1.
  if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
    return Builder.CreateBitCast(Src, DstTy, Name);

  // Case 2.
  if (SrcTy->isPointerTy() && DstTy->isPointerTy())
    return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);

  // Case 3.
  if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
    // Case 3b: go through intptr_t first, since ptrtoint can only produce an
    // integer of pointer width.
    if (!DstTy->isIntegerTy())
      Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
    // Cases 3a and 3b.
    return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
  }

  // Case 4b: bitcast the non-integer payload to intptr_t first, since
  // inttoptr only accepts integers.
  if (!SrcTy->isIntegerTy())
    Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // Cases 4a and 4b.
  return Builder.CreateIntToPtr(Src, DstTy, Name);
}
5421
5422Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
5423 Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
5424 llvm::Type *DstTy = ConvertType(E->getType());
5425
5426 llvm::Type *SrcTy = Src->getType();
5427 unsigned NumElementsSrc =
5428 isa<llvm::VectorType>(SrcTy)
5429 ? cast<llvm::FixedVectorType>(SrcTy)->getNumElements()
5430 : 0;
5431 unsigned NumElementsDst =
5432 isa<llvm::VectorType>(DstTy)
5433 ? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
5434 : 0;
5435
5436 // Use bit vector expansion for ext_vector_type boolean vectors.
5437 if (E->getType()->isExtVectorBoolType())
5438 return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
5439
5440 // Going from vec3 to non-vec3 is a special case and requires a shuffle
5441 // vector to get a vec4, then a bitcast if the target type is different.
5442 if (NumElementsSrc == 3 && NumElementsDst != 3) {
5443 Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
5444 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5445 DstTy);
5446
5447 Src->setName("astype");
5448 return Src;
5449 }
5450
5451 // Going from non-vec3 to vec3 is a special case and requires a bitcast
5452 // to vec4 if the original type is not vec4, then a shuffle vector to
5453 // get a vec3.
5454 if (NumElementsSrc != 3 && NumElementsDst == 3) {
5455 auto *Vec4Ty = llvm::FixedVectorType::get(
5456 cast<llvm::VectorType>(DstTy)->getElementType(), 4);
5457 Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
5458 Vec4Ty);
5459
5460 Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
5461 Src->setName("astype");
5462 return Src;
5463 }
5464
5465 return createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
5466 Src, DstTy, "astype");
5467}
5468
5469Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
5470 return CGF.EmitAtomicExpr(E).getScalarVal();
5471}
5472
5473//===----------------------------------------------------------------------===//
5474// Entry Point into this File
5475//===----------------------------------------------------------------------===//
5476
5477/// Emit the computation of the specified expression of scalar type, ignoring
5478/// the result.
5479Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
5480 assert(E && hasScalarEvaluationKind(E->getType()) &&
5481 "Invalid scalar expression to emit");
5482
5483 return ScalarExprEmitter(*this, IgnoreResultAssign)
5484 .Visit(const_cast<Expr *>(E));
5485}
5486
/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
/// NOTE(review): parts of this definition's signature appear elided in this
/// extract; confirm against upstream.
                                             QualType DstTy,
  assert(hasScalarEvaluationKind(SrcTy) && hasScalarEvaluationKind(DstTy) &&
         "Invalid scalar expression to emit");
  // Delegates to the emitter's conversion logic.
  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy, Loc);
}
5496
/// Emit a conversion from the specified complex type to the specified
/// destination type, where the destination type is an LLVM scalar type.
/// NOTE(review): parts of this definition's signature appear elided in this
/// extract; confirm against upstream.
                                                      QualType SrcTy,
                                                      QualType DstTy,
  assert(SrcTy->isAnyComplexType() && hasScalarEvaluationKind(DstTy) &&
         "Invalid complex -> scalar conversion");
  return ScalarExprEmitter(*this)
      .EmitComplexToScalarConversion(Src, SrcTy, DstTy, Loc);
}
5508
5509
/// Emit E as a scalar value, promoting the computation to PromotionType when
/// one is provided; with a null PromotionType this is a plain scalar emit.
/// NOTE(review): the middle line of the signature appears elided in this
/// extract; confirm against upstream.
Value *
                                        QualType PromotionType) {
  if (!PromotionType.isNull())
    return ScalarExprEmitter(*this).EmitPromoted(E, PromotionType);
  else
    return ScalarExprEmitter(*this).Visit(const_cast<Expr *>(E));
}
5518
5519
/// Emit a scalar pre/post increment or decrement of the given lvalue:
/// `isInc` selects ++ vs --, `isPre` selects prefix vs postfix result.
/// NOTE(review): the middle line of the signature appears elided in this
/// extract; confirm against upstream.
llvm::Value *CodeGenFunction::
                        bool isInc, bool isPre) {
  return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
}
5525
  // object->isa or (*object).isa
  // Generate code as for: *(Class*)object
  // NOTE(review): the function opener (EmitObjCIsaExpr signature) and the
  // initializer of BaseTy appear elided in this extract; confirm upstream.

  Expr *BaseExpr = E->getBase();
  Address Addr = Address::invalid();
  if (BaseExpr->isPRValue()) {
    // A prvalue base is the object pointer itself; wrap it in an Address.
    llvm::Type *BaseTy =
    Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
  } else {
    // Glvalue base: take the address of the lvalue.
    Addr = EmitLValue(BaseExpr).getAddress(*this);
  }

  // Cast the address to Class*.
  Addr = Addr.withElementType(ConvertType(E->getType()));
  return MakeAddrLValue(Addr, E->getType());
}
5544
5545
5547 const CompoundAssignOperator *E) {
5548 ScalarExprEmitter Scalar(*this);
5549 Value *Result = nullptr;
5550 switch (E->getOpcode()) {
5551#define COMPOUND_OP(Op) \
5552 case BO_##Op##Assign: \
5553 return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
5554 Result)
5555 COMPOUND_OP(Mul);
5556 COMPOUND_OP(Div);
5557 COMPOUND_OP(Rem);
5558 COMPOUND_OP(Add);
5559 COMPOUND_OP(Sub);
5560 COMPOUND_OP(Shl);
5561 COMPOUND_OP(Shr);
5563 COMPOUND_OP(Xor);
5564 COMPOUND_OP(Or);
5565#undef COMPOUND_OP
5566
5567 case BO_PtrMemD:
5568 case BO_PtrMemI:
5569 case BO_Mul:
5570 case BO_Div:
5571 case BO_Rem:
5572 case BO_Add:
5573 case BO_Sub:
5574 case BO_Shl:
5575 case BO_Shr:
5576 case BO_LT:
5577 case BO_GT:
5578 case BO_LE:
5579 case BO_GE:
5580 case BO_EQ:
5581 case BO_NE:
5582 case BO_Cmp:
5583 case BO_And:
5584 case BO_Xor:
5585 case BO_Or:
5586 case BO_LAnd:
5587 case BO_LOr:
5588 case BO_Assign:
5589 case BO_Comma:
5590 llvm_unreachable("Not valid compound assignment operators");
5591 }
5592
5593 llvm_unreachable("Unhandled compound assignment operator");
5594}
5595
// Result pair produced by EmitGEPOffsetInBytes (below).
// NOTE(review): the `struct GEPOffsetAndOverflow {` opener appears elided in
// this extract; confirm against upstream.
  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset;
  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows;
};
5602
/// Evaluate given GEPVal, which is either an inbounds GEP, or a constant,
/// and compute the total offset it applies from it's base pointer BasePtr.
/// Returns offset in bytes and a boolean flag whether an overflow happened
/// during evaluation.
/// NOTE(review): the first line of this helper's signature appears truncated
/// in this extract; confirm against upstream.
                                                 llvm::LLVMContext &VMContext,
                                                 CodeGenModule &CGM,
                                                 CGBuilderTy &Builder) {
  const auto &DL = CGM.getDataLayout();

  // The total (signed) byte offset for the GEP.
  llvm::Value *TotalOffset = nullptr;

  // Was the GEP already reduced to a constant?
  if (isa<llvm::Constant>(GEPVal)) {
    // Compute the offset by casting both pointers to integers and subtracting:
    // GEPVal = BasePtr + ptr(Offset) <--> Offset = int(GEPVal) - int(BasePtr)
    Value *BasePtr_int =
        Builder.CreatePtrToInt(BasePtr, DL.getIntPtrType(BasePtr->getType()));
    Value *GEPVal_int =
        Builder.CreatePtrToInt(GEPVal, DL.getIntPtrType(GEPVal->getType()));
    TotalOffset = Builder.CreateSub(GEPVal_int, BasePtr_int);
    // A constant-folded GEP cannot have overflowed.
    return {TotalOffset, /*OffsetOverflows=*/Builder.getFalse()};
  }

  auto *GEP = cast<llvm::GEPOperator>(GEPVal);
  assert(GEP->getPointerOperand() == BasePtr &&
         "BasePtr must be the base of the GEP.");
  assert(GEP->isInBounds() && "Expected inbounds GEP");

  auto *IntPtrTy = DL.getIntPtrType(GEP->getPointerOperandType());

  // Grab references to the signed add/mul overflow intrinsics for intptr_t.
  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);
  auto *SAddIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::sadd_with_overflow, IntPtrTy);
  auto *SMulIntrinsic =
      CGM.getIntrinsic(llvm::Intrinsic::smul_with_overflow, IntPtrTy);

  // The offset overflow flag - true if the total offset overflows.
  llvm::Value *OffsetOverflows = Builder.getFalse();

  /// Return the result of the given binary operation.
  /// Note: captures OffsetOverflows by reference and ORs any overflow it
  /// detects into it as a side effect.
  auto eval = [&](BinaryOperator::Opcode Opcode, llvm::Value *LHS,
                  llvm::Value *RHS) -> llvm::Value * {
    assert((Opcode == BO_Add || Opcode == BO_Mul) && "Can't eval binop");

    // If the operands are constants, return a constant result.
    if (auto *LHSCI = dyn_cast<llvm::ConstantInt>(LHS)) {
      if (auto *RHSCI = dyn_cast<llvm::ConstantInt>(RHS)) {
        llvm::APInt N;
        bool HasOverflow = mayHaveIntegerOverflow(LHSCI, RHSCI, Opcode,
                                                  /*Signed=*/true, N);
        if (HasOverflow)
          OffsetOverflows = Builder.getTrue();
        return llvm::ConstantInt::get(VMContext, N);
      }
    }

    // Otherwise, compute the result with checked arithmetic.
    auto *ResultAndOverflow = Builder.CreateCall(
        (Opcode == BO_Add) ? SAddIntrinsic : SMulIntrinsic, {LHS, RHS});
    OffsetOverflows = Builder.CreateOr(
        Builder.CreateExtractValue(ResultAndOverflow, 1), OffsetOverflows);
    return Builder.CreateExtractValue(ResultAndOverflow, 0);
  };

  // Determine the total byte offset by looking at each GEP operand.
  for (auto GTI = llvm::gep_type_begin(GEP), GTE = llvm::gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    llvm::Value *LocalOffset;
    auto *Index = GTI.getOperand();
    // Compute the local offset contributed by this indexing step:
    if (auto *STy = GTI.getStructTypeOrNull()) {
      // For struct indexing, the local offset is the byte position of the
      // specified field.
      unsigned FieldNo = cast<llvm::ConstantInt>(Index)->getZExtValue();
      LocalOffset = llvm::ConstantInt::get(
          IntPtrTy, DL.getStructLayout(STy)->getElementOffset(FieldNo));
    } else {
      // Otherwise this is array-like indexing. The local offset is the index
      // multiplied by the element size.
      auto *ElementSize =
          llvm::ConstantInt::get(IntPtrTy, GTI.getSequentialElementStride(DL));
      auto *IndexS = Builder.CreateIntCast(Index, IntPtrTy, /*isSigned=*/true);
      LocalOffset = eval(BO_Mul, ElementSize, IndexS);
    }

    // If this is the first offset, set it as the total offset. Otherwise, add
    // the local offset into the running total.
    if (!TotalOffset || TotalOffset == Zero)
      TotalOffset = LocalOffset;
    else
      TotalOffset = eval(BO_Add, TotalOffset, LocalOffset);
  }

  return {TotalOffset, OffsetOverflows};
}
5701
/// Emit an inbounds GEP and, when the pointer-overflow sanitizer is enabled,
/// the null-pointer and offset-overflow checks that validate it.
/// NOTE(review): two statements (the initializer of EvaluatedGEP and the
/// declaration of the Checks container) appear elided in this extract; the
/// remaining code is kept byte-for-byte with comments added only.
Value *
CodeGenFunction::EmitCheckedInBoundsGEP(llvm::Type *ElemTy, Value *Ptr,
                                        ArrayRef<Value *> IdxList,
                                        bool SignedIndices, bool IsSubtraction,
                                        SourceLocation Loc, const Twine &Name) {
  llvm::Type *PtrTy = Ptr->getType();
  Value *GEPVal = Builder.CreateInBoundsGEP(ElemTy, Ptr, IdxList, Name);

  // If the pointer overflow sanitizer isn't enabled, do nothing.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return GEPVal;

  // Perform nullptr-and-offset check unless the nullptr is defined.
  bool PerformNullCheck = !NullPointerIsDefined(
      Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
  // Check for overflows unless the GEP got constant-folded,
  // and only in the default address space
  bool PerformOverflowCheck =
      !isa<llvm::Constant>(GEPVal) && PtrTy->getPointerAddressSpace() == 0;

  if (!(PerformNullCheck || PerformOverflowCheck))
    return GEPVal;

  const auto &DL = CGM.getDataLayout();

  SanitizerScope SanScope(this);
  llvm::Type *IntPtrTy = DL.getIntPtrType(PtrTy);

  GEPOffsetAndOverflow EvaluatedGEP =

  assert((!isa<llvm::Constant>(EvaluatedGEP.TotalOffset) ||
          EvaluatedGEP.OffsetOverflows == Builder.getFalse()) &&
         "If the offset got constant-folded, we don't expect that there was an "
         "overflow.");

  auto *Zero = llvm::ConstantInt::getNullValue(IntPtrTy);

  // Common case: if the total offset is zero, and we are using C++ semantics,
  // where nullptr+0 is defined, don't emit a check.
  if (EvaluatedGEP.TotalOffset == Zero && CGM.getLangOpts().CPlusPlus)
    return GEPVal;

  // Now that we've computed the total offset, add it to the base pointer (with
  // wrapping semantics).
  auto *IntPtr = Builder.CreatePtrToInt(Ptr, IntPtrTy);
  auto *ComputedGEP = Builder.CreateAdd(IntPtr, EvaluatedGEP.TotalOffset);

  if (PerformNullCheck) {
    // In C++, if the base pointer evaluates to a null pointer value,
    // the only valid pointer this inbounds GEP can produce is also
    // a null pointer, so the offset must also evaluate to zero.
    // Likewise, if we have non-zero base pointer, we can not get null pointer
    // as a result, so the offset can not be -intptr_t(BasePtr).
    // In other words, both pointers are either null, or both are non-null,
    // or the behaviour is undefined.
    //
    // C, however, is more strict in this regard, and gives more
    // optimization opportunities: in C, additionally, nullptr+0 is undefined.
    // So both the input to the 'gep inbounds' AND the output must not be null.
    auto *BaseIsNotNullptr = Builder.CreateIsNotNull(Ptr);
    auto *ResultIsNotNullptr = Builder.CreateIsNotNull(ComputedGEP);
    auto *Valid =
        CGM.getLangOpts().CPlusPlus
            ? Builder.CreateICmpEQ(BaseIsNotNullptr, ResultIsNotNullptr)
            : Builder.CreateAnd(BaseIsNotNullptr, ResultIsNotNullptr);
    Checks.emplace_back(Valid, SanitizerKind::PointerOverflow);
  }

  if (PerformOverflowCheck) {
    // The GEP is valid if:
    // 1) The total offset doesn't overflow, and
    // 2) The sign of the difference between the computed address and the base
    // pointer matches the sign of the total offset.
    llvm::Value *ValidGEP;
    auto *NoOffsetOverflow = Builder.CreateNot(EvaluatedGEP.OffsetOverflows);
    if (SignedIndices) {
      // GEP is computed as `unsigned base + signed offset`, therefore:
      // * If offset was positive, then the computed pointer can not be
      //   [unsigned] less than the base pointer, unless it overflowed.
      // * If offset was negative, then the computed pointer can not be
      //   [unsigned] greater than the base pointer, unless it overflowed.
      auto *PosOrZeroValid = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
      auto *PosOrZeroOffset =
          Builder.CreateICmpSGE(EvaluatedGEP.TotalOffset, Zero);
      llvm::Value *NegValid = Builder.CreateICmpULT(ComputedGEP, IntPtr);
      ValidGEP =
          Builder.CreateSelect(PosOrZeroOffset, PosOrZeroValid, NegValid);
    } else if (!IsSubtraction) {
      // GEP is computed as `unsigned base + unsigned offset`, therefore the
      // computed pointer can not be [unsigned] less than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.uadd.with.overflow(%base, %offset)`.
      ValidGEP = Builder.CreateICmpUGE(ComputedGEP, IntPtr);
    } else {
      // GEP is computed as `unsigned base - unsigned offset`, therefore the
      // computed pointer can not be [unsigned] greater than base pointer,
      // unless there was an overflow.
      // Equivalent to `@llvm.usub.with.overflow(%base, sub(0, %offset))`.
      ValidGEP = Builder.CreateICmpULE(ComputedGEP, IntPtr);
    }
    ValidGEP = Builder.CreateAnd(ValidGEP, NoOffsetOverflow);
    Checks.emplace_back(ValidGEP, SanitizerKind::PointerOverflow);
  }

  assert(!Checks.empty() && "Should have produced some checks.");

  llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc)};
  // Pass the computed GEP to the runtime to avoid emitting poisoned arguments.
  llvm::Value *DynamicArgs[] = {IntPtr, ComputedGEP};
  EmitCheck(Checks, SanitizerHandler::PointerOverflow, StaticArgs, DynamicArgs);

  return GEPVal;
}
5818
// Address-returning overload: wraps the Value* overload above, preserving the
// element type and alignment of the result.
// NOTE(review): the first signature line and the opening of the RawAddress
// initializer appear elided in this extract; confirm against upstream.
    Address Addr, ArrayRef<Value *> IdxList, llvm::Type *elementType,
    bool SignedIndices, bool IsSubtraction, SourceLocation Loc, CharUnits Align,
    const Twine &Name) {
  // Without the pointer-overflow sanitizer there is nothing to check; emit a
  // plain inbounds GEP directly on the Address.
  if (!SanOpts.has(SanitizerKind::PointerOverflow))
    return Builder.CreateInBoundsGEP(Addr, IdxList, elementType, Align, Name);

  return RawAddress(
                              IdxList, SignedIndices, IsSubtraction, Loc, Name),
      elementType, Align);
}
Defines the clang::ASTContext interface.
#define V(N, I)
Definition: ASTContext.h:3285
ASTImporterLookupTable & LT
llvm::APSInt APSInt
static llvm::Value * EmitCompare(CGBuilderTy &Builder, CodeGenFunction &CGF, const BinaryOperator *E, llvm::Value *LHS, llvm::Value *RHS, CompareKind Kind, const char *NameSuffix="")
Definition: CGExprAgg.cpp:997
CodeGenFunction::ComplexPairTy ComplexPairTy
#define HANDLE_BINOP(OP)
#define VISITCOMP(CODE, UI, SI, FP, SIG)
static int getAsInt32(llvm::ConstantInt *C, llvm::Type *I32Ty)
static Value * emitPointerArithmetic(CodeGenFunction &CGF, const BinOpInfo &op, bool isSubtraction)
Emit pointer + index arithmetic.
static llvm::Value * EmitIsNegativeTestHelper(Value *V, QualType VType, const char *Name, CGBuilderTy &Builder)
static Value * createCastsForTypeOfSameSize(CGBuilderTy &Builder, const llvm::DataLayout &DL, Value *Src, llvm::Type *DstTy, StringRef Name="")
IntrinsicType
@ VCMPGT
@ VCMPEQ
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitBitfieldTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT, BuiltinType::Kind ElemKind)
#define COMPOUND_OP(Op)
#define HANDLEBINOP(OP)
static GEPOffsetAndOverflow EmitGEPOffsetInBytes(Value *BasePtr, Value *GEPVal, llvm::LLVMContext &VMContext, CodeGenModule &CGM, CGBuilderTy &Builder)
Evaluate given GEPVal, which is either an inbounds GEP, or a constant, and compute the total offset i...
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E, CodeGenFunction &CGF)
isCheapEnoughToEvaluateUnconditionally - Return true if the specified expression is cheap enough and ...
static bool PromotionIsPotentiallyEligibleForImplicitIntegerConversionCheck(QualType SrcType, QualType DstType)
static Value * buildFMulAdd(llvm::Instruction *MulOp, Value *Addend, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool negMul, bool negAdd)
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx, unsigned Off)
static Value * ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF, Value *Src, unsigned NumElementsDst)
static Value * tryEmitFMulAdd(const BinOpInfo &op, const CodeGenFunction &CGF, CGBuilderTy &Builder, bool isSub=false)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitIntegerSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitBitfieldSignChangeCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static std::pair< ScalarExprEmitter::ImplicitConversionCheckKind, std::pair< llvm::Value *, SanitizerMask > > EmitIntegerTruncationCheckHelper(Value *Src, QualType SrcType, Value *Dst, QualType DstType, CGBuilderTy &Builder)
static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E, llvm::Value *InVal, bool IsInc, FPOptions FPFeatures)
static Decl::Kind getKind(const Decl *D)
Definition: DeclBase.cpp:1125
SourceLocation Loc
Definition: SemaObjC.cpp:755
StateNode * Previous
llvm::APInt getValue() const
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
APSInt & getInt()
Definition: APValue.h:423
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:182
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
Definition: ASTContext.h:1103
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType getCanonicalType(QualType T) const
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
Definition: ASTContext.h:2575
QualType getVectorType(QualType VectorType, unsigned NumElts, VectorKind VecKind) const
Return the unique reference to a vector type of the specified element type and size.
const LangOptions & getLangOpts() const
Definition: ASTContext.h:775
CanQualType BoolTy
Definition: ASTContext.h:1092
unsigned getOpenMPDefaultSimdAlign(QualType T) const
Get default simd alignment of the specified complete type in bits.
llvm::FixedPointSemantics getFixedPointSemantics(QualType Ty) const
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2618
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2341
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getPromotedIntegerType(QualType PromotableType) const
Return the type that PromotableType will promote to: C99 6.3.1.1p2, assuming that PromotableType is a...
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2771
QualType getComplexType(QualType T) const
Return the uniqued reference to the type for a complex number with the specified element type.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:757
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
bool isPromotableIntegerType(QualType T) const
More type predicates useful for type checking/promotion.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Definition: ASTContext.h:2345
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:196
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition: Expr.h:4141
Expr * getCond() const
getCond - Return the expression representing the condition for the ?: operator.
Definition: Expr.h:4319
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition: Expr.h:4325
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition: Expr.h:4331
AddrLabelExpr - The GNU address of label extension, representing &&label.
Definition: Expr.h:4338
LabelDecl * getLabel() const
Definition: Expr.h:4361
Represents the index of the current element of an array being initialized by an ArrayInitLoopExpr.
Definition: Expr.h:5564
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition: Expr.h:2664
An Embarcadero array type trait, as used in the implementation of __array_rank and __array_extent.
Definition: ExprCXX.h:2848
uint64_t getValue() const
Definition: ExprCXX.h:2894
QualType getElementType() const
Definition: Type.h:3530
AsTypeExpr - Clang builtin function __builtin_astype [OpenCL 6.2.4.2] This AST node provides support ...
Definition: Expr.h:6234
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition: Expr.h:6253
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6437
A builtin binary operation expression such as "x + y" or "x <= y".
Definition: Expr.h:3840
Expr * getLHS() const
Definition: Expr.h:3889
static Opcode getOpForCompoundAssignment(Opcode Opc)
Definition: Expr.h:3986
bool isCompoundAssignmentOp() const
Definition: Expr.h:3983
SourceLocation getExprLoc() const
Definition: Expr.h:3880
bool isShiftOp() const
Definition: Expr.h:3928
Expr * getRHS() const
Definition: Expr.h:3891
bool isShiftAssignOp() const
Definition: Expr.h:3997
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition: Expr.h:4039
static bool isNullPointerArithmeticExtension(ASTContext &Ctx, Opcode Opc, const Expr *LHS, const Expr *RHS)
Return true if a binary operator using the specified opcode and operands would match the 'p = (i8*)nu...
Definition: Expr.cpp:2206
Opcode getOpcode() const
Definition: Expr.h:3884
BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
Definition: Expr.h:6173
This class is used for builtin types like 'int'.
Definition: Type.h:2981
Kind getKind() const
Definition: Type.h:3023
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
A boolean literal, per ([C++ lex.bool] Boolean literals).
Definition: ExprCXX.h:720
bool getValue() const
Definition: ExprCXX.h:737
A default argument (C++ [dcl.fct.default]).
Definition: ExprCXX.h:1264
A use of a default initializer in a constructor or in aggregate initialization.
Definition: ExprCXX.h:1371
Expr * getExpr()
Get the initialization expression that will be used.
Definition: ExprCXX.cpp:1035
Represents a delete expression for memory deallocation and destructor calls, e.g.
Definition: ExprCXX.h:2493
A C++ dynamic_cast expression (C++ [expr.dynamic.cast]).
Definition: ExprCXX.h:478
Represents a new-expression for memory allocation and constructor calls, e.g: "new CXXNewExpr(foo)".
Definition: ExprCXX.h:2236
Represents a C++11 noexcept expression (C++ [expr.unary.noexcept]).
Definition: ExprCXX.h:4119
bool getValue() const
Definition: ExprCXX.h:4142
The null pointer literal (C++11 [lex.nullptr])
Definition: ExprCXX.h:765
Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
Definition: ExprCXX.h:2612
Represents a C++ struct/union/class.
Definition: DeclCXX.h:258
A rewritten comparison expression that was originally written using operator syntax.
Definition: ExprCXX.h:283
Expr * getSemanticForm()
Get an equivalent semantic form for this expression.
Definition: ExprCXX.h:301
An expression "T()" which creates a value-initialized rvalue of type T, which is a non-class type.
Definition: ExprCXX.h:2177
Represents the this expression in C++.
Definition: ExprCXX.h:1148
A C++ throw-expression (C++ [except.throw]).
Definition: ExprCXX.h:1202
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2820
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition: Expr.cpp:1590
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition: Expr.h:3483
path_iterator path_begin()
Definition: Expr.h:3553
CastKind getCastKind() const
Definition: Expr.h:3527
bool changesVolatileQualification() const
Return.
Definition: Expr.h:3612
path_iterator path_end()
Definition: Expr.h:3554
Expr * getSubExpr()
Definition: Expr.h:3533
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
bool isOne() const
isOne - Test whether the quantity equals one.
Definition: CharUnits.h:125
unsigned getValue() const
Definition: Expr.h:1610
ChooseExpr - GNU builtin-in function __builtin_choose_expr.
Definition: Expr.h:4558
Expr * getChosenSubExpr() const
getChosenSubExpr - Return the subexpression chosen according to the condition.
Definition: Expr.h:4594
Represents a 'co_await' expression.
Definition: ExprCXX.h:5175
bool hasProfileClangInstr() const
Check if Clang profile instrumentation is on.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition: Address.h:111
static Address invalid()
Definition: Address.h:153
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition: Address.h:220
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:184
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition: Address.h:241
bool isValid() const
Definition: Address.h:154
A scoped helper to set the current debug location to the specified location or preferred location of ...
Definition: CGDebugInfo.h:824
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
Definition: CGDebugInfo.h:864
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
Definition: CGDebugInfo.h:881
Address CreateGEP(CodeGenFunction &CGF, Address Addr, llvm::Value *Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:292
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:108
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition: CGBuilder.h:345
virtual llvm::Constant * EmitNullMemberPointer(const MemberPointerType *MPT)
Create a null member pointer of the given type.
Definition: CGCXXABI.cpp:105
virtual llvm::Value * EmitMemberPointerIsNotNull(CodeGenFunction &CGF, llvm::Value *MemPtr, const MemberPointerType *MPT)
Determine if a member pointer is non-null. Returns an i1.
Definition: CGCXXABI.cpp:97
virtual llvm::Value * EmitMemberPointerComparison(CodeGenFunction &CGF, llvm::Value *L, llvm::Value *R, const MemberPointerType *MPT, bool Inequality)
Emit a comparison between two member pointers. Returns an i1.
Definition: CGCXXABI.cpp:87
virtual llvm::Value * EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *Src)
Perform a derived-to-base, base-to-derived, or bitcast member pointer conversion.
Definition: CGCXXABI.cpp:74
void addHeapAllocSiteMetadata(llvm::CallBase *CallSite, QualType AllocatedTy, SourceLocation Loc)
Add heapallocsite metadata for MSAllocator calls.
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS)
Checks if the provided LVal is lastprivate conditional and emits the code to update the value of the ...
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
llvm::Value * EmitARCExtendBlockObject(const Expr *expr)
static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts=false)
ContainsLabel - Return true if the statement contains a label in it.
CurrentSourceLocExprScope CurSourceLocExprScope
Source location information about the default argument or member initializer expression we're evaluat...
void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock, llvm::BasicBlock *FalseBlock, uint64_t TrueCount, Stmt::Likelihood LH=Stmt::LH_None, const Expr *ConditionalOp=nullptr)
EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g.
bool sanitizePerformTypeCheck() const
Whether any type-checking sanitizers are enabled.
SanitizerSet SanOpts
Sanitizers enabled for this function.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
LValue EmitScalarCompoundAssignWithComplex(const CompoundAssignOperator *E, llvm::Value *&Result)
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
static bool hasScalarEvaluationKind(QualType T)
LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E)
void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index, QualType IndexType, bool Accessed)
Emit a check that Base points into an array object, which we can access at index Index.
void EmitBitfieldConversionCheck(llvm::Value *Src, QualType SrcType, llvm::Value *Dst, QualType DstType, const CGBitFieldInfo &Info, SourceLocation Loc)
Emit a check that an [implicit] conversion of a bitfield.
llvm::BlockAddress * GetAddrOfLabel(const LabelDecl *L)
RawAddress CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
VlaSizePair getVLASize(const VariableArrayType *vla)
Returns an LLVM value that corresponds to the size, in non-variably-sized elements,...
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull, CFITypeCheckKind TCK, SourceLocation Loc)
Derived is the presumed address of an object of type T after a cast.
llvm::Value * EmitARCStoreWeak(Address addr, llvm::Value *value, bool ignored)
llvm::Value * getAsNaturalPointerTo(Address Addr, QualType PointeeType)
void EmitVariablyModifiedType(QualType Ty)
EmitVLASize - Capture all the sizes for the VLA expressions in the given variably-modified type and s...
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
const LangOptions & getLangOpts() const
llvm::Value * EmitObjCProtocolExpr(const ObjCProtocolExpr *E)
llvm::Value * EmitObjCStringLiteral(const ObjCStringLiteral *E)
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
static bool isInstrumentedCondition(const Expr *C)
isInstrumentedCondition - Determine whether the given condition is an instrumentable condition (i....
llvm::Value * EmitObjCBoxedExpr(const ObjCBoxedExpr *E)
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
@ TCK_DowncastPointer
Checking the operand of a static_cast to a derived pointer type.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
void SetDivFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
RValue EmitObjCMessageExpr(const ObjCMessageExpr *E, ReturnValueSlot Return=ReturnValueSlot())
void EmitIgnoredExpr(const Expr *E)
EmitIgnoredExpr - Emit an expression in a context which ignores the result.
llvm::Type * ConvertTypeForMem(QualType T)
llvm::Value * EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E)
LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK)
Same as EmitLValue but additionally we generate checking code to guard against undefined behavior.
llvm::Value * EmitObjCArrayLiteral(const ObjCArrayLiteral *E)
void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, llvm::Value **Result=nullptr)
EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints as EmitStoreThroughLValue.
llvm::Value * EmitObjCConsumeObject(QualType T, llvm::Value *Ptr)
ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr)
const TargetInfo & getTarget() const
std::pair< LValue, llvm::Value * > EmitARCStoreAutoreleasing(const BinaryOperator *e)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
std::pair< LValue, llvm::Value * > EmitARCStoreUnsafeUnretained(const BinaryOperator *e, bool ignored)
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerMask > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
void maybeResetMCDCCondBitmap(const Expr *E)
Zero-init the MCDC temp value.
uint64_t getCurrentProfileCount()
Get the profiler's current count.
SmallVector< const BinaryOperator *, 16 > MCDCLogOpStack
Stack to track the Logical Operator recursion nest for MC/DC.
RValue EmitCoyieldExpr(const CoyieldExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
static bool ShouldNullCheckClassCastValue(const CastExpr *Cast)
llvm::Value * EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified complex type to the specified destination type,...
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block,...
llvm::Value * EmitARCReclaimReturnedObject(const Expr *e, bool allowUnsafeClaim)
void EmitCXXDeleteExpr(const CXXDeleteExpr *E)
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * emitBoolVecConversion(llvm::Value *SrcVec, unsigned NumElementsDst, const llvm::Twine &Name="")
Address GetAddressOfDerivedClass(Address Value, const CXXRecordDecl *Derived, CastExpr::path_const_iterator PathBegin, CastExpr::path_const_iterator PathEnd, bool NullCheckValue)
Address EmitCompoundStmt(const CompoundStmt &S, bool GetLast=false, AggValueSlot AVS=AggValueSlot::ignored())
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e, AggValueSlot slot=AggValueSlot::ignored())
llvm::Value * EmitObjCSelectorExpr(const ObjCSelectorExpr *E)
llvm::CallInst * EmitNounwindRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitBuiltinAvailable(const VersionTuple &Version)
llvm::Value * EmitARCStoreStrong(LValue lvalue, llvm::Value *value, bool resultIgnored)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
llvm::Value * EmitBlockLiteral(const BlockExpr *)
Emit block literal.
void EmitNullabilityCheck(LValue LHS, llvm::Value *RHS, SourceLocation Loc)
Given an assignment *LHS = RHS, emit a test that checks if RHS is nonnull, if LHS is marked _Nonnull.
void maybeUpdateMCDCCondBitmap(const Expr *E, llvm::Value *Val)
Update the MCDC temp value with the condition's evaluated result.
void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint=true)
bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result, bool AllowLabels=false)
ConstantFoldsToSimpleInteger - If the specified expression does not fold to a constant,...
ComplexPairTy EmitPromotedValue(ComplexPairTy result, QualType PromotionType)
llvm::Value * EmitPromotedScalarExpr(const Expr *E, QualType PromotionType)
void maybeUpdateMCDCTestVectorBitmap(const Expr *E)
Increment the profiler's counter for the given expression by StepV.
llvm::Type * ConvertType(QualType T)
Address EmitVAArg(VAArgExpr *VE, Address &VAListAddr)
Generate code to get an argument from the passed in pointer and update it accordingly.
bool IsSanitizerScope
True if CodeGen currently emits code implementing sanitizer checks.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
RValue EmitCoawaitExpr(const CoawaitExpr &E, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
llvm::Value * EmitWithOriginalRHSBitfieldAssignment(const BinaryOperator *E, llvm::Value **Previous, QualType *SrcType)
Retrieve the implicit cast expression of the rhs in a binary operator expression by passing pointers ...
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
llvm::Value * EmitDynamicCast(Address V, const CXXDynamicCastExpr *DCE)
llvm::Value * EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty)
uint64_t getProfileCount(const Stmt *S)
Get the profiler's count for the given statement.
llvm::Value * EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV, bool isInc, bool isPre)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
llvm::Value * LoadCXXThis()
LoadCXXThis - Load the value of 'this'.
llvm::Value * getArrayInitIndex()
Get the index of the current ArrayInitLoopExpr, if any.
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E)
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM s...
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
RValue EmitCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue=ReturnValueSlot())
llvm::Value * emitScalarConstant(const ConstantEmission &Constant, Expr *E)
void EnsureInsertPoint()
EnsureInsertPoint - Ensure that an insertion point is defined so that emitted IR has a place to go.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
void incrementProfileCounter(const Stmt *S, llvm::Value *StepV=nullptr)
Increment the profiler's counter for the given statement by StepV.
llvm::Value * EmitARCRetainScalarExpr(const Expr *expr)
llvm::Value * EmitCXXNewExpr(const CXXNewExpr *E)
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
RValue EmitAtomicExpr(AtomicExpr *E)
This class organizes the cross-function state that is used while generating LLVM code.
void EmitExplicitCastExprType(const ExplicitCastExpr *E, CodeGenFunction *CGF=nullptr)
Emit type info if type of an expression is a variably modified type.
Definition: CGExpr.cpp:1241
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getNullPointer(llvm::PointerType *T, QualType QT)
Get target specific null pointer.
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
llvm::Constant * getMemberPointerConstant(const UnaryOperator *e)
const llvm::DataLayout & getDataLayout() const
CGCXXABI & getCXXABI() const
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
llvm::Value * createOpenCLIntToSamplerConversion(const Expr *E, CodeGenFunction &CGF)
llvm::Constant * EmitNullConstant(QualType T)
Return the result of value-initializing the given type, i.e.
LangAS GetGlobalConstantAddressSpace() const
Return the AST address space of constant literal, which is used to emit the constant literal as globa...
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys=std::nullopt)
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
LValue - This represents an lvalue references.
Definition: CGValue.h:181
bool isBitField() const
Definition: CGValue.h:283
bool isVolatileQualified() const
Definition: CGValue.h:288
void setTBAAInfo(TBAAAccessInfo Info)
Definition: CGValue.h:339
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:370
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:432
static RValue get(llvm::Value *V)
Definition: CGValue.h:97
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:70
An abstract representation of an aligned address.
Definition: Address.h:41
Address performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, Address Addr, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Complex values, per C99 6.2.5p11.
Definition: Type.h:3086
CompoundAssignOperator - For compound assignments (e.g.
Definition: Expr.h:4088
QualType getComputationLHSType() const
Definition: Expr.h:4122
QualType getComputationResultType() const
Definition: Expr.h:4125
CompoundLiteralExpr - [C99 6.5.2.5].
Definition: Expr.h:3413
Represents the specialization of a concept - evaluates to a prvalue of type bool.
Definition: ExprConcepts.h:42
bool isSatisfied() const
Whether or not the concept with the given arguments was satisfied when the expression was created.
Definition: ExprConcepts.h:124
ConstantExpr - An expression that occurs in a constant context and optionally the result of evaluatin...
Definition: Expr.h:1072
Represents a concrete matrix type with constant number of rows and columns.
Definition: Type.h:4167
unsigned getNumRows() const
Returns the number of rows in the matrix.
Definition: Type.h:4185
ConvertVectorExpr - Clang builtin function __builtin_convertvector This AST node provides support for...
Definition: Expr.h:4499
Expr * getSrcExpr() const
getSrcExpr - Return the Expr to be converted.
Definition: Expr.h:4519
Represents a 'co_yield' expression.
Definition: ExprCXX.h:5256
specific_decl_iterator - Iterates over a subrange of declarations stored in a DeclContext,...
Definition: DeclBase.h:2342
A reference to a declared variable, function, enum, etc.
Definition: Expr.h:1260
T * getAttr() const
Definition: DeclBase.h:579
ExplicitCastExpr - An explicit cast written in the source code.
Definition: Expr.h:3730
Represents an expression – generally a full-expression – that introduces cleanups to be run at the en...
Definition: ExprCXX.h:3467
This represents one expression.
Definition: Expr.h:110
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
bool isGLValue() const
Definition: Expr.h:280
@ SE_AllowSideEffects
Allow any unmodeled side effect.
Definition: Expr.h:671
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx, SmallVectorImpl< PartialDiagnosticAt > *Diag=nullptr) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3055
bool isEvaluatable(const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects) const
isEvaluatable - Call EvaluateAsRValue to see if this expression can be constant folded without side-e...
bool isPRValue() const
Definition: Expr.h:278
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition: Expr.cpp:3039
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
bool refersToBitField() const
Returns true if this expression is a gl-value that potentially refers to a bit-field.
Definition: Expr.h:469
QualType getType() const
Definition: Expr.h:142
An expression trait intrinsic.
Definition: ExprCXX.h:2919
ExtVectorType - Extended vector type.
Definition: Type.h:4061
Represents a member of a struct/union/class.
Definition: Decl.h:3057
llvm::APFloat getValue() const
Definition: Expr.h:1647
const Expr * getSubExpr() const
Definition: Expr.h:1052
GNUNullExpr - Implements the GNU __null extension, which is a name for a null pointer constant that h...
Definition: Expr.h:4633
Represents a C11 generic selection.
Definition: Expr.h:5725
ImplicitCastExpr - Allows us to explicitly represent implicit type conversions, which have no direct ...
Definition: Expr.h:3655
Represents an implicitly-generated value initialization of an object of a given type.
Definition: Expr.h:5600
Describes a C or C++ initializer list.
Definition: Expr.h:4847
unsigned getNumInits() const
Definition: Expr.h:4877
bool hadArrayRangeDesignator() const
Definition: Expr.h:5024
const Expr * getInit(unsigned Init) const
Definition: Expr.h:4893
bool isSignedOverflowDefined() const
Definition: LangOptions.h:616
std::string OverflowHandler
The name of the handler function to be called when -ftrapv is specified.
Definition: LangOptions.h:506
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition: ExprCXX.h:4710
MatrixSubscriptExpr - Matrix subscript expression for the MatrixType extension.
Definition: Expr.h:2742
Represents a matrix type, as defined in the Matrix Types clang extensions.
Definition: Type.h:4131
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition: Expr.h:3172
Expr * getBase() const
Definition: Expr.h:3249
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition: Type.h:3460
ObjCArrayLiteral - used for Objective-C array containers; as in: @["Hello", NSApp,...
Definition: ExprObjC.h:191
A runtime availability query.
Definition: ExprObjC.h:1696
VersionTuple getVersion() const
Definition: ExprObjC.h:1719
ObjCBoolLiteralExpr - Objective-C Boolean Literal.
Definition: ExprObjC.h:87
ObjCBoxedExpr - used for generalized expression boxing.
Definition: ExprObjC.h:127
ObjCDictionaryLiteral - AST node to represent Objective-C dictionary literals; as in:"name" : NSUserN...
Definition: ExprObjC.h:309
ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
Definition: ExprObjC.h:1491
Expr * getBase() const
Definition: ExprObjC.h:1516
SourceLocation getExprLoc() const LLVM_READONLY
Definition: ExprObjC.h:1539
ObjCIvarRefExpr - A reference to an ObjC instance variable.
Definition: ExprObjC.h:549
An expression that sends a message to the given Objective-C object or class.
Definition: ExprObjC.h:945
const ObjCMethodDecl * getMethodDecl() const
Definition: ExprObjC.h:1356
QualType getReturnType() const
Definition: DeclObjC.h:329
Represents a pointer to an Objective C object.
Definition: Type.h:7008
const ObjCObjectType * getObjectType() const
Gets the type pointed to by this ObjC pointer.
Definition: Type.h:7045
ObjCProtocolExpr used for protocol expression in Objective-C.
Definition: ExprObjC.h:505
ObjCSelectorExpr used for @selector in Objective-C.
Definition: ExprObjC.h:455
ObjCStringLiteral, used for Objective-C string literals i.e.
Definition: ExprObjC.h:51
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2465
Expr * getIndexExpr(unsigned Idx)
Definition: Expr.h:2526
const OffsetOfNode & getComponent(unsigned Idx) const
Definition: Expr.h:2512
TypeSourceInfo * getTypeSourceInfo() const
Definition: Expr.h:2505
unsigned getNumComponents() const
Definition: Expr.h:2522
Helper class for OffsetOfExpr.
Definition: Expr.h:2359
unsigned getArrayExprIndex() const
For an array element node, returns the index into the array of expressions.
Definition: Expr.h:2417
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition: Expr.h:2423
@ Array
An index into an array.
Definition: Expr.h:2364
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2368
@ Field
A field.
Definition: Expr.h:2366
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2371
Kind getKind() const
Determine what kind of offsetof node this is.
Definition: Expr.h:2413
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition: Expr.h:2433
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition: Expr.h:1168
SourceLocation getExprLoc() const LLVM_READONLY
Definition: Expr.h:1198
Expr * getSelectedExpr() const
Definition: ExprCXX.h:4442
ParenExpr - This represents a parenthesized expression, e.g.
Definition: Expr.h:2130
const Expr * getSubExpr() const
Definition: Expr.h:2145
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3139
QualType getPointeeType() const
Definition: Type.h:3149
PseudoObjectExpr - An expression which accesses a pseudo-object l-value.
Definition: Expr.h:6305
A (possibly-)qualified type.
Definition: Type.h:940
bool mayBeDynamicClass() const
Returns true if it is a class and it might be dynamic.
Definition: Type.cpp:95
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:1007
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:7359
LangAS getAddressSpace() const
Return the address space of this type.
Definition: Type.h:7485
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition: Type.h:1432
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:7560
QualType getCanonicalType() const
Definition: Type.h:7411
bool UseExcessPrecision(const ASTContext &Ctx)
Definition: Type.cpp:1561
bool mayBeNotDynamicClass() const
Returns true if it is not a class or if the class might not be dynamic.
Definition: Type.cpp:100
bool isCanonical() const
Definition: Type.h:7416
@ OCL_Strong
Assigning into this object requires the old value to be released and the new value to be retained.
Definition: Type.h:347
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition: Type.h:340
@ OCL_None
There is no lifetime qualification on this type.
Definition: Type.h:336
@ OCL_Weak
Reading or writing from this object requires a barrier call.
Definition: Type.h:350
@ OCL_Autoreleasing
Assigning into this object requires a lifetime extension.
Definition: Type.h:353
Represents a struct/union/class.
Definition: Decl.h:4168
field_iterator field_end() const
Definition: Decl.h:4377
field_iterator field_begin() const
Definition: Decl.cpp:5069
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:5549
C++2a [expr.prim.req]: A requires-expression provides a concise way to express requirements on templa...
Definition: ExprConcepts.h:510
bool isSatisfied() const
Whether or not the requires clause is satisfied.
Definition: ExprConcepts.h:562
std::string ComputeName(ASTContext &Context) const
Definition: Expr.cpp:592
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:41
Sema - This implements semantic analysis and AST building for C.
Definition: Sema.h:451
ShuffleVectorExpr - clang-specific builtin-in function __builtin_shufflevector.
Definition: Expr.h:4431
llvm::APSInt getShuffleMaskIdx(const ASTContext &Ctx, unsigned N) const
Definition: Expr.h:4482
unsigned getNumSubExprs() const
getNumSubExprs - Return the size of the SubExprs array.
Definition: Expr.h:4465
Expr * getExpr(unsigned Index)
getExpr - Return the Expr at the specified index.
Definition: Expr.h:4471
Represents an expression that computes the length of a parameter pack.
Definition: ExprCXX.h:4251
unsigned getPackLength() const
Retrieve the length of the parameter pack.
Definition: ExprCXX.h:4326
Represents a function call to one of __builtin_LINE(), __builtin_COLUMN(), __builtin_FUNCTION(),...
Definition: Expr.h:4727
APValue EvaluateInContext(const ASTContext &Ctx, const Expr *DefaultExpr) const
Return the result of evaluating this SourceLocExpr in the specified (and possibly null) default argum...
Definition: Expr.cpp:2273
SourceLocation getLocation() const
Definition: Expr.h:4771
Encodes a location in the source.
StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
Definition: Expr.h:4383
CompoundStmt * getSubStmt()
Definition: Expr.h:4400
RetTy Visit(PTR(Stmt) S, ParamTys... P)
Definition: StmtVisitor.h:44
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Definition: StmtVisitor.h:185
Stmt - This represents one statement.
Definition: Stmt.h:84
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Stmt.cpp:338
Represents a reference to a non-type template parameter that has been substituted with a template arg...
Definition: ExprCXX.h:4466
virtual bool useFP16ConversionIntrinsics() const
Check whether llvm intrinsics such as llvm.convert.to.fp16 should be used to convert to and from __fp...
Definition: TargetInfo.h:992
VersionTuple getPlatformMinVersion() const
Retrieve the minimum desired version of the platform, to which the program should be compiled.
Definition: TargetInfo.h:1648
const llvm::fltSemantics & getHalfFormat() const
Definition: TargetInfo.h:764
const llvm::fltSemantics & getBFloat16Format() const
Definition: TargetInfo.h:774
const llvm::fltSemantics & getLongDoubleFormat() const
Definition: TargetInfo.h:785
const llvm::fltSemantics & getFloat128Format() const
Definition: TargetInfo.h:793
const llvm::fltSemantics & getIbm128Format() const
Definition: TargetInfo.h:801
QualType getType() const
Return the type wrapped by this type source info.
Definition: Type.h:7341
A type trait used in the implementation of various C++11 and Library TR1 trait templates.
Definition: ExprCXX.h:2763
bool getValue() const
Definition: ExprCXX.h:2804
bool isVoidType() const
Definition: Type.h:7905
bool isBooleanType() const
Definition: Type.h:8033
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration type whose underlying ty...
Definition: Type.cpp:2156
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration type whose underlying ...
Definition: Type.cpp:2206
bool isArithmeticType() const
Definition: Type.cpp:2270
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:7945
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:8193
bool isReferenceType() const
Definition: Type.h:7624
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition: Type.cpp:1856
bool isSveVLSBuiltinType() const
Determines if this is a sizeless type supported by the 'arm_sve_vector_bits' type attribute,...
Definition: Type.cpp:2488
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:695
bool isExtVectorType() const
Definition: Type.h:7722
bool isExtVectorBoolType() const
Definition: Type.h:7726
bool isOCLIntelSubgroupAVCType() const
Definition: Type.h:7850
bool isAnyComplexType() const
Definition: Type.h:7714
bool isFixedPointType() const
Return true if this is a fixed point type according to ISO/IEC JTC1 SC22 WG14 N1169.
Definition: Type.h:7958
bool isHalfType() const
Definition: Type.h:7909
bool hasSignedIntegerRepresentation() const
Determine whether this type has a signed integer representation of some sort, e.g....
Definition: Type.cpp:2175
bool isQueueT() const
Definition: Type.h:7821
bool isMatrixType() const
Definition: Type.h:7732
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition: Type.h:2671
bool isEventT() const
Definition: Type.h:7813
bool isFunctionType() const
Definition: Type.h:7608
bool isVectorType() const
Definition: Type.h:7718
bool isRealFloatingType() const
Floating point categories.
Definition: Type.cpp:2255
bool isFloatingType() const
Definition: Type.cpp:2238
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition: Type.cpp:2185
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8126
bool isNullPtrType() const
Definition: Type.h:7938
UnaryExprOrTypeTraitExpr - expression with either a type or (unevaluated) expression operand.
Definition: Expr.h:2568
QualType getTypeOfArgument() const
Gets the argument type, or the type of the argument expression, whichever is appropriate.
Definition: Expr.h:2637
bool isArgumentType() const
Definition: Expr.h:2610
UnaryExprOrTypeTrait getKind() const
Definition: Expr.h:2600
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition: Expr.h:2183
SourceLocation getExprLoc() const
Definition: Expr.h:2311
Expr * getSubExpr() const
Definition: Expr.h:2228
Opcode getOpcode() const
Definition: Expr.h:2223
FPOptions getFPFeaturesInEffect(const LangOptions &LO) const
Get the FP features status of this operator.
Definition: Expr.h:2338
bool canOverflow() const
Returns true if the unary operator can cause an overflow.
Definition: Expr.h:2241
Represents a call to the builtin function __builtin_va_arg.
Definition: Expr.h:4667
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition: Decl.h:706
QualType getType() const
Definition: Decl.h:717
QualType getType() const
Definition: Value.cpp:234
Represents a C array with a specified size that is not an integer-constant-expression.
Definition: Type.h:3747
Represents a GCC generic vector type.
Definition: Type.h:3969
VectorKind getVectorKind() const
Definition: Type.h:3989
QualType getElementType() const
Definition: Type.h:3983
Defines the clang::TargetInfo interface.
const AstTypeMatcher< PointerType > pointerType
Matches pointer types, but does not match Objective-C object pointer types.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::ArgumentAdaptingMatcherFunc< internal::HasMatcher > has
Matches AST nodes that have child AST nodes that match the provided matcher.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
llvm::APFloat APFloat
Definition: Floating.h:23
llvm::APInt APInt
Definition: Integral.h:29
bool LE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:882
bool Zero(InterpState &S, CodePtr OpPC)
Definition: Interp.h:1873
bool GE(InterpState &S, CodePtr OpPC)
Definition: Interp.h:897
The JSON file list parser is used to communicate input to InstallAPI.
BinaryOperatorKind
@ Result
The result type of a method or function.
CastKind
CastKind - The kind of operation required for a conversion.
const FunctionProtoType * T
@ Generic
not a target-specific vector type
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition: Dominators.h:30
cl::opt< bool > EnableSingleByteCoverage
#define true
Definition: stdbool.h:25
#define false
Definition: stdbool.h:26
llvm::Value * TotalOffset
llvm::Value * OffsetOverflows
Structure with information about how a bitfield should be accessed.
unsigned Size
The total size of the bit-field, in bits.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
static TBAAAccessInfo getMayAliasInfo()
Definition: CodeGenTBAA.h:63
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:642
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:644
bool has(SanitizerMask K) const
Check if a certain (single) sanitizer is enabled.
Definition: Sanitizers.h:159
bool hasOneOf(SanitizerMask K) const
Check if one or more sanitizers are enabled.
Definition: Sanitizers.h:165