InterpBuiltin.cpp
1//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8#include "../ExprConstShared.h"
9#include "Boolean.h"
10#include "Compiler.h"
11#include "EvalEmitter.h"
12#include "Interp.h"
14#include "PrimType.h"
15#include "clang/AST/OSLog.h"
16#include "clang/AST/RecordLayout.h"
17#include "clang/Basic/Builtins.h"
18#include "clang/Basic/TargetBuiltins.h"
19#include "clang/Basic/TargetInfo.h"
20#include "llvm/Support/SipHash.h"
21
22namespace clang {
23namespace interp {
24
25static unsigned callArgSize(const InterpState &S, const CallExpr *C) {
26 unsigned O = 0;
27
28 for (const Expr *E : C->arguments()) {
29 O += align(primSize(*S.getContext().classify(E)));
30 }
31
32 return O;
33}
34
35template <typename T>
36static T getParam(const InterpFrame *Frame, unsigned Index) {
37 assert(Frame->getFunction()->getNumParams() > Index);
38 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
39 return Frame->getParam<T>(Offset);
40}
41
42static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index) {
43 APSInt R;
44 unsigned Offset = Frame->getFunction()->getParamOffset(Index);
45 INT_TYPE_SWITCH(Frame->getFunction()->getParamType(Index),
46 R = Frame->getParam<T>(Offset).toAPSInt());
47 return R;
48}
49
50 static PrimType getIntPrimType(const InterpState &S) {
51 const TargetInfo &TI = S.getASTContext().getTargetInfo();
52 unsigned IntWidth = TI.getIntWidth();
53
54 if (IntWidth == 32)
55 return PT_Sint32;
56 else if (IntWidth == 16)
57 return PT_Sint16;
58 llvm_unreachable("Int isn't 16 or 32 bit?");
59}
60
61 static PrimType getLongPrimType(const InterpState &S) {
62 const TargetInfo &TI = S.getASTContext().getTargetInfo();
63 unsigned LongWidth = TI.getLongWidth();
64
65 if (LongWidth == 64)
66 return PT_Sint64;
67 else if (LongWidth == 32)
68 return PT_Sint32;
69 else if (LongWidth == 16)
70 return PT_Sint16;
71 llvm_unreachable("long isn't 16, 32 or 64 bit?");
72}
73
74/// Peek an integer value from the stack into an APSInt.
75static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset = 0) {
76 if (Offset == 0)
77 Offset = align(primSize(T));
78
79 APSInt R;
80 INT_TYPE_SWITCH(T, R = Stk.peek<T>(Offset).toAPSInt());
81
82 return R;
83}
84
85/// Pushes \p Val on the stack as the type given by \p QT.
86 static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
87 assert(QT->isSignedIntegerOrEnumerationType() ||
88 QT->isUnsignedIntegerOrEnumerationType());
89 std::optional<PrimType> T = S.getContext().classify(QT);
90 assert(T);
91
92 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
93 if (QT->isSignedIntegerOrEnumerationType()) {
94 int64_t V = Val.getSExtValue();
95 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
96 } else {
97 // Unsigned.
98 uint64_t V = Val.getZExtValue();
99 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
100 }
101}
102
103template <typename T>
104static void pushInteger(InterpState &S, T Val, QualType QT) {
105 if constexpr (std::is_same_v<T, APInt>)
106 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
107 else if constexpr (std::is_same_v<T, APSInt>)
108 pushInteger(S, Val, QT);
109 else
110 pushInteger(S,
111 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
112 std::is_signed_v<T>),
113 !std::is_signed_v<T>),
114 QT);
115}
116
117static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value) {
118 INT_TYPE_SWITCH_NO_BOOL(
119 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
120}
121
122static bool retPrimValue(InterpState &S, CodePtr OpPC,
123 std::optional<PrimType> &T) {
124 if (!T)
125 return RetVoid(S, OpPC);
126
127#define RET_CASE(X) \
128 case X: \
129 return Ret<X>(S, OpPC);
130 switch (*T) {
131 RET_CASE(PT_Ptr);
132 RET_CASE(PT_FnPtr);
133 RET_CASE(PT_Float);
134 RET_CASE(PT_Bool);
135 RET_CASE(PT_Sint8);
136 RET_CASE(PT_Uint8);
137 RET_CASE(PT_Sint16);
138 RET_CASE(PT_Uint16);
139 RET_CASE(PT_Sint32);
140 RET_CASE(PT_Uint32);
141 RET_CASE(PT_Sint64);
142 RET_CASE(PT_Uint64);
143 RET_CASE(PT_IntAP);
144 RET_CASE(PT_IntAPS);
145 default:
146 llvm_unreachable("Unsupported return type for builtin function");
147 }
148#undef RET_CASE
149}
150
151 static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC,
152 unsigned ID) {
153 auto Loc = S.Current->getSource(OpPC);
154 if (S.getLangOpts().CPlusPlus11)
155 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
156 << /*isConstexpr=*/0 << /*isConstructor=*/0
157 << ("'" + S.getASTContext().BuiltinInfo.getName(ID) + "'").str();
158 else
159 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
160}
161
162 static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC,
163 const InterpFrame *Frame,
164 const CallExpr *Call) {
165 unsigned Depth = S.Current->getDepth();
166 auto isStdCall = [](const FunctionDecl *F) -> bool {
167 return F && F->isInStdNamespace() && F->getIdentifier() &&
168 F->getIdentifier()->isStr("is_constant_evaluated");
169 };
170 const InterpFrame *Caller = Frame->Caller;
171 // The current frame is the one for __builtin_is_constant_evaluated.
172 // The one above that, potentially the one for std::is_constant_evaluated().
173 if (S.inConstantContext() && !S.checkingPotentialConstantExpression() &&
174 S.getEvalStatus().Diag &&
175 (Depth == 1 || (Depth == 2 && isStdCall(Caller->getCallee())))) {
176 if (Caller->Caller && isStdCall(Caller->getCallee())) {
177 const Expr *E = Caller->Caller->getExpr(Caller->getRetPC());
178 S.report(E->getExprLoc(),
179 diag::warn_is_constant_evaluated_always_true_constexpr)
180 << "std::is_constant_evaluated" << E->getSourceRange();
181 } else {
182 const Expr *E = Frame->Caller->getExpr(Frame->getRetPC());
183 S.report(E->getExprLoc(),
184 diag::warn_is_constant_evaluated_always_true_constexpr)
185 << "__builtin_is_constant_evaluated" << E->getSourceRange();
186 }
187 }
188
189 S.Stk.push<Boolean>(Boolean::from(S.inConstantContext()));
190 return true;
191}
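// Illustrative note, not part of the original file: the warning issued above
// targets code where the call can only ever yield true, for example:
//   constexpr int pick() {
//     if (std::is_constant_evaluated())
//       return 1; // always taken during constant evaluation
//     return 0;
//   }
//   static_assert(pick() == 1);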
192
193 static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC,
194 const InterpFrame *Frame,
195 const Function *Func, const CallExpr *Call) {
196 unsigned ID = Func->getBuiltinID();
197 const Pointer &A = getParam<Pointer>(Frame, 0);
198 const Pointer &B = getParam<Pointer>(Frame, 1);
199
200 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp)
201 diagnoseNonConstexprBuiltin(S, OpPC, ID);
202
203 uint64_t Limit = ~static_cast<uint64_t>(0);
204 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp)
205 Limit = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)))
206 .getZExtValue();
207
208 if (Limit == 0) {
209 pushInteger(S, 0, Call->getType());
210 return true;
211 }
212
213 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
214 return false;
215
216 if (A.isDummy() || B.isDummy())
217 return false;
218
219 assert(A.getFieldDesc()->isPrimitiveArray());
220 assert(B.getFieldDesc()->isPrimitiveArray());
221
222 unsigned IndexA = A.getIndex();
223 unsigned IndexB = B.getIndex();
224 int32_t Result = 0;
225 uint64_t Steps = 0;
226 for (;; ++IndexA, ++IndexB, ++Steps) {
227
228 if (Steps >= Limit)
229 break;
230 const Pointer &PA = A.atIndex(IndexA);
231 const Pointer &PB = B.atIndex(IndexB);
232 if (!CheckRange(S, OpPC, PA, AK_Read) ||
233 !CheckRange(S, OpPC, PB, AK_Read)) {
234 return false;
235 }
236 uint8_t CA = PA.deref<uint8_t>();
237 uint8_t CB = PB.deref<uint8_t>();
238
239 if (CA > CB) {
240 Result = 1;
241 break;
242 } else if (CA < CB) {
243 Result = -1;
244 break;
245 }
246 if (CA == 0 || CB == 0)
247 break;
248 }
249
250 pushInteger(S, Result, Call->getType());
251 return true;
252}
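// Illustrative note, not part of the original file: this handler lets the
// string comparison builtins fold in constant expressions, producing exactly
// -1, 0 or 1, e.g.:
//   static_assert(__builtin_strcmp("abc", "abd") < 0);
//   static_assert(__builtin_strncmp("abc", "abd", 2) == 0);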
253
254 static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC,
255 const InterpFrame *Frame,
256 const Function *Func, const CallExpr *Call) {
257 unsigned ID = Func->getBuiltinID();
258 const Pointer &StrPtr = getParam<Pointer>(Frame, 0);
259
260 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
261 diagnoseNonConstexprBuiltin(S, OpPC, ID);
262
263 if (!CheckArray(S, OpPC, StrPtr))
264 return false;
265
266 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
267 return false;
268
269 if (!CheckDummy(S, OpPC, StrPtr, AK_Read))
270 return false;
271
272 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
273 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
274
275 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
276 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
277 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
278 }
279
280 size_t Len = 0;
281 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
282 const Pointer &ElemPtr = StrPtr.atIndex(I);
283
284 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
285 return false;
286
287 uint32_t Val;
288 switch (ElemSize) {
289 case 1:
290 Val = ElemPtr.deref<uint8_t>();
291 break;
292 case 2:
293 Val = ElemPtr.deref<uint16_t>();
294 break;
295 case 4:
296 Val = ElemPtr.deref<uint32_t>();
297 break;
298 default:
299 llvm_unreachable("Unsupported char size");
300 }
301 if (Val == 0)
302 break;
303 }
304
305 pushInteger(S, Len, Call->getType());
306
307 return true;
308}
309
310 static bool interp__builtin_nan(InterpState &S, CodePtr OpPC,
311 const InterpFrame *Frame, const Function *F,
312 bool Signaling) {
313 const Pointer &Arg = getParam<Pointer>(Frame, 0);
314
315 if (!CheckLoad(S, OpPC, Arg))
316 return false;
317
318 assert(Arg.getFieldDesc()->isPrimitiveArray());
319
320 // Convert the given string to an integer using StringRef's API.
321 llvm::APInt Fill;
322 std::string Str;
323 assert(Arg.getNumElems() >= 1);
324 for (unsigned I = 0;; ++I) {
325 const Pointer &Elem = Arg.atIndex(I);
326
327 if (!CheckLoad(S, OpPC, Elem))
328 return false;
329
330 if (Elem.deref<int8_t>() == 0)
331 break;
332
333 Str += Elem.deref<char>();
334 }
335
336 // Treat empty strings as if they were zero.
337 if (Str.empty())
338 Fill = llvm::APInt(32, 0);
339 else if (StringRef(Str).getAsInteger(0, Fill))
340 return false;
341
342 const llvm::fltSemantics &TargetSemantics =
343 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
344
345 Floating Result;
346 if (S.getASTContext().getTargetInfo().isNan2008()) {
347 if (Signaling)
348 Result = Floating(
349 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
350 else
351 Result = Floating(
352 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
353 } else {
354 // Prior to IEEE 754-2008, architectures were allowed to choose whether
355 // the first bit of their significand was set for qNaN or sNaN. MIPS chose
356 // a different encoding to what became a standard in 2008, and for pre-
357 // 2008 revisions, MIPS interpreted sNaN-2008 as qNaN and qNaN-2008 as
358 // sNaN. This is now known as "legacy NaN" encoding.
359 if (Signaling)
360 Result = Floating(
361 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
362 else
363 Result = Floating(
364 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
365 }
366
367 S.Stk.push<Floating>(Result);
368 return true;
369}
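// Illustrative note, not part of the original file: the payload string is
// parsed with StringRef::getAsInteger above, so calls such as the following
// should constant-fold:
//   static_assert(__builtin_isnan(__builtin_nan("")));
//   static_assert(__builtin_isnan(__builtin_nan("0x42")));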
370
371 static bool interp__builtin_inf(InterpState &S, CodePtr OpPC,
372 const InterpFrame *Frame, const Function *F) {
373 const llvm::fltSemantics &TargetSemantics =
374 S.getASTContext().getFloatTypeSemantics(F->getDecl()->getReturnType());
375
376 S.Stk.push<Floating>(Floating::getInf(TargetSemantics));
377 return true;
378}
379
380 static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC,
381 const InterpFrame *Frame,
382 const Function *F) {
383 const Floating &Arg1 = getParam<Floating>(Frame, 0);
384 const Floating &Arg2 = getParam<Floating>(Frame, 1);
385
386 APFloat Copy = Arg1.getAPFloat();
387 Copy.copySign(Arg2.getAPFloat());
388 S.Stk.push<Floating>(Floating(Copy));
389
390 return true;
391}
392
393 static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC,
394 const InterpFrame *Frame, const Function *F,
395 bool IsNumBuiltin) {
396 const Floating &LHS = getParam<Floating>(Frame, 0);
397 const Floating &RHS = getParam<Floating>(Frame, 1);
398
399 Floating Result;
400
401 if (IsNumBuiltin) {
402 Result = llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat());
403 } else {
404 // When comparing zeroes, return -0.0 if one of the zeroes is negative.
405 if (LHS.isZero() && RHS.isZero() && RHS.isNegative())
406 Result = RHS;
407 else if (LHS.isNan() || RHS < LHS)
408 Result = RHS;
409 else
410 Result = LHS;
411 }
412
413 S.Stk.push<Floating>(Result);
414 return true;
415}
416
417 static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC,
418 const InterpFrame *Frame, const Function *Func,
419 bool IsNumBuiltin) {
420 const Floating &LHS = getParam<Floating>(Frame, 0);
421 const Floating &RHS = getParam<Floating>(Frame, 1);
422
423 Floating Result;
424
425 if (IsNumBuiltin) {
426 Result = llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat());
427 } else {
428 // When comparing zeroes, return +0.0 if one of the zeroes is positive.
429 if (LHS.isZero() && RHS.isZero() && LHS.isNegative())
430 Result = RHS;
431 else if (LHS.isNan() || RHS > LHS)
432 Result = RHS;
433 else
434 Result = LHS;
435 }
436
437 S.Stk.push<Floating>(Result);
438 return true;
439}
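// Illustrative note, not part of the original file: the zero- and NaN-handling
// spelled out above follows the usual fmin/fmax library rules, e.g.:
//   static_assert(__builtin_fmax(-0.0, +0.0) == 0.0);
//   static_assert(__builtin_fmin(5.0, __builtin_nan("")) == 5.0);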
440
441/// Defined as __builtin_isnan(...), to accommodate the fact that it can
442/// take a float, double, long double, etc.
443/// But for us, that's all a Floating anyway.
444 static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC,
445 const InterpFrame *Frame, const Function *F,
446 const CallExpr *Call) {
447 const Floating &Arg = S.Stk.peek<Floating>();
448
449 pushInteger(S, Arg.isNan(), Call->getType());
450 return true;
451}
452
453 static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC,
454 const InterpFrame *Frame,
455 const Function *F,
456 const CallExpr *Call) {
457 const Floating &Arg = S.Stk.peek<Floating>();
458
459 pushInteger(S, Arg.isSignaling(), Call->getType());
460 return true;
461}
462
463 static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC,
464 const InterpFrame *Frame, const Function *F,
465 bool CheckSign, const CallExpr *Call) {
466 const Floating &Arg = S.Stk.peek<Floating>();
467 bool IsInf = Arg.isInf();
468
469 if (CheckSign)
470 pushInteger(S, IsInf ? (Arg.isNegative() ? -1 : 1) : 0, Call->getType());
471 else
472 pushInteger(S, Arg.isInf(), Call->getType());
473 return true;
474}
475
476 static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC,
477 const InterpFrame *Frame,
478 const Function *F, const CallExpr *Call) {
479 const Floating &Arg = S.Stk.peek<Floating>();
480
481 pushInteger(S, Arg.isFinite(), Call->getType());
482 return true;
483}
484
485 static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC,
486 const InterpFrame *Frame,
487 const Function *F, const CallExpr *Call) {
488 const Floating &Arg = S.Stk.peek<Floating>();
489
490 pushInteger(S, Arg.isNormal(), Call->getType());
491 return true;
492}
493
494 static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC,
495 const InterpFrame *Frame,
496 const Function *F,
497 const CallExpr *Call) {
498 const Floating &Arg = S.Stk.peek<Floating>();
499
500 pushInteger(S, Arg.isDenormal(), Call->getType());
501 return true;
502}
503
504 static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC,
505 const InterpFrame *Frame, const Function *F,
506 const CallExpr *Call) {
507 const Floating &Arg = S.Stk.peek<Floating>();
508
509 pushInteger(S, Arg.isZero(), Call->getType());
510 return true;
511}
512
513 static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC,
514 const InterpFrame *Frame, const Function *F,
515 const CallExpr *Call) {
516 const Floating &Arg = S.Stk.peek<Floating>();
517
518 pushInteger(S, Arg.isNegative(), Call->getType());
519 return true;
520}
521
522 static bool interp_floating_comparison(InterpState &S, CodePtr OpPC,
523 const InterpFrame *Frame,
524 const Function *F,
525 const CallExpr *Call) {
526 const Floating &RHS = S.Stk.peek<Floating>();
527 const Floating &LHS = S.Stk.peek<Floating>(align(2u * primSize(PT_Float)));
528 unsigned ID = F->getBuiltinID();
529
530 pushInteger(
531 S,
532 [&] {
533 switch (ID) {
534 case Builtin::BI__builtin_isgreater:
535 return LHS > RHS;
536 case Builtin::BI__builtin_isgreaterequal:
537 return LHS >= RHS;
538 case Builtin::BI__builtin_isless:
539 return LHS < RHS;
540 case Builtin::BI__builtin_islessequal:
541 return LHS <= RHS;
542 case Builtin::BI__builtin_islessgreater: {
543 ComparisonCategoryResult cmp = LHS.compare(RHS);
544 return cmp == ComparisonCategoryResult::Less ||
545 cmp == ComparisonCategoryResult::Greater;
546 }
547 case Builtin::BI__builtin_isunordered:
548 return LHS.compare(RHS) == ComparisonCategoryResult::Unordered;
549 default:
550 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
551 "comparison function");
552 }
553 }(),
554 Call->getType());
555 return true;
556}
557
558/// First parameter to __builtin_isfpclass is the floating value, the
559/// second one is an integral value.
560 static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC,
561 const InterpFrame *Frame,
562 const Function *Func,
563 const CallExpr *Call) {
564 PrimType FPClassArgT = *S.getContext().classify(Call->getArg(1)->getType());
565 APSInt FPClassArg = peekToAPSInt(S.Stk, FPClassArgT);
566 const Floating &F =
567 S.Stk.peek<Floating>(align(primSize(FPClassArgT) + primSize(PT_Float)));
568
569 int32_t Result =
570 static_cast<int32_t>((F.classify() & FPClassArg).getZExtValue());
571 pushInteger(S, Result, Call->getType());
572
573 return true;
574}
575
576/// Five int values followed by one floating value.
577 static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC,
578 const InterpFrame *Frame,
579 const Function *Func,
580 const CallExpr *Call) {
581 const Floating &Val = S.Stk.peek<Floating>();
582
583 unsigned Index;
584 switch (Val.getCategory()) {
585 case APFloat::fcNaN:
586 Index = 0;
587 break;
588 case APFloat::fcInfinity:
589 Index = 1;
590 break;
591 case APFloat::fcNormal:
592 Index = Val.isDenormal() ? 3 : 2;
593 break;
594 case APFloat::fcZero:
595 Index = 4;
596 break;
597 }
598
599 // The last argument is first on the stack.
600 assert(Index <= 4);
601 unsigned IntSize = primSize(getIntPrimType(S));
602 unsigned Offset =
603 align(primSize(PT_Float)) + ((1 + (4 - Index)) * align(IntSize));
604
605 APSInt I = peekToAPSInt(S.Stk, getIntPrimType(S), Offset);
606 pushInteger(S, I, Call->getType());
607 return true;
608}
609
610// The C standard says "fabs raises no floating-point exceptions,
611// even if x is a signaling NaN. The returned value is independent of
612// the current rounding direction mode." Therefore constant folding can
613// proceed without regard to the floating point settings.
614// Reference, WG14 N2478 F.10.4.3
615 static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC,
616 const InterpFrame *Frame,
617 const Function *Func) {
618 const Floating &Val = getParam<Floating>(Frame, 0);
619
620 S.Stk.push<Floating>(Floating::abs(Val));
621 return true;
622}
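// Illustrative note, not part of the original file: since fabs is independent
// of rounding mode and raises no exceptions, it folds unconditionally:
//   static_assert(__builtin_fabs(-2.5) == 2.5);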
623
624 static bool interp__builtin_abs(InterpState &S, CodePtr OpPC,
625 const InterpFrame *Frame, const Function *Func,
626 const CallExpr *Call) {
627 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
628 APSInt Val = peekToAPSInt(S.Stk, ArgT);
629 if (Val ==
630 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
631 return false;
632 if (Val.isNegative())
633 Val.negate();
634 pushInteger(S, Val, Call->getType());
635 return true;
636}
637
638 static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC,
639 const InterpFrame *Frame,
640 const Function *Func,
641 const CallExpr *Call) {
642 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
643 APSInt Val = peekToAPSInt(S.Stk, ArgT);
644 pushInteger(S, Val.popcount(), Call->getType());
645 return true;
646}
647
648 static bool interp__builtin_parity(InterpState &S, CodePtr OpPC,
649 const InterpFrame *Frame,
650 const Function *Func, const CallExpr *Call) {
651 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
652 APSInt Val = peekToAPSInt(S.Stk, ArgT);
653 pushInteger(S, Val.popcount() % 2, Call->getType());
654 return true;
655}
656
657 static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC,
658 const InterpFrame *Frame,
659 const Function *Func, const CallExpr *Call) {
660 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
661 APSInt Val = peekToAPSInt(S.Stk, ArgT);
662 pushInteger(S, Val.getBitWidth() - Val.getSignificantBits(), Call->getType());
663 return true;
664}
665
666 static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC,
667 const InterpFrame *Frame,
668 const Function *Func,
669 const CallExpr *Call) {
670 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
671 APSInt Val = peekToAPSInt(S.Stk, ArgT);
672 pushInteger(S, Val.reverseBits(), Call->getType());
673 return true;
674}
675
676 static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC,
677 const InterpFrame *Frame,
678 const Function *Func,
679 const CallExpr *Call) {
680 // This is an unevaluated call, so there are no arguments on the stack.
681 assert(Call->getNumArgs() == 1);
682 const Expr *Arg = Call->getArg(0);
683
684 GCCTypeClass ResultClass =
685 EvaluateBuiltinClassifyType(Arg->getType(), S.getLangOpts());
686 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
687 pushInteger(S, ReturnVal, Call->getType());
688 return true;
689}
690
691// __builtin_expect(long, long)
692// __builtin_expect_with_probability(long, long, double)
693 static bool interp__builtin_expect(InterpState &S, CodePtr OpPC,
694 const InterpFrame *Frame,
695 const Function *Func, const CallExpr *Call) {
696 // The return value is simply the value of the first parameter.
697 // We ignore the probability.
698 unsigned NumArgs = Call->getNumArgs();
699 assert(NumArgs == 2 || NumArgs == 3);
700
701 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
702 unsigned Offset = align(primSize(getLongPrimType(S))) * 2;
703 if (NumArgs == 3)
704 Offset += align(primSize(PT_Float));
705
706 APSInt Val = peekToAPSInt(S.Stk, ArgT, Offset);
707 pushInteger(S, Val, Call->getType());
708 return true;
709}
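// Illustrative note, not part of the original file: only the first argument
// contributes to the folded value; the expectation and probability are hints:
//   static_assert(__builtin_expect(42L, 0L) == 42L);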
710
711/// rotateleft(value, amount)
712 static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC,
713 const InterpFrame *Frame,
714 const Function *Func, const CallExpr *Call,
715 bool Right) {
716 PrimType AmountT = *S.getContext().classify(Call->getArg(1)->getType());
717 PrimType ValueT = *S.getContext().classify(Call->getArg(0)->getType());
718
719 APSInt Amount = peekToAPSInt(S.Stk, AmountT);
720 APSInt Value = peekToAPSInt(
721 S.Stk, ValueT, align(primSize(AmountT)) + align(primSize(ValueT)));
722
723 APSInt Result;
724 if (Right)
725 Result = APSInt(Value.rotr(Amount.urem(Value.getBitWidth())),
726 /*IsUnsigned=*/true);
727 else // Left.
728 Result = APSInt(Value.rotl(Amount.urem(Value.getBitWidth())),
729 /*IsUnsigned=*/true);
730
731 pushInteger(S, Result, Call->getType());
732 return true;
733}
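// Illustrative note, not part of the original file: the rotate amount is
// reduced modulo the bit width above, so an over-wide count still folds:
//   static_assert(__builtin_rotateleft8(0x12, 4) == 0x21);
//   static_assert(__builtin_rotateright8(0x21, 12) == 0x12);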
734
735 static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC,
736 const InterpFrame *Frame, const Function *Func,
737 const CallExpr *Call) {
738 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
739 APSInt Value = peekToAPSInt(S.Stk, ArgT);
740
741 uint64_t N = Value.countr_zero();
742 pushInteger(S, N == Value.getBitWidth() ? 0 : N + 1, Call->getType());
743 return true;
744}
745
746 static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC,
747 const InterpFrame *Frame,
748 const Function *Func,
749 const CallExpr *Call) {
750 assert(Call->getArg(0)->isLValue());
751 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
752
753 if (PtrT == PT_FnPtr) {
754 const FunctionPointer &Arg = S.Stk.peek<FunctionPointer>();
755 S.Stk.push<FunctionPointer>(Arg);
756 } else if (PtrT == PT_Ptr) {
757 const Pointer &Arg = S.Stk.peek<Pointer>();
758 S.Stk.push<Pointer>(Arg);
759 } else {
760 assert(false && "Unsupported pointer type passed to __builtin_addressof()");
761 }
762 return true;
763}
764
765 static bool interp__builtin_move(InterpState &S, CodePtr OpPC,
766 const InterpFrame *Frame, const Function *Func,
767 const CallExpr *Call) {
768
769 PrimType ArgT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
770
771 TYPE_SWITCH(ArgT, const T &Arg = S.Stk.peek<T>(); S.Stk.push<T>(Arg););
772
773 return Func->getDecl()->isConstexpr();
774}
775
776 static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC,
777 const InterpFrame *Frame,
778 const Function *Func,
779 const CallExpr *Call) {
780 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
781 APSInt Arg = peekToAPSInt(S.Stk, ArgT);
782
783 int Result = S.getASTContext().getTargetInfo().getEHDataRegisterNumber(
784 Arg.getZExtValue());
785 pushInteger(S, Result, Call->getType());
786 return true;
787}
788
789/// Just takes the first Argument to the call and puts it on the stack.
790static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
791 const Function *Func, const CallExpr *Call) {
792 const Pointer &Arg = S.Stk.peek<Pointer>();
793 S.Stk.push<Pointer>(Arg);
794 return true;
795}
796
797// Two integral values followed by a pointer (lhs, rhs, resultOut)
798 static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC,
799 const InterpFrame *Frame,
800 const Function *Func,
801 const CallExpr *Call) {
802 Pointer &ResultPtr = S.Stk.peek<Pointer>();
803 if (ResultPtr.isDummy())
804 return false;
805
806 unsigned BuiltinOp = Func->getBuiltinID();
807 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
808 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
809 APSInt RHS = peekToAPSInt(S.Stk, RHST,
810 align(primSize(PT_Ptr)) + align(primSize(RHST)));
811 APSInt LHS = peekToAPSInt(S.Stk, LHST,
812 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
813 align(primSize(LHST)));
814 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
815 PrimType ResultT = *S.getContext().classify(ResultType);
816 bool Overflow;
817
818 APSInt Result;
819 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
820 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
821 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
822 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
823 ResultType->isSignedIntegerOrEnumerationType();
824 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
825 ResultType->isSignedIntegerOrEnumerationType();
826 uint64_t LHSSize = LHS.getBitWidth();
827 uint64_t RHSSize = RHS.getBitWidth();
828 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
829 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
830
831 // Add an additional bit if the signedness isn't uniformly agreed to. We
832 // could do this ONLY if there is a signed and an unsigned that both have
833 // MaxBits, but the code to check that is pretty nasty. The issue will be
834 // caught in the shrink-to-result later anyway.
835 if (IsSigned && !AllSigned)
836 ++MaxBits;
837
838 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
839 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
840 Result = APSInt(MaxBits, !IsSigned);
841 }
842
843 // Find largest int.
844 switch (BuiltinOp) {
845 default:
846 llvm_unreachable("Invalid value for BuiltinOp");
847 case Builtin::BI__builtin_add_overflow:
848 case Builtin::BI__builtin_sadd_overflow:
849 case Builtin::BI__builtin_saddl_overflow:
850 case Builtin::BI__builtin_saddll_overflow:
851 case Builtin::BI__builtin_uadd_overflow:
852 case Builtin::BI__builtin_uaddl_overflow:
853 case Builtin::BI__builtin_uaddll_overflow:
854 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
855 : LHS.uadd_ov(RHS, Overflow);
856 break;
857 case Builtin::BI__builtin_sub_overflow:
858 case Builtin::BI__builtin_ssub_overflow:
859 case Builtin::BI__builtin_ssubl_overflow:
860 case Builtin::BI__builtin_ssubll_overflow:
861 case Builtin::BI__builtin_usub_overflow:
862 case Builtin::BI__builtin_usubl_overflow:
863 case Builtin::BI__builtin_usubll_overflow:
864 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
865 : LHS.usub_ov(RHS, Overflow);
866 break;
867 case Builtin::BI__builtin_mul_overflow:
868 case Builtin::BI__builtin_smul_overflow:
869 case Builtin::BI__builtin_smull_overflow:
870 case Builtin::BI__builtin_smulll_overflow:
871 case Builtin::BI__builtin_umul_overflow:
872 case Builtin::BI__builtin_umull_overflow:
873 case Builtin::BI__builtin_umulll_overflow:
874 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
875 : LHS.umul_ov(RHS, Overflow);
876 break;
877 }
878
879 // In the case where multiple sizes are allowed, truncate and see if
880 // the values are the same.
881 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
882 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
883 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
884 // APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
885 // since it will give us the behavior of a TruncOrSelf in the case where
886 // its parameter <= its size. We previously set Result to be at least the
887 // type-size of the result, so getTypeSize(ResultType) <= Result.getBitWidth().
888 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
889 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
890
891 if (!APSInt::isSameValue(Temp, Result))
892 Overflow = true;
893 Result = Temp;
894 }
895
896 // Write Result to ResultPtr and put Overflow on the stack.
897 assignInteger(ResultPtr, ResultT, Result);
898 ResultPtr.initialize();
899 assert(Func->getDecl()->getReturnType()->isBooleanType());
900 S.Stk.push<Boolean>(Overflow);
901 return true;
902}
903
904/// Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
905 static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC,
906 const InterpFrame *Frame,
907 const Function *Func,
908 const CallExpr *Call) {
909 unsigned BuiltinOp = Func->getBuiltinID();
910 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
911 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
912 PrimType CarryT = *S.getContext().classify(Call->getArg(2)->getType());
913 APSInt RHS = peekToAPSInt(S.Stk, RHST,
914 align(primSize(PT_Ptr)) + align(primSize(CarryT)) +
915 align(primSize(RHST)));
916 APSInt LHS =
917 peekToAPSInt(S.Stk, LHST,
918 align(primSize(PT_Ptr)) + align(primSize(RHST)) +
919 align(primSize(CarryT)) + align(primSize(LHST)));
920 APSInt CarryIn = peekToAPSInt(
921 S.Stk, LHST, align(primSize(PT_Ptr)) + align(primSize(CarryT)));
922 APSInt CarryOut;
923
924 APSInt Result;
925 // Copy the number of bits and sign.
926 Result = LHS;
927 CarryOut = LHS;
928
929 bool FirstOverflowed = false;
930 bool SecondOverflowed = false;
931 switch (BuiltinOp) {
932 default:
933 llvm_unreachable("Invalid value for BuiltinOp");
934 case Builtin::BI__builtin_addcb:
935 case Builtin::BI__builtin_addcs:
936 case Builtin::BI__builtin_addc:
937 case Builtin::BI__builtin_addcl:
938 case Builtin::BI__builtin_addcll:
939 Result =
940 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
941 break;
942 case Builtin::BI__builtin_subcb:
943 case Builtin::BI__builtin_subcs:
944 case Builtin::BI__builtin_subc:
945 case Builtin::BI__builtin_subcl:
946 case Builtin::BI__builtin_subcll:
947 Result =
948 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
949 break;
950 }
951 // It is possible for both overflows to happen but CGBuiltin uses an OR so
952 // this is consistent.
953 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
954
955 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
956 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
957 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
958 assignInteger(CarryOutPtr, CarryOutT, CarryOut);
959 CarryOutPtr.initialize();
960
961 assert(Call->getType() == Call->getArg(0)->getType());
962 pushInteger(S, Result, Call->getType());
963 return true;
964}
965
966 static bool interp__builtin_clz(InterpState &S, CodePtr OpPC,
967 const InterpFrame *Frame, const Function *Func,
968 const CallExpr *Call) {
969 unsigned CallSize = callArgSize(S, Call);
970 unsigned BuiltinOp = Func->getBuiltinID();
971 PrimType ValT = *S.getContext().classify(Call->getArg(0));
972 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
973
974 // When the argument is 0, the result of GCC builtins is undefined, whereas
975 // for Microsoft intrinsics, the result is the bit-width of the argument.
976 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
977 BuiltinOp != Builtin::BI__lzcnt &&
978 BuiltinOp != Builtin::BI__lzcnt64;
979
980 if (Val == 0) {
981 if (Func->getBuiltinID() == Builtin::BI__builtin_clzg &&
982 Call->getNumArgs() == 2) {
983 // We have a fallback parameter.
984 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
985 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
986 pushInteger(S, Fallback, Call->getType());
987 return true;
988 }
989
990 if (ZeroIsUndefined)
991 return false;
992 }
993
994 pushInteger(S, Val.countl_zero(), Call->getType());
995 return true;
996}
997
998 static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC,
999 const InterpFrame *Frame, const Function *Func,
1000 const CallExpr *Call) {
1001 unsigned CallSize = callArgSize(S, Call);
1002 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1003 const APSInt &Val = peekToAPSInt(S.Stk, ValT, CallSize);
1004
1005 if (Val == 0) {
1006 if (Func->getBuiltinID() == Builtin::BI__builtin_ctzg &&
1007 Call->getNumArgs() == 2) {
1008 // We have a fallback parameter.
1009 PrimType FallbackT = *S.getContext().classify(Call->getArg(1));
1010 const APSInt &Fallback = peekToAPSInt(S.Stk, FallbackT);
1011 pushInteger(S, Fallback, Call->getType());
1012 return true;
1013 }
1014 return false;
1015 }
1016
1017 pushInteger(S, Val.countr_zero(), Call->getType());
1018 return true;
1019}
1020
1021 static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC,
1022 const InterpFrame *Frame,
1023 const Function *Func, const CallExpr *Call) {
1024 PrimType ReturnT = *S.getContext().classify(Call->getType());
1025 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1026 const APSInt &Val = peekToAPSInt(S.Stk, ValT);
1027 assert(Val.getActiveBits() <= 64);
1028
1029 INT_TYPE_SWITCH(ReturnT,
1030 { S.Stk.push<T>(T::from(Val.byteSwap().getZExtValue())); });
1031 return true;
1032}
1033
1034/// bool __atomic_always_lock_free(size_t, void const volatile*)
1035/// bool __atomic_is_lock_free(size_t, void const volatile*)
1036/// bool __c11_atomic_is_lock_free(size_t)
1037 static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC,
1038 const InterpFrame *Frame,
1039 const Function *Func,
1040 const CallExpr *Call) {
1041 unsigned BuiltinOp = Func->getBuiltinID();
1042
1043 PrimType ValT = *S.getContext().classify(Call->getArg(0));
1044 unsigned SizeValOffset = 0;
1045 if (BuiltinOp != Builtin::BI__c11_atomic_is_lock_free)
1046 SizeValOffset = align(primSize(ValT)) + align(primSize(PT_Ptr));
1047 const APSInt &SizeVal = peekToAPSInt(S.Stk, ValT, SizeValOffset);
1048
1049 auto returnBool = [&S](bool Value) -> bool {
1050 S.Stk.push<Boolean>(Value);
1051 return true;
1052 };
1053
1054 // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1055 // of two less than or equal to the maximum inline atomic width, we know it
1056 // is lock-free. If the size isn't a power of two, or greater than the
1057 // maximum alignment where we promote atomics, we know it is not lock-free
1058 // (at least not in the sense of atomic_is_lock_free). Otherwise,
1059 // the answer can only be determined at runtime; for example, 16-byte
1060 // atomics have lock-free implementations on some, but not all,
1061 // x86-64 processors.
1062
1063 // Check power-of-two.
1064 CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
1065 if (Size.isPowerOfTwo()) {
1066 // Check against inlining width.
1067 unsigned InlineWidthBits =
1068 S.getASTContext().getTargetInfo().getMaxAtomicInlineWidth();
1069 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1070
1071 // OK, we will inline appropriately-aligned operations of this size,
1072 // and _Atomic(T) is appropriately-aligned.
1073 if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
1074 Size == CharUnits::One())
1075 return returnBool(true);
1076
1077 // Same for null pointers.
1078 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1079 const Pointer &Ptr = S.Stk.peek<Pointer>();
1080 if (Ptr.isZero())
1081 return returnBool(true);
1082
1083 if (Ptr.isIntegralPointer()) {
1084 uint64_t IntVal = Ptr.getIntegerRepresentation();
1085 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1086 return returnBool(true);
1087 }
1088
1089 const Expr *PtrArg = Call->getArg(1);
1090 // Otherwise, check the type's alignment against Size.
1091 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1092 // Drop the potential implicit-cast to 'const volatile void*', getting
1093 // the underlying type.
1094 if (ICE->getCastKind() == CK_BitCast)
1095 PtrArg = ICE->getSubExpr();
1096 }
1097
1098 if (auto PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1099 QualType PointeeType = PtrTy->getPointeeType();
1100 if (!PointeeType->isIncompleteType() &&
1101 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1102 // OK, we will inline operations on this object.
1103 return returnBool(true);
1104 }
1105 }
1106 }
1107 }
1108
1109 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1110 return returnBool(false);
1111
1112 return false;
1113}
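// Illustrative note, not part of the original file: on a typical target where
// byte-sized atomics fit within the inline atomic width, the handler above lets
//   static_assert(__atomic_always_lock_free(1, 0));
// fold to true; larger or non-power-of-two sizes may not be constant-foldable.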
1114
1115/// __builtin_complex(float A, float B);
1116 static bool interp__builtin_complex(InterpState &S, CodePtr OpPC,
1117 const InterpFrame *Frame,
1118 const Function *Func,
1119 const CallExpr *Call) {
1120 const Floating &Arg2 = S.Stk.peek<Floating>();
1121 const Floating &Arg1 = S.Stk.peek<Floating>(align(primSize(PT_Float)) * 2);
1122 Pointer &Result = S.Stk.peek<Pointer>(align(primSize(PT_Float)) * 2 +
1123 align(primSize(PT_Ptr)));
1124
1125 Result.atIndex(0).deref<Floating>() = Arg1;
1126 Result.atIndex(0).initialize();
1127 Result.atIndex(1).deref<Floating>() = Arg2;
1128 Result.atIndex(1).initialize();
1129 Result.initialize();
1130
1131 return true;
1132}
1133
1134/// __builtin_is_aligned()
1135/// __builtin_align_up()
1136/// __builtin_align_down()
1137/// The first parameter is either an integer or a pointer.
1138/// The second parameter is the requested alignment as an integer.
1139 static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC,
1140 const InterpFrame *Frame,
1141 const Function *Func,
1142 const CallExpr *Call) {
1143 unsigned BuiltinOp = Func->getBuiltinID();
1144 unsigned CallSize = callArgSize(S, Call);
1145
1146 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1147 const APSInt &Alignment = peekToAPSInt(S.Stk, AlignmentT);
1148
1149 if (Alignment < 0 || !Alignment.isPowerOf2()) {
1150 S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
1151 return false;
1152 }
1153 unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
1154 APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
1155 if (APSInt::compareValues(Alignment, MaxValue) > 0) {
1156 S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
1157 << MaxValue << Call->getArg(0)->getType() << Alignment;
1158 return false;
1159 }
1160
1161 // The first parameter is either an integer or a pointer (but not a function
1162 // pointer).
1163 PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));
1164
1165 if (isIntegralType(FirstArgT)) {
1166 const APSInt &Src = peekToAPSInt(S.Stk, FirstArgT, CallSize);
1167 APSInt Align = Alignment.extOrTrunc(Src.getBitWidth());
1168 if (BuiltinOp == Builtin::BI__builtin_align_up) {
1169 APSInt AlignedVal =
1170 APSInt((Src + (Align - 1)) & ~(Align - 1), Src.isUnsigned());
1171 pushInteger(S, AlignedVal, Call->getType());
1172 } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
1173 APSInt AlignedVal = APSInt(Src & ~(Align - 1), Src.isUnsigned());
1174 pushInteger(S, AlignedVal, Call->getType());
1175 } else {
1176 assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
1177 S.Stk.push<Boolean>((Src & (Align - 1)) == 0);
1178 }
1179 return true;
1180 }
1181
1182 assert(FirstArgT == PT_Ptr);
1183 const Pointer &Ptr = S.Stk.peek<Pointer>(CallSize);
1184
1185 unsigned PtrOffset = Ptr.getByteOffset();
1186 PtrOffset = Ptr.getIndex();
1187 CharUnits BaseAlignment =
1188 S.getASTContext().getDeclAlign(Ptr.getDeclDesc()->asValueDecl());
1189 CharUnits PtrAlign =
1190 BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));
1191
1192 if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
1193 if (PtrAlign.getQuantity() >= Alignment) {
1194 S.Stk.push<Boolean>(true);
1195 return true;
1196 }
1197 // If the alignment is not known to be sufficient, some cases could still
1198 // be aligned at run time. However, if the requested alignment is less or
1199 // equal to the base alignment and the offset is not aligned, we know that
1200 // the run-time value can never be aligned.
1201 if (BaseAlignment.getQuantity() >= Alignment &&
1202 PtrAlign.getQuantity() < Alignment) {
1203 S.Stk.push<Boolean>(false);
1204 return true;
1205 }
1206
1207 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
1208 << Alignment;
1209 return false;
1210 }
1211
1212 assert(BuiltinOp == Builtin::BI__builtin_align_down ||
1213 BuiltinOp == Builtin::BI__builtin_align_up);
1214
1215 // For align_up/align_down, we can return the same value if the alignment
1216 // is known to be greater or equal to the requested value.
1217 if (PtrAlign.getQuantity() >= Alignment) {
1218 S.Stk.push<Pointer>(Ptr);
1219 return true;
1220 }
1221
1222 // The alignment could be greater than the minimum at run-time, so we cannot
1223 // infer much about the resulting pointer value. One case is possible:
1224 // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
1225 // can infer the correct index if the requested alignment is smaller than
1226 // the base alignment so we can perform the computation on the offset.
1227 if (BaseAlignment.getQuantity() >= Alignment) {
1228 assert(Alignment.getBitWidth() <= 64 &&
1229 "Cannot handle > 64-bit address-space");
1230 uint64_t Alignment64 = Alignment.getZExtValue();
1231 CharUnits NewOffset =
1232 CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
1233 ? llvm::alignDown(PtrOffset, Alignment64)
1234 : llvm::alignTo(PtrOffset, Alignment64));
1235
1236 S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
1237 return true;
1238 }
1239
1240 // Otherwise, we cannot constant-evaluate the result.
1241 S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
1242 return false;
1243}
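// Illustrative note, not part of the original file: with integer operands the
// three alignment builtins fold directly, e.g.:
//   static_assert(__builtin_is_aligned(16, 8));
//   static_assert(__builtin_align_up(13, 8) == 16);
//   static_assert(__builtin_align_down(13, 8) == 8);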
1244
1245/// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
1246 static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC,
1247 const InterpFrame *Frame,
1248 const Function *Func,
1249 const CallExpr *Call) {
1250 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
1251
1252 // Might be called with function pointers in C.
1253 std::optional<PrimType> PtrT = S.Ctx.classify(Call->getArg(0));
1254 if (PtrT != PT_Ptr)
1255 return false;
1256
1257 unsigned ArgSize = callArgSize(S, Call);
1258 const Pointer &Ptr = S.Stk.peek<Pointer>(ArgSize);
1259 std::optional<APSInt> ExtraOffset;
1260 APSInt Alignment;
1261 if (Call->getNumArgs() == 2) {
1262 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
1263 } else {
1264 PrimType AlignmentT = *S.Ctx.classify(Call->getArg(1));
1265 PrimType ExtraOffsetT = *S.Ctx.classify(Call->getArg(2));
1266 Alignment = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)),
1267 align(primSize(AlignmentT)) +
1268 align(primSize(ExtraOffsetT)));
1269 ExtraOffset = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));
1270 }
1271
1272 CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
1273
1274 // If there is a base object, then it must have the correct alignment.
1275 if (Ptr.isBlockPointer()) {
1276 CharUnits BaseAlignment;
1277 if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
1278 BaseAlignment = S.getASTContext().getDeclAlign(VD);
1279 else if (const auto *E = Ptr.getDeclDesc()->asExpr())
1280 BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);
1281
1282 if (BaseAlignment < Align) {
1283 S.CCEDiag(Call->getArg(0),
1284 diag::note_constexpr_baa_insufficient_alignment)
1285 << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
1286 return false;
1287 }
1288 }
1289
1290 APValue AV = Ptr.toAPValue(S.getASTContext());
1291 CharUnits AVOffset = AV.getLValueOffset();
1292 if (ExtraOffset)
1293 AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
1294 if (AVOffset.alignTo(Align) != AVOffset) {
1295 if (Ptr.isBlockPointer())
1296 S.CCEDiag(Call->getArg(0),
1297 diag::note_constexpr_baa_insufficient_alignment)
1298 << 1 << AVOffset.getQuantity() << Align.getQuantity();
1299 else
1300 S.CCEDiag(Call->getArg(0),
1301 diag::note_constexpr_baa_value_insufficient_alignment)
1302 << AVOffset.getQuantity() << Align.getQuantity();
1303 return false;
1304 }
1305
1306 S.Stk.push<Pointer>(Ptr);
1307 return true;
1308}
1309
1310 static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC,
1311 const InterpFrame *Frame,
1312 const Function *Func,
1313 const CallExpr *Call) {
1314 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1315 !Call->getArg(1)->getType()->isIntegerType())
1316 return false;
1317
1318 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1319 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1320 APSInt Val = peekToAPSInt(S.Stk, ValT,
1321 align(primSize(ValT)) + align(primSize(IndexT)));
1322 APSInt Index = peekToAPSInt(S.Stk, IndexT);
1323
1324 unsigned BitWidth = Val.getBitWidth();
1325 uint64_t Shift = Index.extractBitsAsZExtValue(8, 0);
1326 uint64_t Length = Index.extractBitsAsZExtValue(8, 8);
1327 Length = Length > BitWidth ? BitWidth : Length;
1328
1329 // Handle out of bounds cases.
1330 if (Length == 0 || Shift >= BitWidth) {
1331 pushInteger(S, 0, Call->getType());
1332 return true;
1333 }
1334
1335 uint64_t Result = Val.getZExtValue() >> Shift;
1336 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
1337 pushInteger(S, Result, Call->getType());
1338 return true;
1339}
1340
1341 static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC,
1342 const InterpFrame *Frame,
1343 const Function *Func,
1344 const CallExpr *Call) {
1345 QualType CallType = Call->getType();
1346 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1347 !Call->getArg(1)->getType()->isIntegerType() ||
1348 !CallType->isIntegerType())
1349 return false;
1350
1351 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1352 PrimType IndexT = *S.Ctx.classify(Call->getArg(1));
1353
1354 APSInt Val = peekToAPSInt(S.Stk, ValT,
1355 align(primSize(ValT)) + align(primSize(IndexT)));
1356 APSInt Idx = peekToAPSInt(S.Stk, IndexT);
1357
1358 unsigned BitWidth = Val.getBitWidth();
1359 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
1360
1361 if (Index < BitWidth)
1362 Val.clearHighBits(BitWidth - Index);
1363
1364 pushInteger(S, Val, CallType);
1365 return true;
1366}
1367
1368 static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC,
1369 const InterpFrame *Frame,
1370 const Function *Func,
1371 const CallExpr *Call) {
1372 QualType CallType = Call->getType();
1373 if (!CallType->isIntegerType() ||
1374 !Call->getArg(0)->getType()->isIntegerType())
1375 return false;
1376
1377 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1378 pushInteger(S, Val.countLeadingZeros(), CallType);
1379 return true;
1380}
1381
1382 static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC,
1383 const InterpFrame *Frame,
1384 const Function *Func,
1385 const CallExpr *Call) {
1386 QualType CallType = Call->getType();
1387 if (!CallType->isIntegerType() ||
1388 !Call->getArg(0)->getType()->isIntegerType())
1389 return false;
1390
1391 APSInt Val = peekToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(0)));
1392 pushInteger(S, Val.countTrailingZeros(), CallType);
1393 return true;
1394}
1395
1396 static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC,
1397 const InterpFrame *Frame,
1398 const Function *Func,
1399 const CallExpr *Call) {
1400 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1401 !Call->getArg(1)->getType()->isIntegerType())
1402 return false;
1403
1404 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1405 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1406
1407 APSInt Val =
1408 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1409 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1410
1411 unsigned BitWidth = Val.getBitWidth();
1412 APInt Result = APInt::getZero(BitWidth);
1413 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1414 if (Mask[I])
1415 Result.setBitVal(I, Val[P++]);
1416 }
1417 pushInteger(S, Result, Call->getType());
1418 return true;
1419}
1420
1421 static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC,
1422 const InterpFrame *Frame,
1423 const Function *Func,
1424 const CallExpr *Call) {
1425 if (Call->getNumArgs() != 2 || !Call->getArg(0)->getType()->isIntegerType() ||
1426 !Call->getArg(1)->getType()->isIntegerType())
1427 return false;
1428
1429 PrimType ValT = *S.Ctx.classify(Call->getArg(0));
1430 PrimType MaskT = *S.Ctx.classify(Call->getArg(1));
1431
1432 APSInt Val =
1433 peekToAPSInt(S.Stk, ValT, align(primSize(ValT)) + align(primSize(MaskT)));
1434 APSInt Mask = peekToAPSInt(S.Stk, MaskT);
1435
1436 unsigned BitWidth = Val.getBitWidth();
1437 APInt Result = APInt::getZero(BitWidth);
1438 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
1439 if (Mask[I])
1440 Result.setBitVal(P++, Val[I]);
1441 }
1442 pushInteger(S, Result, Call->getType());
1443 return true;
1444}
1445
1446 static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S,
1447 CodePtr OpPC,
1448 const InterpFrame *Frame,
1449 const Function *Func,
1450 const CallExpr *Call) {
1451 if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
1452 !Call->getArg(1)->getType()->isIntegerType() ||
1453 !Call->getArg(2)->getType()->isIntegerType())
1454 return false;
1455
1456 unsigned BuiltinOp = Func->getBuiltinID();
1457 APSInt CarryIn = getAPSIntParam(Frame, 0);
1458 APSInt LHS = getAPSIntParam(Frame, 1);
1459 APSInt RHS = getAPSIntParam(Frame, 2);
1460
1461 bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
1462 BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;
1463
1464 unsigned BitWidth = LHS.getBitWidth();
1465 unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
1466 APInt ExResult =
1467 IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
1468 : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));
1469
1470 APInt Result = ExResult.extractBits(BitWidth, 0);
1471 APSInt CarryOut =
1472 APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);
1473
1474 Pointer &CarryOutPtr = S.Stk.peek<Pointer>();
1475 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
1476 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
1477 assignInteger(CarryOutPtr, CarryOutT, APSInt(Result, true));
1478
1479 pushInteger(S, CarryOut, Call->getType());
1480
1481 return true;
1482}
1483
1484 static bool interp__builtin_os_log_format_buffer_size(InterpState &S,
1485 CodePtr OpPC,
1486 const InterpFrame *Frame,
1487 const Function *Func,
1488 const CallExpr *Call) {
1489 analyze_os_log::OSLogBufferLayout Layout;
1490 analyze_os_log::computeOSLogBufferLayout(S.getASTContext(), Call, Layout);
1491 pushInteger(S, Layout.size().getQuantity(), Call->getType());
1492 return true;
1493}
1494
1495 static bool interp__builtin_ptrauth_string_discriminator(
1496 InterpState &S, CodePtr OpPC, const InterpFrame *Frame,
1497 const Function *Func, const CallExpr *Call) {
1498 const auto &Ptr = S.Stk.peek<Pointer>();
1499 assert(Ptr.getFieldDesc()->isPrimitiveArray());
1500
1501 StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
1502 uint64_t Result = getPointerAuthStableSipHash(R);
1503 pushInteger(S, Result, Call->getType());
1504 return true;
1505}
1506
1507// FIXME: This implementation is not complete.
1508// The Compiler instance we create cannot access the current stack frame, local
1509// variables, function parameters, etc. We also need protection from
1510// side-effects, fatal errors, etc.
1511 static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC,
1512 const InterpFrame *Frame,
1513 const Function *Func,
1514 const CallExpr *Call) {
1515 const Expr *Arg = Call->getArg(0);
1516 QualType ArgType = Arg->getType();
1517
1518 auto returnInt = [&S, Call](bool Value) -> bool {
1519 pushInteger(S, Value, Call->getType());
1520 return true;
1521 };
1522
1523 // __builtin_constant_p always has one operand. The rules which gcc follows
1524 // are not precisely documented, but are as follows:
1525 //
1526 // - If the operand is of integral, floating, complex or enumeration type,
1527 // and can be folded to a known value of that type, it returns 1.
1528 // - If the operand can be folded to a pointer to the first character
1529 // of a string literal (or such a pointer cast to an integral type)
1530 // or to a null pointer or an integer cast to a pointer, it returns 1.
1531 //
1532 // Otherwise, it returns 0.
1533 //
1534 // FIXME: GCC also intends to return 1 for literals of aggregate types, but
1535 // its support for this did not work prior to GCC 9 and is not yet well
1536 // understood.
1537 if (ArgType->isIntegralOrEnumerationType() || ArgType->isFloatingType() ||
1538 ArgType->isAnyComplexType() || ArgType->isPointerType() ||
1539 ArgType->isNullPtrType()) {
1540 InterpStack Stk;
1541 Compiler<EvalEmitter> C(S.Ctx, S.P, S, Stk);
1542 auto Res = C.interpretExpr(Arg, /*ConvertResultToRValue=*/Arg->isGLValue());
1543 if (Res.isInvalid()) {
1544 C.cleanup();
1545 Stk.clear();
1546 }
1547
1548 if (!Res.isInvalid() && !Res.empty()) {
1549 const APValue &LV = Res.toAPValue();
1550 if (LV.isLValue()) {
1551 APValue::LValueBase Base = LV.getLValueBase();
1552 if (Base.isNull()) {
1553 // A null base is acceptable.
1554 return returnInt(true);
1555 } else if (const auto *E = Base.dyn_cast<const Expr *>()) {
1556 if (!isa<StringLiteral>(E))
1557 return returnInt(false);
1558 return returnInt(LV.getLValueOffset().isZero());
1559 } else if (Base.is<TypeInfoLValue>()) {
1560 // Surprisingly, GCC considers __builtin_constant_p(&typeid(int)) to
1561 // evaluate to true.
1562 return returnInt(true);
1563 } else {
1564 // Any other base is not constant enough for GCC.
1565 return returnInt(false);
1566 }
1567 }
1568 }
1569
1570 // Otherwise, any constant value is good enough.
1571 return returnInt(true);
1572 }
1573
1574 return returnInt(false);
1575}
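// Illustrative note, not part of the original file: the rules described above
// give, for example:
//   static_assert(__builtin_constant_p(42) == 1);
//   static_assert(__builtin_constant_p("abc") == 1);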
1576
1577 static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC,
1578 const InterpFrame *Frame,
1579 const Function *Func,
1580 const CallExpr *Call) {
1581 // A call to __operator_new is only valid within std::allocator<T>::allocate.
1582 // Walk up the call stack to find the appropriate caller and get the
1583 // element type from it.
1584 QualType ElemType;
1585
1586 for (const InterpFrame *F = Frame; F; F = F->Caller) {
1587 const Function *Func = F->getFunction();
1588 if (!Func)
1589 continue;
1590 const auto *MD = dyn_cast_if_present<CXXMethodDecl>(Func->getDecl());
1591 if (!MD)
1592 continue;
1593 const IdentifierInfo *FnII = MD->getIdentifier();
1594 if (!FnII || !FnII->isStr("allocate"))
1595 continue;
1596
1597 const auto *CTSD =
1598 dyn_cast<ClassTemplateSpecializationDecl>(MD->getParent());
1599 if (!CTSD)
1600 continue;
1601
1602 const IdentifierInfo *ClassII = CTSD->getIdentifier();
1603 const TemplateArgumentList &TAL = CTSD->getTemplateArgs();
1604 if (CTSD->isInStdNamespace() && ClassII && ClassII->isStr("allocator") &&
1605 TAL.size() >= 1 && TAL[0].getKind() == TemplateArgument::Type) {
1606 ElemType = TAL[0].getAsType();
1607 break;
1608 }
1609 }
1610
1611 if (ElemType.isNull()) {
1612 S.FFDiag(Call, S.getLangOpts().CPlusPlus20
1613 ? diag::note_constexpr_new_untyped
1614 : diag::note_constexpr_new);
1615 return false;
1616 }
1617
1618 if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
1619 S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
1620 << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
1621 return false;
1622 }
1623
1624 APSInt Bytes = peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(0)));
1625 CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
1626 assert(!ElemSize.isZero());
1627 // Divide the number of bytes by sizeof(ElemType), so we get the number of
1628 // elements we should allocate.
1629 APInt NumElems, Remainder;
1630 APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
1631 APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
1632 if (Remainder != 0) {
1633 // This likely indicates a bug in the implementation of 'std::allocator'.
1634 S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
1635 << Bytes << APSInt(ElemSizeAP, true) << ElemType;
1636 return false;
1637 }
1638
1639 // NB: The same check we're using in CheckArraySize()
1640 if (NumElems.getActiveBits() >
1641 ConstantArrayType::getMaxSizeBits(S.getASTContext()) ||
1642 NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
1643 // FIXME: NoThrow check?
1644 const SourceInfo &Loc = S.Current->getSource(OpPC);
1645 S.FFDiag(Loc, diag::note_constexpr_new_too_large)
1646 << NumElems.getZExtValue();
1647 return false;
1648 }
1649
1650 std::optional<PrimType> ElemT = S.getContext().classify(ElemType);
1651 DynamicAllocator &Allocator = S.getAllocator();
1652 if (ElemT) {
1653 if (NumElems.ule(1)) {
1654 const Descriptor *Desc =
1655 S.P.createDescriptor(Call, *ElemT, Descriptor::InlineDescMD,
1656 /*IsConst=*/false, /*IsTemporary=*/false,
1657 /*IsMutable=*/false);
1658 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1659 DynamicAllocator::Form::Operator);
1660 assert(B);
1661
1662 S.Stk.push<Pointer>(B);
1663 return true;
1664 }
1665 assert(NumElems.ugt(1));
1666
1667 Block *B =
1668 Allocator.allocate(Call, *ElemT, NumElems.getZExtValue(),
1669 S.Ctx.getEvalID(), DynamicAllocator::Form::Operator);
1670 assert(B);
1671 S.Stk.push<Pointer>(B);
1672 return true;
1673 }
1674
1675 assert(!ElemT);
1676 // Structs etc.
1677 const Descriptor *Desc = S.P.createDescriptor(
1678 Call, ElemType.getTypePtr(), Descriptor::InlineDescMD,
1679 /*IsConst=*/false, /*IsTemporary=*/false, /*IsMutable=*/false,
1680 /*Init=*/nullptr);
1681
1682 if (NumElems.ule(1)) {
1683 Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
1684 DynamicAllocator::Form::Operator);
1685 assert(B);
1686 S.Stk.push<Pointer>(B);
1687 return true;
1688 }
1689
1690 Block *B =
1691 Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
1692 DynamicAllocator::Form::Operator);
1693 assert(B);
1694 S.Stk.push<Pointer>(B);
1695 return true;
1696}
1697
1698 static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC,
1699 const InterpFrame *Frame,
1700 const Function *Func,
1701 const CallExpr *Call) {
1702 const Expr *Source = nullptr;
1703 const Block *BlockToDelete = nullptr;
1704
1705 {
1706 const Pointer &Ptr = S.Stk.peek<Pointer>();
1707
1708 if (Ptr.isZero()) {
1709 S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
1710 return true;
1711 }
1712
1713 Source = Ptr.getDeclDesc()->asExpr();
1714 BlockToDelete = Ptr.block();
1715 }
1716 assert(BlockToDelete);
1717
1718 DynamicAllocator &Allocator = S.getAllocator();
1719 const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
1720 std::optional<DynamicAllocator::Form> AllocForm =
1721 Allocator.getAllocationForm(Source);
1722
1723 if (!Allocator.deallocate(Source, BlockToDelete, S)) {
1724 // Nothing has been deallocated; this must be a double-delete.
1725 const SourceInfo &Loc = S.Current->getSource(OpPC);
1726 S.FFDiag(Loc, diag::note_constexpr_double_delete);
1727 return false;
1728 }
1729 assert(AllocForm);
1730
1731 return CheckNewDeleteForms(
1732 S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
1733}
1734
1735 static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC,
1736 const InterpFrame *Frame,
1737 const Function *Func,
1738 const CallExpr *Call) {
1739 const Floating &Arg0 = S.Stk.peek<Floating>();
1740 S.Stk.push<Floating>(Arg0);
1741 return true;
1742}
1743
1744 static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC,
1745 const InterpFrame *Frame,
1746 const Function *Func,
1747 const CallExpr *Call) {
1748 const Pointer &Arg = S.Stk.peek<Pointer>();
1749 assert(Arg.getFieldDesc()->isPrimitiveArray());
1750
1751 unsigned ID = Func->getBuiltinID();
1752 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1753 assert(Call->getType() == ElemType);
1754 PrimType ElemT = *S.getContext().classify(ElemType);
1755 unsigned NumElems = Arg.getNumElems();
1756
1757 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1758 T Result = Arg.atIndex(0).deref<T>();
1759 unsigned BitWidth = Result.bitWidth();
1760 for (unsigned I = 1; I != NumElems; ++I) {
1761 T Elem = Arg.atIndex(I).deref<T>();
1762 T PrevResult = Result;
1763
1764 if (ID == Builtin::BI__builtin_reduce_add) {
1765 if (T::add(Result, Elem, BitWidth, &Result)) {
1766 unsigned OverflowBits = BitWidth + 1;
1767 (void)handleOverflow(S, OpPC,
1768 (PrevResult.toAPSInt(OverflowBits) +
1769 Elem.toAPSInt(OverflowBits)));
1770 return false;
1771 }
1772 } else if (ID == Builtin::BI__builtin_reduce_mul) {
1773 if (T::mul(Result, Elem, BitWidth, &Result)) {
1774 unsigned OverflowBits = BitWidth * 2;
1775 (void)handleOverflow(S, OpPC,
1776 (PrevResult.toAPSInt(OverflowBits) *
1777 Elem.toAPSInt(OverflowBits)));
1778 return false;
1779 }
1780
1781 } else if (ID == Builtin::BI__builtin_reduce_and) {
1782 (void)T::bitAnd(Result, Elem, BitWidth, &Result);
1783 } else if (ID == Builtin::BI__builtin_reduce_or) {
1784 (void)T::bitOr(Result, Elem, BitWidth, &Result);
1785 } else if (ID == Builtin::BI__builtin_reduce_xor) {
1786 (void)T::bitXor(Result, Elem, BitWidth, &Result);
1787 } else {
1788 llvm_unreachable("Unhandled vector reduce builtin");
1789 }
1790 }
1791 pushInteger(S, Result.toAPSInt(), Call->getType());
1792 });
1793
1794 return true;
1795}
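// Illustrative note (not part of the original source): with a recent clang as
// the host compiler, the reductions handled above are usable directly in
// constant expressions, e.g.
//
//   typedef int v4si __attribute__((vector_size(16)));
//   constexpr v4si V = {1, 2, 3, 4};
//   static_assert(__builtin_reduce_add(V) == 10);
//   static_assert(__builtin_reduce_mul(V) == 24);
//   static_assert(__builtin_reduce_xor(V) == 4);   // 1 ^ 2 ^ 3 ^ 4
//
// Overflow in the add/mul cases is diagnosed via handleOverflow() rather than
// silently wrapping, matching the checks in the loop above.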
1796
1797/// Can be called with an integer or vector as the first and only parameter.
1798static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC,
1799 const InterpFrame *Frame,
1800 const Function *Func,
1801 const CallExpr *Call) {
1802 assert(Call->getNumArgs() == 1);
1803 if (Call->getArg(0)->getType()->isIntegerType()) {
1804 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
1805 APSInt Val = peekToAPSInt(S.Stk, ArgT);
1806 pushInteger(S, Val.popcount(), Call->getType());
1807 return true;
1808 }
1809 // Otherwise, the argument must be a vector.
1810 assert(Call->getArg(0)->getType()->isVectorType());
1811 const Pointer &Arg = S.Stk.peek<Pointer>();
1812 assert(Arg.getFieldDesc()->isPrimitiveArray());
1813 const Pointer &Dst = S.Stk.peek<Pointer>(primSize(PT_Ptr) * 2);
1814 assert(Dst.getFieldDesc()->isPrimitiveArray());
1815 assert(Arg.getFieldDesc()->getNumElems() ==
1816 Dst.getFieldDesc()->getNumElems());
1817
1818 QualType ElemType = Arg.getFieldDesc()->getElemQualType();
1819 PrimType ElemT = *S.getContext().classify(ElemType);
1820 unsigned NumElems = Arg.getNumElems();
1821
1822 // FIXME: Reading from uninitialized vector elements?
1823 for (unsigned I = 0; I != NumElems; ++I) {
1824 INT_TYPE_SWITCH_NO_BOOL(ElemT, {
1825 Dst.atIndex(I).deref<T>() =
1826 T::from(Arg.atIndex(I).deref<T>().toAPSInt().popcount());
1827 Dst.atIndex(I).initialize();
1828 });
1829 }
1830
1831 return true;
1832}
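// Illustrative note (not part of the original source): the scalar and vector
// forms handled above correspond to uses like the following, assuming a
// recent clang as the host compiler:
//
//   static_assert(__builtin_elementwise_popcount(0xF0u) == 4);
//
//   typedef unsigned v4su __attribute__((vector_size(16)));
//   constexpr v4su In = {1u, 3u, 7u, 15u};
//   constexpr v4su Out = __builtin_elementwise_popcount(In);   // {1, 2, 3, 4}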
1833
1834static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC,
1835 const InterpFrame *Frame,
1836 const Function *Func, const CallExpr *Call) {
1837 assert(Call->getNumArgs() == 3);
1838 unsigned ID = Func->getBuiltinID();
1839 Pointer DestPtr = getParam<Pointer>(Frame, 0);
1840 const Pointer &SrcPtr = getParam<Pointer>(Frame, 1);
1841 const APSInt &Size =
1842 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1843 assert(!Size.isSigned() && "memcpy and friends take an unsigned size");
1844
1845 if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
1846 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1847
1848 bool Move = (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove);
1849
1850 // If the size is zero, we treat this as always being a valid no-op.
1851 if (Size.isZero()) {
1852 S.Stk.push<Pointer>(DestPtr);
1853 return true;
1854 }
1855
1856 if (SrcPtr.isZero() || DestPtr.isZero()) {
1857 Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
1858 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
1859 << /*IsMove=*/Move << /*IsWchar=*/false << !SrcPtr.isZero()
1860 << DiagPtr.toDiagnosticString(S.getASTContext());
1861 return false;
1862 }
1863
1864 QualType ElemType;
1865 if (DestPtr.getFieldDesc()->isArray())
1866 ElemType = DestPtr.getFieldDesc()->getElemQualType();
1867 else
1868 ElemType = DestPtr.getType();
1869
1870 unsigned ElemSize =
1871 S.getASTContext().getTypeSizeInChars(ElemType).getQuantity();
1872 if (Size.urem(ElemSize) != 0) {
1873 S.FFDiag(S.Current->getSource(OpPC),
1874 diag::note_constexpr_memcpy_unsupported)
1875 << Move << /*IsWchar=*/false << 0 << ElemType << Size << ElemSize;
1876 return false;
1877 }
1878
1879 QualType SrcElemType;
1880 if (SrcPtr.getFieldDesc()->isArray())
1881 SrcElemType = SrcPtr.getFieldDesc()->getElemQualType();
1882 else
1883 SrcElemType = SrcPtr.getType();
1884
1885 if (!S.getASTContext().hasSameUnqualifiedType(ElemType, SrcElemType)) {
1886 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
1887 << Move << SrcElemType << ElemType;
1888 return false;
1889 }
1890
1891 // Check for overlapping memory regions.
1892 if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
1893 unsigned SrcIndex = SrcPtr.getIndex() * SrcPtr.elemSize();
1894 unsigned DstIndex = DestPtr.getIndex() * DestPtr.elemSize();
1895 unsigned N = Size.getZExtValue();
1896
1897 if ((SrcIndex <= DstIndex && (SrcIndex + N) > DstIndex) ||
1898 (DstIndex <= SrcIndex && (DstIndex + N) > SrcIndex)) {
1899 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
1900 << /*IsWChar=*/false;
1901 return false;
1902 }
1903 }
1904
1905 // As a last resort, reject dummy pointers.
1906 if (DestPtr.isDummy() || SrcPtr.isDummy())
1907 return false;
1908 assert(Size.getZExtValue() % ElemSize == 0);
1909 if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size.getZExtValue()).toBits()))
1910 return false;
1911
1912 S.Stk.push<Pointer>(DestPtr);
1913 return true;
1914}
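// Illustrative note (not part of the original source; the helper name is made
// up): the element-size and type checks above allow whole-element copies
// between arrays of the same type in constant expressions, for example:
//
//   constexpr int copied() {
//     int Src[3] = {1, 2, 3};
//     int Dst[3] = {};
//     __builtin_memcpy(Dst, Src, sizeof(Src));   // 12 bytes == 3 int elements
//     return Dst[2];
//   }
//   static_assert(copied() == 3);
//
// A size that is not a multiple of the element size, or mismatched element
// types, is rejected with the note_constexpr_memcpy_* diagnostics above.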
1915
1916/// Determine if T is a character type for which we guarantee that
1917/// sizeof(T) == 1.
1918static bool isOneByteCharacterType(QualType T) {
1919 return T->isCharType() || T->isChar8Type();
1920}
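// For example, 'char', 'signed char', 'unsigned char' and 'char8_t' satisfy
// this predicate; 'wchar_t', 'char16_t' and 'char32_t' do not, which is why
// the narrow memcmp/bcmp path below insists on one-byte character types.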
1921
1922static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC,
1923 const InterpFrame *Frame,
1924 const Function *Func, const CallExpr *Call) {
1925 assert(Call->getNumArgs() == 3);
1926 unsigned ID = Func->getBuiltinID();
1927 const Pointer &PtrA = getParam<Pointer>(Frame, 0);
1928 const Pointer &PtrB = getParam<Pointer>(Frame, 1);
1929 const APSInt &Size =
1930 peekToAPSInt(S.Stk, *S.getContext().classify(Call->getArg(2)));
1931
1932 if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
1933 ID == Builtin::BIwmemcmp)
1934 diagnoseNonConstexprBuiltin(S, OpPC, ID);
1935
1936 if (Size.isZero()) {
1937 pushInteger(S, 0, Call->getType());
1938 return true;
1939 }
1940
1941 bool IsWide =
1942 (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);
1943
1944 const ASTContext &ASTCtx = S.getASTContext();
1945 // FIXME: This is an arbitrary limitation the current constant interpreter
1946 // had. We could remove this.
1947 if (!IsWide && (!isOneByteCharacterType(PtrA.getType()) ||
1948 !isOneByteCharacterType(PtrB.getType()))) {
1949 S.FFDiag(S.Current->getSource(OpPC),
1950 diag::note_constexpr_memcmp_unsupported)
1951 << ("'" + ASTCtx.BuiltinInfo.getName(ID) + "'").str() << PtrA.getType()
1952 << PtrB.getType();
1953 return false;
1954 }
1955
1956 if (PtrA.isDummy() || PtrB.isDummy())
1957 return false;
1958
1959 // Now, read both pointers to a buffer and compare those.
1960 BitcastBuffer BufferA(
1961 Bits(ASTCtx.getTypeSize(PtrA.getFieldDesc()->getType())));
1962 readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
1963 // FIXME: The swapping here is UNDOING something we do when reading the
1964 // data into the buffer.
1965 if (ASTCtx.getTargetInfo().isBigEndian())
1966 swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());
1967
1968 BitcastBuffer BufferB(
1969 Bits(ASTCtx.getTypeSize(PtrB.getFieldDesc()->getType())));
1970 readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
1971 // FIXME: The swapping here is UNDOING something we do when reading the
1972 // data into the buffer.
1973 if (ASTCtx.getTargetInfo().isBigEndian())
1974 swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());
1975
1976 size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
1977 BufferB.byteSize().getQuantity());
1978
1979 unsigned ElemSize = 1;
1980 if (IsWide)
1981 ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
1982 // The Size given for the wide variants is in wide-char units. Convert it
1983 // to bytes.
1984 size_t ByteSize = Size.getZExtValue() * ElemSize;
1985 size_t CmpSize = std::min(MinBufferSize, ByteSize);
1986
1987 for (size_t I = 0; I != CmpSize; I += ElemSize) {
1988 if (IsWide) {
1989 INT_TYPE_SWITCH(*S.getContext().classify(ASTCtx.getWCharType()), {
1990 T A = *reinterpret_cast<T *>(BufferA.Data.get() + I);
1991 T B = *reinterpret_cast<T *>(BufferB.Data.get() + I);
1992 if (A < B) {
1993 pushInteger(S, -1, Call->getType());
1994 return true;
1995 } else if (A > B) {
1996 pushInteger(S, 1, Call->getType());
1997 return true;
1998 }
1999 });
2000 } else {
2001 std::byte A = BufferA.Data[I];
2002 std::byte B = BufferB.Data[I];
2003
2004 if (A < B) {
2005 pushInteger(S, -1, Call->getType());
2006 return true;
2007 } else if (A > B) {
2008 pushInteger(S, 1, Call->getType());
2009 return true;
2010 }
2011 }
2012 }
2013
2014 // We compared CmpSize bytes above. If the limiting factor was the Size
2015 // passed, we're done and the result is equality (0).
2016 if (ByteSize <= CmpSize) {
2017 pushInteger(S, 0, Call->getType());
2018 return true;
2019 }
2020
2021 // However, if we read all the available bytes but were instructed to read
2022 // even more, diagnose this as a "read of dereferenced one-past-the-end
2023 // pointer". This is what would happen if we called CheckRead() on every array
2024 // element.
2025 S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
2026 << AK_Read << S.Current->getRange(OpPC);
2027 return false;
2028}
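// Illustrative note (not part of the original source): with the buffer
// comparison above, the narrow variants behave like their runtime
// counterparts in constant expressions, e.g.
//
//   constexpr char A[] = {'a', 'b', 'c'};
//   constexpr char B[] = {'a', 'b', 'd'};
//   static_assert(__builtin_memcmp(A, B, 2) == 0);
//   static_assert(__builtin_memcmp(A, B, 3) < 0);
//   static_assert(__builtin_bcmp(A, B, 3) != 0);
//
// Asking for more bytes than either array holds is diagnosed as a read past
// the end, as implemented at the bottom of the function.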
2029
2030bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F,
2031 const CallExpr *Call, uint32_t BuiltinID) {
2032 const InterpFrame *Frame = S.Current;
2033
2034 std::optional<PrimType> ReturnT = S.getContext().classify(Call);
2035
2036 switch (BuiltinID) {
2037 case Builtin::BI__builtin_is_constant_evaluated:
2038 if (!interp__builtin_is_constant_evaluated(S, OpPC, Frame, Call))
2039 return false;
2040 break;
2041 case Builtin::BI__builtin_assume:
2042 case Builtin::BI__assume:
2043 break;
2044 case Builtin::BI__builtin_strcmp:
2045 case Builtin::BIstrcmp:
2046 case Builtin::BI__builtin_strncmp:
2047 case Builtin::BIstrncmp:
2048 if (!interp__builtin_strcmp(S, OpPC, Frame, F, Call))
2049 return false;
2050 break;
2051 case Builtin::BI__builtin_strlen:
2052 case Builtin::BIstrlen:
2053 case Builtin::BI__builtin_wcslen:
2054 case Builtin::BIwcslen:
2055 if (!interp__builtin_strlen(S, OpPC, Frame, F, Call))
2056 return false;
2057 break;
2058 case Builtin::BI__builtin_nan:
2059 case Builtin::BI__builtin_nanf:
2060 case Builtin::BI__builtin_nanl:
2061 case Builtin::BI__builtin_nanf16:
2062 case Builtin::BI__builtin_nanf128:
2063 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/false))
2064 return false;
2065 break;
2066 case Builtin::BI__builtin_nans:
2067 case Builtin::BI__builtin_nansf:
2068 case Builtin::BI__builtin_nansl:
2069 case Builtin::BI__builtin_nansf16:
2070 case Builtin::BI__builtin_nansf128:
2071 if (!interp__builtin_nan(S, OpPC, Frame, F, /*Signaling=*/true))
2072 return false;
2073 break;
2074
2075 case Builtin::BI__builtin_huge_val:
2076 case Builtin::BI__builtin_huge_valf:
2077 case Builtin::BI__builtin_huge_vall:
2078 case Builtin::BI__builtin_huge_valf16:
2079 case Builtin::BI__builtin_huge_valf128:
2080 case Builtin::BI__builtin_inf:
2081 case Builtin::BI__builtin_inff:
2082 case Builtin::BI__builtin_infl:
2083 case Builtin::BI__builtin_inff16:
2084 case Builtin::BI__builtin_inff128:
2085 if (!interp__builtin_inf(S, OpPC, Frame, F))
2086 return false;
2087 break;
2088 case Builtin::BI__builtin_copysign:
2089 case Builtin::BI__builtin_copysignf:
2090 case Builtin::BI__builtin_copysignl:
2091 case Builtin::BI__builtin_copysignf128:
2092 if (!interp__builtin_copysign(S, OpPC, Frame, F))
2093 return false;
2094 break;
2095
2096 case Builtin::BI__builtin_fmin:
2097 case Builtin::BI__builtin_fminf:
2098 case Builtin::BI__builtin_fminl:
2099 case Builtin::BI__builtin_fminf16:
2100 case Builtin::BI__builtin_fminf128:
2101 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2102 return false;
2103 break;
2104
2105 case Builtin::BI__builtin_fminimum_num:
2106 case Builtin::BI__builtin_fminimum_numf:
2107 case Builtin::BI__builtin_fminimum_numl:
2108 case Builtin::BI__builtin_fminimum_numf16:
2109 case Builtin::BI__builtin_fminimum_numf128:
2110 if (!interp__builtin_fmin(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2111 return false;
2112 break;
2113
2114 case Builtin::BI__builtin_fmax:
2115 case Builtin::BI__builtin_fmaxf:
2116 case Builtin::BI__builtin_fmaxl:
2117 case Builtin::BI__builtin_fmaxf16:
2118 case Builtin::BI__builtin_fmaxf128:
2119 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/false))
2120 return false;
2121 break;
2122
2123 case Builtin::BI__builtin_fmaximum_num:
2124 case Builtin::BI__builtin_fmaximum_numf:
2125 case Builtin::BI__builtin_fmaximum_numl:
2126 case Builtin::BI__builtin_fmaximum_numf16:
2127 case Builtin::BI__builtin_fmaximum_numf128:
2128 if (!interp__builtin_fmax(S, OpPC, Frame, F, /*IsNumBuiltin=*/true))
2129 return false;
2130 break;
2131
2132 case Builtin::BI__builtin_isnan:
2133 if (!interp__builtin_isnan(S, OpPC, Frame, F, Call))
2134 return false;
2135 break;
2136 case Builtin::BI__builtin_issignaling:
2137 if (!interp__builtin_issignaling(S, OpPC, Frame, F, Call))
2138 return false;
2139 break;
2140
2141 case Builtin::BI__builtin_isinf:
2142 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/false, Call))
2143 return false;
2144 break;
2145
2146 case Builtin::BI__builtin_isinf_sign:
2147 if (!interp__builtin_isinf(S, OpPC, Frame, F, /*Sign=*/true, Call))
2148 return false;
2149 break;
2150
2151 case Builtin::BI__builtin_isfinite:
2152 if (!interp__builtin_isfinite(S, OpPC, Frame, F, Call))
2153 return false;
2154 break;
2155 case Builtin::BI__builtin_isnormal:
2156 if (!interp__builtin_isnormal(S, OpPC, Frame, F, Call))
2157 return false;
2158 break;
2159 case Builtin::BI__builtin_issubnormal:
2160 if (!interp__builtin_issubnormal(S, OpPC, Frame, F, Call))
2161 return false;
2162 break;
2163 case Builtin::BI__builtin_iszero:
2164 if (!interp__builtin_iszero(S, OpPC, Frame, F, Call))
2165 return false;
2166 break;
2167 case Builtin::BI__builtin_signbit:
2168 case Builtin::BI__builtin_signbitf:
2169 case Builtin::BI__builtin_signbitl:
2170 if (!interp__builtin_signbit(S, OpPC, Frame, F, Call))
2171 return false;
2172 break;
2173 case Builtin::BI__builtin_isgreater:
2174 case Builtin::BI__builtin_isgreaterequal:
2175 case Builtin::BI__builtin_isless:
2176 case Builtin::BI__builtin_islessequal:
2177 case Builtin::BI__builtin_islessgreater:
2178 case Builtin::BI__builtin_isunordered:
2179 if (!interp_floating_comparison(S, OpPC, Frame, F, Call))
2180 return false;
2181 break;
2182 case Builtin::BI__builtin_isfpclass:
2183 if (!interp__builtin_isfpclass(S, OpPC, Frame, F, Call))
2184 return false;
2185 break;
2186 case Builtin::BI__builtin_fpclassify:
2187 if (!interp__builtin_fpclassify(S, OpPC, Frame, F, Call))
2188 return false;
2189 break;
2190
2191 case Builtin::BI__builtin_fabs:
2192 case Builtin::BI__builtin_fabsf:
2193 case Builtin::BI__builtin_fabsl:
2194 case Builtin::BI__builtin_fabsf128:
2195 if (!interp__builtin_fabs(S, OpPC, Frame, F))
2196 return false;
2197 break;
2198
2199 case Builtin::BI__builtin_abs:
2200 case Builtin::BI__builtin_labs:
2201 case Builtin::BI__builtin_llabs:
2202 if (!interp__builtin_abs(S, OpPC, Frame, F, Call))
2203 return false;
2204 break;
2205
2206 case Builtin::BI__builtin_popcount:
2207 case Builtin::BI__builtin_popcountl:
2208 case Builtin::BI__builtin_popcountll:
2209 case Builtin::BI__builtin_popcountg:
2210 case Builtin::BI__popcnt16: // Microsoft variants of popcount
2211 case Builtin::BI__popcnt:
2212 case Builtin::BI__popcnt64:
2213 if (!interp__builtin_popcount(S, OpPC, Frame, F, Call))
2214 return false;
2215 break;
2216
2217 case Builtin::BI__builtin_parity:
2218 case Builtin::BI__builtin_parityl:
2219 case Builtin::BI__builtin_parityll:
2220 if (!interp__builtin_parity(S, OpPC, Frame, F, Call))
2221 return false;
2222 break;
2223
2224 case Builtin::BI__builtin_clrsb:
2225 case Builtin::BI__builtin_clrsbl:
2226 case Builtin::BI__builtin_clrsbll:
2227 if (!interp__builtin_clrsb(S, OpPC, Frame, F, Call))
2228 return false;
2229 break;
2230
2231 case Builtin::BI__builtin_bitreverse8:
2232 case Builtin::BI__builtin_bitreverse16:
2233 case Builtin::BI__builtin_bitreverse32:
2234 case Builtin::BI__builtin_bitreverse64:
2235 if (!interp__builtin_bitreverse(S, OpPC, Frame, F, Call))
2236 return false;
2237 break;
2238
2239 case Builtin::BI__builtin_classify_type:
2240 if (!interp__builtin_classify_type(S, OpPC, Frame, F, Call))
2241 return false;
2242 break;
2243
2244 case Builtin::BI__builtin_expect:
2245 case Builtin::BI__builtin_expect_with_probability:
2246 if (!interp__builtin_expect(S, OpPC, Frame, F, Call))
2247 return false;
2248 break;
2249
2250 case Builtin::BI__builtin_rotateleft8:
2251 case Builtin::BI__builtin_rotateleft16:
2252 case Builtin::BI__builtin_rotateleft32:
2253 case Builtin::BI__builtin_rotateleft64:
2254 case Builtin::BI_rotl8: // Microsoft variants of rotate left
2255 case Builtin::BI_rotl16:
2256 case Builtin::BI_rotl:
2257 case Builtin::BI_lrotl:
2258 case Builtin::BI_rotl64:
2259 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/false))
2260 return false;
2261 break;
2262
2263 case Builtin::BI__builtin_rotateright8:
2264 case Builtin::BI__builtin_rotateright16:
2265 case Builtin::BI__builtin_rotateright32:
2266 case Builtin::BI__builtin_rotateright64:
2267 case Builtin::BI_rotr8: // Microsoft variants of rotate right
2268 case Builtin::BI_rotr16:
2269 case Builtin::BI_rotr:
2270 case Builtin::BI_lrotr:
2271 case Builtin::BI_rotr64:
2272 if (!interp__builtin_rotate(S, OpPC, Frame, F, Call, /*Right=*/true))
2273 return false;
2274 break;
2275
2276 case Builtin::BI__builtin_ffs:
2277 case Builtin::BI__builtin_ffsl:
2278 case Builtin::BI__builtin_ffsll:
2279 if (!interp__builtin_ffs(S, OpPC, Frame, F, Call))
2280 return false;
2281 break;
2282 case Builtin::BIaddressof:
2283 case Builtin::BI__addressof:
2284 case Builtin::BI__builtin_addressof:
2285 if (!interp__builtin_addressof(S, OpPC, Frame, F, Call))
2286 return false;
2287 break;
2288
2289 case Builtin::BIas_const:
2290 case Builtin::BIforward:
2291 case Builtin::BIforward_like:
2292 case Builtin::BImove:
2293 case Builtin::BImove_if_noexcept:
2294 if (!interp__builtin_move(S, OpPC, Frame, F, Call))
2295 return false;
2296 break;
2297
2298 case Builtin::BI__builtin_eh_return_data_regno:
2299 if (!interp__builtin_eh_return_data_regno(S, OpPC, Frame, F, Call))
2300 return false;
2301 break;
2302
2303 case Builtin::BI__builtin_launder:
2304 if (!noopPointer(S, OpPC, Frame, F, Call))
2305 return false;
2306 break;
2307
2308 case Builtin::BI__builtin_add_overflow:
2309 case Builtin::BI__builtin_sub_overflow:
2310 case Builtin::BI__builtin_mul_overflow:
2311 case Builtin::BI__builtin_sadd_overflow:
2312 case Builtin::BI__builtin_uadd_overflow:
2313 case Builtin::BI__builtin_uaddl_overflow:
2314 case Builtin::BI__builtin_uaddll_overflow:
2315 case Builtin::BI__builtin_usub_overflow:
2316 case Builtin::BI__builtin_usubl_overflow:
2317 case Builtin::BI__builtin_usubll_overflow:
2318 case Builtin::BI__builtin_umul_overflow:
2319 case Builtin::BI__builtin_umull_overflow:
2320 case Builtin::BI__builtin_umulll_overflow:
2321 case Builtin::BI__builtin_saddl_overflow:
2322 case Builtin::BI__builtin_saddll_overflow:
2323 case Builtin::BI__builtin_ssub_overflow:
2324 case Builtin::BI__builtin_ssubl_overflow:
2325 case Builtin::BI__builtin_ssubll_overflow:
2326 case Builtin::BI__builtin_smul_overflow:
2327 case Builtin::BI__builtin_smull_overflow:
2328 case Builtin::BI__builtin_smulll_overflow:
2329 if (!interp__builtin_overflowop(S, OpPC, Frame, F, Call))
2330 return false;
2331 break;
2332
2333 case Builtin::BI__builtin_addcb:
2334 case Builtin::BI__builtin_addcs:
2335 case Builtin::BI__builtin_addc:
2336 case Builtin::BI__builtin_addcl:
2337 case Builtin::BI__builtin_addcll:
2338 case Builtin::BI__builtin_subcb:
2339 case Builtin::BI__builtin_subcs:
2340 case Builtin::BI__builtin_subc:
2341 case Builtin::BI__builtin_subcl:
2342 case Builtin::BI__builtin_subcll:
2343 if (!interp__builtin_carryop(S, OpPC, Frame, F, Call))
2344 return false;
2345 break;
2346
2347 case Builtin::BI__builtin_clz:
2348 case Builtin::BI__builtin_clzl:
2349 case Builtin::BI__builtin_clzll:
2350 case Builtin::BI__builtin_clzs:
2351 case Builtin::BI__builtin_clzg:
2352 case Builtin::BI__lzcnt16: // Microsoft variants of count leading-zeroes
2353 case Builtin::BI__lzcnt:
2354 case Builtin::BI__lzcnt64:
2355 if (!interp__builtin_clz(S, OpPC, Frame, F, Call))
2356 return false;
2357 break;
2358
2359 case Builtin::BI__builtin_ctz:
2360 case Builtin::BI__builtin_ctzl:
2361 case Builtin::BI__builtin_ctzll:
2362 case Builtin::BI__builtin_ctzs:
2363 case Builtin::BI__builtin_ctzg:
2364 if (!interp__builtin_ctz(S, OpPC, Frame, F, Call))
2365 return false;
2366 break;
2367
2368 case Builtin::BI__builtin_bswap16:
2369 case Builtin::BI__builtin_bswap32:
2370 case Builtin::BI__builtin_bswap64:
2371 if (!interp__builtin_bswap(S, OpPC, Frame, F, Call))
2372 return false;
2373 break;
2374
2375 case Builtin::BI__atomic_always_lock_free:
2376 case Builtin::BI__atomic_is_lock_free:
2377 case Builtin::BI__c11_atomic_is_lock_free:
2378 if (!interp__builtin_atomic_lock_free(S, OpPC, Frame, F, Call))
2379 return false;
2380 break;
2381
2382 case Builtin::BI__builtin_complex:
2383 if (!interp__builtin_complex(S, OpPC, Frame, F, Call))
2384 return false;
2385 break;
2386
2387 case Builtin::BI__builtin_is_aligned:
2388 case Builtin::BI__builtin_align_up:
2389 case Builtin::BI__builtin_align_down:
2390 if (!interp__builtin_is_aligned_up_down(S, OpPC, Frame, F, Call))
2391 return false;
2392 break;
2393
2394 case Builtin::BI__builtin_assume_aligned:
2395 if (!interp__builtin_assume_aligned(S, OpPC, Frame, F, Call))
2396 return false;
2397 break;
2398
2399 case clang::X86::BI__builtin_ia32_bextr_u32:
2400 case clang::X86::BI__builtin_ia32_bextr_u64:
2401 case clang::X86::BI__builtin_ia32_bextri_u32:
2402 case clang::X86::BI__builtin_ia32_bextri_u64:
2403 if (!interp__builtin_ia32_bextr(S, OpPC, Frame, F, Call))
2404 return false;
2405 break;
2406
2407 case clang::X86::BI__builtin_ia32_bzhi_si:
2408 case clang::X86::BI__builtin_ia32_bzhi_di:
2409 if (!interp__builtin_ia32_bzhi(S, OpPC, Frame, F, Call))
2410 return false;
2411 break;
2412
2413 case clang::X86::BI__builtin_ia32_lzcnt_u16:
2414 case clang::X86::BI__builtin_ia32_lzcnt_u32:
2415 case clang::X86::BI__builtin_ia32_lzcnt_u64:
2416 if (!interp__builtin_ia32_lzcnt(S, OpPC, Frame, F, Call))
2417 return false;
2418 break;
2419
2420 case clang::X86::BI__builtin_ia32_tzcnt_u16:
2421 case clang::X86::BI__builtin_ia32_tzcnt_u32:
2422 case clang::X86::BI__builtin_ia32_tzcnt_u64:
2423 if (!interp__builtin_ia32_tzcnt(S, OpPC, Frame, F, Call))
2424 return false;
2425 break;
2426
2427 case clang::X86::BI__builtin_ia32_pdep_si:
2428 case clang::X86::BI__builtin_ia32_pdep_di:
2429 if (!interp__builtin_ia32_pdep(S, OpPC, Frame, F, Call))
2430 return false;
2431 break;
2432
2433 case clang::X86::BI__builtin_ia32_pext_si:
2434 case clang::X86::BI__builtin_ia32_pext_di:
2435 if (!interp__builtin_ia32_pext(S, OpPC, Frame, F, Call))
2436 return false;
2437 break;
2438
2439 case clang::X86::BI__builtin_ia32_addcarryx_u32:
2440 case clang::X86::BI__builtin_ia32_addcarryx_u64:
2441 case clang::X86::BI__builtin_ia32_subborrow_u32:
2442 case clang::X86::BI__builtin_ia32_subborrow_u64:
2443 if (!interp__builtin_ia32_addcarry_subborrow(S, OpPC, Frame, F, Call))
2444 return false;
2445 break;
2446
2447 case Builtin::BI__builtin_os_log_format_buffer_size:
2448 if (!interp__builtin_os_log_format_buffer_size(S, OpPC, Frame, F, Call))
2449 return false;
2450 break;
2451
2452 case Builtin::BI__builtin_ptrauth_string_discriminator:
2453 if (!interp__builtin_ptrauth_string_discriminator(S, OpPC, Frame, F, Call))
2454 return false;
2455 break;
2456
2457 case Builtin::BI__builtin_constant_p:
2458 if (!interp__builtin_constant_p(S, OpPC, Frame, F, Call))
2459 return false;
2460 break;
2461
2462 case Builtin::BI__noop:
2463 pushInteger(S, 0, Call->getType());
2464 break;
2465
2466 case Builtin::BI__builtin_operator_new:
2467 if (!interp__builtin_operator_new(S, OpPC, Frame, F, Call))
2468 return false;
2469 break;
2470
2471 case Builtin::BI__builtin_operator_delete:
2472 if (!interp__builtin_operator_delete(S, OpPC, Frame, F, Call))
2473 return false;
2474 break;
2475
2476 case Builtin::BI__arithmetic_fence:
2477 if (!interp__builtin_arithmetic_fence(S, OpPC, Frame, F, Call))
2478 return false;
2479 break;
2480
2481 case Builtin::BI__builtin_reduce_add:
2482 case Builtin::BI__builtin_reduce_mul:
2483 case Builtin::BI__builtin_reduce_and:
2484 case Builtin::BI__builtin_reduce_or:
2485 case Builtin::BI__builtin_reduce_xor:
2486 if (!interp__builtin_vector_reduce(S, OpPC, Frame, F, Call))
2487 return false;
2488 break;
2489
2490 case Builtin::BI__builtin_elementwise_popcount:
2491 if (!interp__builtin_elementwise_popcount(S, OpPC, Frame, F, Call))
2492 return false;
2493 break;
2494
2495 case Builtin::BI__builtin_memcpy:
2496 case Builtin::BImemcpy:
2497 case Builtin::BI__builtin_memmove:
2498 case Builtin::BImemmove:
2499 if (!interp__builtin_memcpy(S, OpPC, Frame, F, Call))
2500 return false;
2501 break;
2502
2503 case Builtin::BI__builtin_memcmp:
2504 case Builtin::BImemcmp:
2505 case Builtin::BI__builtin_bcmp:
2506 case Builtin::BIbcmp:
2507 case Builtin::BI__builtin_wmemcmp:
2508 case Builtin::BIwmemcmp:
2509 if (!interp__builtin_memcmp(S, OpPC, Frame, F, Call))
2510 return false;
2511 break;
2512
2513 default:
2514 S.FFDiag(S.Current->getLocation(OpPC),
2515 diag::note_invalid_subexpr_in_const_expr)
2516 << S.Current->getRange(OpPC);
2517
2518 return false;
2519 }
2520
2521 return retPrimValue(S, OpPC, ReturnT);
2522}
2523
2524bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E,
2525 llvm::ArrayRef<int64_t> ArrayIndices,
2526 int64_t &IntResult) {
2527 CharUnits Result;
2528 unsigned N = E->getNumComponents();
2529 assert(N > 0);
2530
2531 unsigned ArrayIndex = 0;
2532 QualType CurrentType = E->getTypeSourceInfo()->getType();
2533 for (unsigned I = 0; I != N; ++I) {
2534 const OffsetOfNode &Node = E->getComponent(I);
2535 switch (Node.getKind()) {
2536 case OffsetOfNode::Field: {
2537 const FieldDecl *MemberDecl = Node.getField();
2538 const RecordType *RT = CurrentType->getAs<RecordType>();
2539 if (!RT)
2540 return false;
2541 const RecordDecl *RD = RT->getDecl();
2542 if (RD->isInvalidDecl())
2543 return false;
2544 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2545 unsigned FieldIndex = MemberDecl->getFieldIndex();
2546 assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
2547 Result +=
2548 S.getASTContext().toCharUnitsFromBits(RL.getFieldOffset(FieldIndex));
2549 CurrentType = MemberDecl->getType().getNonReferenceType();
2550 break;
2551 }
2552 case OffsetOfNode::Array: {
2553 // When generating bytecode, we put all the index expressions as Sint64 on
2554 // the stack.
2555 int64_t Index = ArrayIndices[ArrayIndex];
2556 const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
2557 if (!AT)
2558 return false;
2559 CurrentType = AT->getElementType();
2560 CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
2561 Result += Index * ElementSize;
2562 ++ArrayIndex;
2563 break;
2564 }
2565 case OffsetOfNode::Base: {
2566 const CXXBaseSpecifier *BaseSpec = Node.getBase();
2567 if (BaseSpec->isVirtual())
2568 return false;
2569
2570 // Find the layout of the class whose base we are looking into.
2571 const RecordType *RT = CurrentType->getAs<RecordType>();
2572 if (!RT)
2573 return false;
2574 const RecordDecl *RD = RT->getDecl();
2575 if (RD->isInvalidDecl())
2576 return false;
2577 const ASTRecordLayout &RL = S.getASTContext().getASTRecordLayout(RD);
2578
2579 // Find the base class itself.
2580 CurrentType = BaseSpec->getType();
2581 const RecordType *BaseRT = CurrentType->getAs<RecordType>();
2582 if (!BaseRT)
2583 return false;
2584
2585 // Add the offset to the base.
2586 Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
2587 break;
2588 }
2590 llvm_unreachable("Dependent OffsetOfExpr?");
2591 }
2592 }
2593
2594 IntResult = Result.getQuantity();
2595
2596 return true;
2597}
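// Illustrative note (not part of the original source; 'Inner' is a made-up
// type): the component walk above mirrors what __builtin_offsetof evaluates
// to, including array components, e.g.
//
//   struct Inner { int A; int B[4]; };
//   static_assert(__builtin_offsetof(Inner, B[2]) == sizeof(int) * 3);
//
// Virtual bases are rejected (the function returns false), and dependent
// identifier components can never reach this code.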
2598
2599bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC,
2600 const Pointer &Ptr, const APSInt &IntValue) {
2601
2602 const Record *R = Ptr.getRecord();
2603 assert(R);
2604 assert(R->getNumFields() == 1);
2605
2606 unsigned FieldOffset = R->getField(0u)->Offset;
2607 const Pointer &FieldPtr = Ptr.atField(FieldOffset);
2608 PrimType FieldT = *S.getContext().classify(FieldPtr.getType());
2609
2610 INT_TYPE_SWITCH(FieldT,
2611 FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
2612 FieldPtr.initialize();
2613 return true;
2614}
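// Illustrative note (not part of the original source): this helper writes the
// single integer member of std::strong_ordering and friends, which is what a
// constant-evaluated three-way comparison materializes, e.g. (assuming
// <compare> is included):
//
//   static_assert((1 <=> 2) < 0);
//   static_assert((2.0 <=> 2.0) == 0);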
2615
2616static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2617 Pointer &Dest, bool Activate);
2618static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
2619 Pointer &Dest, bool Activate = false) {
2620 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2621 const Descriptor *DestDesc = Dest.getFieldDesc();
2622
2623 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
2624 Pointer DestField = Dest.atField(F.Offset);
2625 if (std::optional<PrimType> FT = S.Ctx.classify(F.Decl->getType())) {
2626 TYPE_SWITCH(*FT, {
2627 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
2628 if (Src.atField(F.Offset).isInitialized())
2629 DestField.initialize();
2630 if (Activate)
2631 DestField.activate();
2632 });
2633 return true;
2634 }
2635 // Composite field.
2636 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
2637 };
2638
2639 assert(SrcDesc->isRecord());
2640 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
2641 const Record *R = DestDesc->ElemRecord;
2642 for (const Record::Field &F : R->fields()) {
2643 if (R->isUnion()) {
2644 // For unions, only copy the active field.
2645 const Pointer &SrcField = Src.atField(F.Offset);
2646 if (SrcField.isActive()) {
2647 if (!copyField(F, /*Activate=*/true))
2648 return false;
2649 }
2650 } else {
2651 if (!copyField(F, Activate))
2652 return false;
2653 }
2654 }
2655
2656 for (const Record::Base &B : R->bases()) {
2657 Pointer DestBase = Dest.atField(B.Offset);
2658 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
2659 return false;
2660 }
2661
2662 Dest.initialize();
2663 return true;
2664}
2665
2666static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
2667 Pointer &Dest, bool Activate = false) {
2668 assert(Src.isLive() && Dest.isLive());
2669
2670 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
2671 const Descriptor *DestDesc = Dest.getFieldDesc();
2672
2673 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
2674
2675 if (DestDesc->isPrimitiveArray()) {
2676 assert(SrcDesc->isPrimitiveArray());
2677 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
2678 PrimType ET = DestDesc->getPrimType();
2679 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
2680 Pointer DestElem = Dest.atIndex(I);
2681 TYPE_SWITCH(ET, {
2682 DestElem.deref<T>() = Src.atIndex(I).deref<T>();
2683 DestElem.initialize();
2684 });
2685 }
2686 return true;
2687 }
2688
2689 if (DestDesc->isRecord())
2690 return copyRecord(S, OpPC, Src, Dest, Activate);
2691 return Invalid(S, OpPC);
2692}
2693
2694bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
2695 return copyComposite(S, OpPC, Src, Dest);
2696}
2697
2698} // namespace interp
2699} // namespace clang
#define V(N, I)
Definition: ASTContext.h:3443
DynTypedNode Node
StringRef P
Defines enum values for all the target-independent builtin functions.
Expr * E
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
#define RET_CASE(X)
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition: PrimType.h:194
#define INT_TYPE_SWITCH(Expr, B)
Definition: PrimType.h:175
#define TYPE_SWITCH(Expr, B)
Definition: PrimType.h:153
SourceLocation Loc
Definition: SemaObjC.cpp:759
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition: APValue.h:122
const LValueBase getLValueBase() const
Definition: APValue.cpp:973
CharUnits & getLValueOffset()
Definition: APValue.cpp:983
bool isLValue() const
Definition: APValue.h:448
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition: ASTContext.h:682
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
Definition: ASTContext.h:1915
bool hasSameUnqualifiedType(QualType T1, QualType T2) const
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
Definition: ASTContext.h:2763
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
Definition: ASTContext.h:2482
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition: ASTContext.h:799
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
Definition: RecordLayout.h:38
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
Definition: RecordLayout.h:196
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
Definition: RecordLayout.h:200
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
Definition: RecordLayout.h:249
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition: Type.h:3577
QualType getElementType() const
Definition: Type.h:3589
llvm::StringRef getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition: Builtins.h:103
Represents a base class of a C++ class.
Definition: DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition: DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition: DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition: Expr.h:2874
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition: CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition: CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition: CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition: Type.cpp:245
bool isInvalidDecl() const
Definition: DeclBase.h:591
This represents one expression.
Definition: Expr.h:110
bool isGLValue() const
Definition: Expr.h:280
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:277
QualType getType() const
Definition: Expr.h:142
Represents a member of a struct/union/class.
Definition: Decl.h:3033
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition: Decl.cpp:4654
Represents a function declaration or definition.
Definition: Decl.h:1935
QualType getReturnType() const
Definition: Decl.h:2720
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition: Expr.h:2519
Helper class for OffsetOfExpr.
Definition: Expr.h:2413
@ Array
An index into an array.
Definition: Expr.h:2418
@ Identifier
A field in a dependent type, known only by its name.
Definition: Expr.h:2422
@ Field
A field.
Definition: Expr.h:2420
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition: Expr.h:2425
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:3198
A (possibly-)qualified type.
Definition: Type.h:929
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:996
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition: Type.h:7931
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition: Type.h:8134
Represents a struct/union/class.
Definition: Decl.h:4148
A helper class that allows the use of isa/cast/dyncast to detect TagType objects of structs/unions/cl...
Definition: Type.h:6072
RecordDecl * getDecl() const
Definition: Type.h:6082
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID, bool DeferHint=false)
Emit a diagnostic.
Definition: SemaBase.cpp:60
ASTContext & getASTContext() const
Definition: Sema.h:531
const LangOptions & getLangOpts() const
Definition: Sema.h:524
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:333
Exposes information about the current target.
Definition: TargetInfo.h:220
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition: TargetInfo.h:844
unsigned getIntWidth() const
getIntWidth/Align - Return the size of 'signed int' and 'unsigned int' for this target,...
Definition: TargetInfo.h:519
bool isBigEndian() const
Definition: TargetInfo.h:1672
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
Definition: TargetInfo.h:1617
unsigned getLongWidth() const
getLongWidth/Align - Return the size of 'signed long' and 'unsigned long' for this target,...
Definition: TargetInfo.h:524
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
Definition: TargetInfo.h:1257
A template argument list.
Definition: DeclTemplate.h:250
unsigned size() const
Retrieve the number of template arguments in this template argument list.
Definition: DeclTemplate.h:286
@ Type
The template argument is a type.
Definition: TemplateBase.h:70
Symbolic representation of typeid(T) for some type T.
Definition: APValue.h:44
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition: Type.cpp:2201
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition: Type.cpp:2251
bool isCharType() const
Definition: Type.cpp:2123
bool isPointerType() const
Definition: Type.h:8186
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition: Type.h:8550
bool isChar8Type() const
Definition: Type.cpp:2139
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:738
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition: Type.h:8625
bool isAnyComplexType() const
Definition: Type.h:8294
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition: Type.cpp:2396
bool isFunctionType() const
Definition: Type.h:8182
bool isFloatingType() const
Definition: Type.cpp:2283
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:8731
bool isNullPtrType() const
Definition: Type.h:8543
QualType getType() const
Definition: Decl.h:682
A memory block, either on the stack or in the heap.
Definition: InterpBlock.h:49
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition: InterpBlock.h:68
Wrapper around boolean types.
Definition: Boolean.h:25
static Boolean from(T Value)
Definition: Boolean.h:103
Pointer into the code segment.
Definition: Source.h:30
Compilation context for expressions.
Definition: Compiler.h:104
Manages dynamic memory allocations done during bytecode interpretation.
bool isInf() const
Definition: Floating.h:97
const APFloat & getAPFloat() const
Definition: Floating.h:40
llvm::FPClassTest classify() const
Definition: Floating.h:101
bool isSignaling() const
Definition: Floating.h:96
bool isNormal() const
Definition: Floating.h:99
ComparisonCategoryResult compare(const Floating &RHS) const
Definition: Floating.h:104
bool isNan() const
Definition: Floating.h:95
bool isZero() const
Definition: Floating.h:91
bool isNegative() const
Definition: Floating.h:89
static Floating getInf(const llvm::fltSemantics &Sem)
Definition: Floating.h:37
bool isFinite() const
Definition: Floating.h:98
bool isDenormal() const
Definition: Floating.h:100
static Floating abs(const Floating &F)
Definition: Floating.h:164
APFloat::fltCategory getCategory() const
Definition: Floating.h:102
Base class for stack frames, shared between VM and walker.
Definition: Frame.h:25
Bytecode function.
Definition: Function.h:81
const FunctionDecl * getDecl() const
Returns the original FunctionDecl.
Definition: Function.h:96
unsigned getBuiltinID() const
Definition: Function.h:196
Frame storing local variables.
Definition: InterpFrame.h:26
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition: InterpFrame.h:29
CodePtr getRetPC() const
Returns the return address of the frame.
Definition: InterpFrame.h:110
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition: InterpStack.h:28
void clear()
Clears the stack without calling any destructors.
Definition: InterpStack.cpp:24
T & peek() const
Returns a reference to the value on the top of the stack.
Definition: InterpStack.h:69
Interpreter context.
Definition: InterpState.h:36
A pointer to a memory block, live or dead.
Definition: Pointer.h:83
bool isInitialized() const
Checks if an object was initialized.
Definition: Pointer.cpp:320
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition: Pointer.h:151
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition: Pointer.h:544
int64_t getIndex() const
Returns the index into an array.
Definition: Pointer.h:589
bool isActive() const
Checks if the object is active.
Definition: Pointer.h:533
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition: Pointer.h:170
T & deref() const
Dereferences the pointer, if it's live.
Definition: Pointer.h:642
unsigned getNumElems() const
Returns the number of elements.
Definition: Pointer.h:580
void activate() const
Activates a field.
Definition: Pointer.cpp:404
bool isIntegralPointer() const
Definition: Pointer.h:468
QualType getType() const
Returns the type of the innermost field.
Definition: Pointer.h:339
bool isLive() const
Checks if the pointer is live.
Definition: Pointer.h:270
uint64_t getByteOffset() const
Returns the byte offset from the start.
Definition: Pointer.h:571
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition: Pointer.cpp:310
bool isZero() const
Checks if the pointer is null.
Definition: Pointer.h:261
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition: Pointer.h:284
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition: Pointer.cpp:463
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition: Pointer.cpp:140
uint64_t getIntegerRepresentation() const
Definition: Pointer.h:138
bool isBlockPointer() const
Definition: Pointer.h:467
const Block * block() const
Definition: Pointer.h:586
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition: Pointer.h:329
size_t elemSize() const
Returns the element size of the innermost field.
Definition: Pointer.h:358
void initialize() const
Initializes a field.
Definition: Pointer.cpp:356
const Record * getRecord() const
Returns the record descriptor of a class.
Definition: Pointer.h:472
Structure/Class descriptor.
Definition: Record.h:25
bool isUnion() const
Checks if the record is a union.
Definition: Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition: Record.cpp:40
llvm::iterator_range< const_base_iter > bases() const
Definition: Record.h:88
unsigned getNumFields() const
Definition: Record.h:84
llvm::iterator_range< const_field_iter > fields() const
Definition: Record.h:80
Describes the statement/declaration an opcode was generated from.
Definition: Source.h:77
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition: OSLog.cpp:180
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
llvm::APFloat APFloat
Definition: Floating.h:23
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static APSInt peekToAPSInt(InterpStack &Stk, PrimType T, size_t Offset=0)
Peek an integer value from the stack into an APSInt.
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
llvm::APInt APInt
Definition: FixedPoint.h:19
static PrimType getLongPrimType(const InterpState &S)
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition: Interp.cpp:844
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, llvm::ArrayRef< int64_t > ArrayIndices, int64_t &Result)
Interpret an offsetof operation.
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool Signaling)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}_ordering type.
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool retPrimValue(InterpState &S, CodePtr OpPC, std::optional< PrimType > &T)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_ia32_bzhi(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static unsigned callArgSize(const InterpState &S, const CallExpr *C)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_clrsb(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_constant_p(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool RetVoid(InterpState &S, CodePtr &PC)
Definition: Interp.h:340
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition: Interp.cpp:589
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static T getParam(const InterpFrame *Frame, unsigned Index)
static bool interp__builtin_ia32_lzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
constexpr size_t align(size_t Size)
Aligns a size to the pointer alignment.
Definition: PrimType.h:131
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition: Interp.cpp:415
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition: Interp.cpp:306
static bool interp__builtin_ia32_pext(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_elementwise_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Can be called with an integer or vector as the first and only parameter.
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
Definition: Interp.h:164
PrimType
Enumeration of the primitive types of the VM.
Definition: PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, bool IsNumBuiltin)
static APSInt getAPSIntParam(const InterpFrame *Frame, unsigned Index)
static bool noopPointer(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Just takes the first Argument to the call and puts it on the stack.
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition: Interp.cpp:298
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool interp__builtin_ia32_tzcnt(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_parity(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition: Interp.cpp:901
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
static bool interp__builtin_ia32_pdep(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static PrimType getIntPrimType(const InterpState &S)
size_t primSize(PrimType Type)
Returns the size of a primitive type in bytes.
Definition: PrimType.cpp:23
static void assignInteger(Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const Function *F, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
llvm::APSInt APSInt
Definition: FixedPoint.h:20
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_ia32_bextr(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, bool IsNumBuiltin)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_rotate(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call, bool Right)
rotateleft(value, amount)
constexpr bool isIntegralType(PrimType T)
Definition: PrimType.h:74
static bool interp__builtin_ffs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *F)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_bitreverse(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const Function *Func, const CallExpr *Call)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
@ AK_Read
Definition: State.h:27
const FunctionProtoType * T
Track what bits have been initialized to known values and which ones have indeterminate value.
Definition: BitcastBuffer.h:81
std::unique_ptr< std::byte[]> Data
Definition: BitcastBuffer.h:83
A quantity in bits.
Definition: BitcastBuffer.h:24
A quantity in bytes.
Definition: BitcastBuffer.h:55
size_t getQuantity() const
Definition: BitcastBuffer.h:58
Bits toBits() const
Definition: BitcastBuffer.h:59
Describes a memory block created by an allocation site.
Definition: Descriptor.h:116
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition: Descriptor.h:243
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition: Descriptor.h:257
QualType getElemQualType() const
Definition: Descriptor.cpp:408
const ValueDecl * asValueDecl() const
Definition: Descriptor.h:208
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition: Descriptor.h:141
QualType getType() const
Definition: Descriptor.cpp:393
static constexpr MetadataSize InlineDescMD
Definition: Descriptor.h:137
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition: Descriptor.h:238
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition: Descriptor.h:248
PrimType getPrimType() const
Definition: Descriptor.h:230
bool isRecord() const
Checks if the descriptor is of a record.
Definition: Descriptor.h:262
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition: Descriptor.h:146
const Expr * asExpr() const
Definition: Descriptor.h:205
bool isArray() const
Checks if the descriptor is of an array.
Definition: Descriptor.h:260