#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace CodeGen;
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),

      ValueTy = ATy->getValueType();

      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      ValueSizeInBits = C.getTypeSize(ValueTy);

      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .alignTo(lvalue.getAlignment()));
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
      StoragePtr = CGF.Builder.CreateAddrSpaceCast(
          StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
      llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      LVal = LValue::MakeBitfield(
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                          ArraySizeModifier::Normal,
                                          /*IndexTypeQuals=*/0);
      }
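// --- Editor's illustrative sketch (hypothetical helper, not Clang API): the
// byte-rounding performed above. The atomic width of a bit-field access is
// its bit offset within the aligned storage plus its size, rounded up to
// whole chars; e.g. a 7-bit field starting at bit 3 occupies 2 bytes.
#include <cstdint>
constexpr uint64_t roundBitsUpToChars(uint64_t OffsetBits, uint64_t SizeBits,
                                      uint64_t CharWidth = 8) {
  return ((OffsetBits + SizeBits + CharWidth - 1) / CharWidth) * CharWidth;
}
static_assert(roundBitsUpToChars(3, 7) == 16, "10 used bits -> 16-bit unit");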
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicSizeInBits = C.getTypeSize(AtomicTy);

      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);

    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }

  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {

  Address getAtomicAddress() const {
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }

  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }

  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

  RValue ConvertToValueOrAtomic(llvm::Value *Val, AggValueSlot ResultSlot,
                                SourceLocation Loc, bool AsValue,
                                bool CmpXchg = false) const;

  void emitCopyIntoMemory(RValue rvalue) const;

  LValue projectValue() const {
    Address addr = getAtomicAddress();

    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),

  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile);

  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);
  Address CreateTempAlloca() const;

  bool requiresMemSetZero(llvm::Type *type) const;

  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);

  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                bool CmpXchg = false);

  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);

  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);

  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);

static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  switch (getEvaluationKind()) {
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, getAtomicAddress().getElementType(),
                           AtomicSizeInBits / 2);
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
                                        llvm::Value *FailureOrderVal,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // A failure ordering may not include release semantics; map those
      // (and relaxed) down to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }
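// --- Editor's sketch of the mapping above (hypothetical standalone helper;
// the case values are the C ABI memory-order encodings 0..5): release-flavored
// and invalid failure orderings clamp to monotonic, consume strengthens to
// acquire, and seq_cst is kept.
enum class FailureOrd { Monotonic, Acquire, SeqCst };
constexpr FailureOrd clampFailureOrder(int CABIOrder) {
  switch (CABIOrder) {
  case 1: // consume
  case 2: // acquire
    return FailureOrd::Acquire;
  case 5: // seq_cst
    return FailureOrd::SeqCst;
  default: // relaxed (0), release (3), acq_rel (4), or invalid
    return FailureOrd::Monotonic;
  }
}
static_assert(clampFailureOrder(4) == FailureOrd::Monotonic, "acq_rel clamps");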
  // Emit all the different atomics.
  llvm::BasicBlock *MonotonicBB = CGF.createBasicBlock("monotonic", CGF.CurFn);
  llvm::BasicBlock *AcquireBB = CGF.createBasicBlock("acquire", CGF.CurFn);
  llvm::BasicBlock *SeqCstBB = CGF.createBasicBlock("seqcst", CGF.CurFn);
  llvm::BasicBlock *ContBB = CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant basic blocks.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1,
                                  Val2, FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1,
                                  Val2, FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
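// --- Editor's sketch (hypothetical helper): the nand post-op above. The
// hardware op returns the old value; __atomic_nand_fetch must return the new
// one, i.e. ~(old & operand), hence the CreateBinOp(And) followed by
// CreateNot.
inline unsigned nandFetchResult(unsigned Old, unsigned Operand) {
  return ~(Old & Operand);
}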
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // Handle a constant scope directly.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {

  // Handle a non-constant scope by switching over all known scopes.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported scope is seen at run time, fall back to a default one.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
    MemTy = AT->getValueType();

  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {

  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
        << (int)TInfo.Width.getQuantity()
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();

  bool ShouldCastToIntPtrTy = true;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);
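// --- Editor's sketch (hypothetical helper) of the rule above: only
// power-of-two sizes up to 16 bytes are emitted inline; everything else is
// routed through the __atomic_* runtime library calls.
constexpr bool wouldUseAtomicLibcall(uint64_t SizeInBytes) {
  bool PowerOf2 = (SizeInBytes & (SizeInBytes - 1)) == 0;
  return !PowerOf2 || SizeInBytes > 16;
}
static_assert(!wouldUseAtomicLibcall(8) && wouldUseAtomicLibcall(24),
              "8-byte ops stay inline; a 24-byte object takes the libcall");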
  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
    auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

  std::string LibCallName;
  bool HaveRetTy = false;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    LibCallName = "__atomic_compare_exchange";
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    LibCallName = "__atomic_exchange";
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:
    LibCallName = "__atomic_store";
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
    LibCallName = "__atomic_load";
    break;
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    llvm_unreachable("Integral atomic operations always become atomicrmw!");
  }

  if (E->isOpenCL())
    LibCallName =
        std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid C ABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
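// --- Editor's sketch (hypothetical helper) of the guards above, using the
// C ABI order encodings: orderings that are invalid for the operation (an
// acquire-flavored store or a release-flavored load) are skipped entirely,
// since such calls are undefined behavior in C11.
constexpr bool orderingValidForOp(bool IsLoad, bool IsStore, int CABIOrder) {
  if (IsStore && (CABIOrder == 1 || CABIOrder == 2 || CABIOrder == 4))
    return false; // no consume/acquire/acq_rel stores
  if (IsLoad && (CABIOrder == 3 || CABIOrder == 4))
    return false; // no release/acq_rel loads
  return true;
}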
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice this
  // only matters if someone passes a non-constant ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }
  return castToAtomicIntPointer(Addr);
}

RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {

static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  auto *ValTy = AsValue
                    ? CGF.ConvertTypeForMem(ValueTy)
                    : getAtomicAddress().getElementType();
  assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
         "Different integer types.");
  if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))

  bool TempIsVolatile = false;
    Temp = CreateTempAlloca();

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);
  if (IsVolatile)
    Load->setVolatile(true);

  AtomicInfo AI(*this, LV);
  bool AtomicIsInline = !AI.shouldUseLibcall();
  return IsVolatile && AtomicIsInline;
  llvm::AtomicOrdering AO;
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
    emitMemSetZeroIfNecessary();

    LValue TempLVal = projectValue();

  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }

  // Otherwise, we need to go through memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  Args.add(RValue::get(llvm::ConstantInt::get(CGF.IntTy,
                                              (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(CGF.IntTy,
                                              (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce pointers to the expected and desired values.
    llvm::Value *ExpectedPtr = materializeRValue(Expected).emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
  LValue AtomicLVal = Atomics.getAtomicLValue();

  Address Ptr = Atomics.materializeRValue(OldRVal);

  RValue NewRVal = UpdateOp(UpRVal);

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
      requiresMemSetZero(getAtomicAddress().getElementType())) {
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *CurBB = CGF.Builder.GetInsertBlock();

  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;
      requiresMemSetZero(getAtomicAddress().getElementType())) {
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
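// --- Editor's sketch of the loop structure built above, restated with the
// C++ atomics API (an analogy under stated assumptions, not what Clang
// emits): load once, apply the update, and retry the compare-exchange until
// it succeeds; the PHI node plays the role of the refreshed 'Old' value.
#include <atomic>
template <typename T, typename F>
T atomicFetchUpdate(std::atomic<T> &Obj, F UpdateOp) {
  T Old = Obj.load();
  while (!Obj.compare_exchange_strong(Old, UpdateOp(Old))) {
    // On failure, compare_exchange_strong reloads Old with the current value,
    // just as the PHI above takes Res.first from the failed cmpxchg.
  }
  return Old;
}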
  LValue AtomicLVal = Atomics.getAtomicLValue();

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
      requiresMemSetZero(getAtomicAddress().getElementType())) {
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *CurBB = CGF.Builder.GetInsertBlock();

  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
      requiresMemSetZero(getAtomicAddress().getElementType())) {
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

      atomics.emitCopyIntoMemory(rvalue);

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false))
        Addr = atomics.castToAtomicIntPointer(Addr);

    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);

  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
         Expected.getAggregateAddress().getElementType() ==

  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW);
  return RMW;
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {

    bool Zeroed = false;
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();

  llvm_unreachable("bad evaluation kind");