From b36d1a86f10047ff2b27d61e9c542fbf17743823 Mon Sep 17 00:00:00 2001 From: JF Bastien Date: Wed, 6 Apr 2016 21:19:33 +0000 Subject: [PATCH] NFC: make AtomicOrdering an enum class Summary: In the context of http://wg21.link/lwg2445 C++ uses the concept of 'stronger' ordering but doesn't define it properly. This should be fixed in C++17 barring a small question that's still open. The code currently plays fast and loose with the AtomicOrdering enum. Using an enum class is one step towards tightening things. I later also want to tighten related enums, such as clang's AtomicOrderingKind (which should be shared with LLVM as a 'C++ ABI' enum). This change touches a few lines of code which can be improved later, I'd like to keep it as NFC for now as it's already quite complex. I have related changes for clang. As a follow-up I'll add: bool operator<(AtomicOrdering, AtomicOrdering) = delete; bool operator>(AtomicOrdering, AtomicOrdering) = delete; bool operator<=(AtomicOrdering, AtomicOrdering) = delete; bool operator>=(AtomicOrdering, AtomicOrdering) = delete; This is separate so that clang and LLVM changes don't need to be in sync. Reviewers: jyknight, reames Subscribers: jyknight, llvm-commits Differential Revision: http://reviews.llvm.org/D18775 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@265602 91177308-0d34-0410-b5e6-96231b3b80d8 --- docs/Atomics.rst | 2 +- include/llvm/CodeGen/SelectionDAGNodes.h | 8 +- include/llvm/IR/Instructions.h | 127 +++++++++++++----- include/llvm/Target/TargetLowering.h | 4 +- lib/Analysis/AliasAnalysis.cpp | 4 +- lib/Analysis/AliasSetTracker.cpp | 4 +- lib/Analysis/MemoryDependenceAnalysis.cpp | 15 +-- lib/AsmParser/LLParser.cpp | 54 +++++--- lib/Bitcode/Reader/BitcodeReader.cpp | 39 +++--- lib/Bitcode/Writer/BitcodeWriter.cpp | 14 +- lib/CodeGen/AtomicExpandPass.cpp | 38 +++--- .../SelectionDAG/SelectionDAGBuilder.cpp | 2 +- lib/IR/AsmWriter.cpp | 36 +---- lib/IR/Core.cpp | 30 +++-- lib/IR/Instruction.cpp | 4 +- lib/IR/Instructions.cpp | 38 +++--- lib/IR/Verifier.cpp | 37 ++--- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 2 +- lib/Target/AArch64/AArch64ISelLowering.cpp | 4 +- lib/Target/AArch64/AArch64InstrAtomics.td | 11 +- lib/Target/ARM/ARMISelLowering.cpp | 46 +++---- lib/Target/ARM/ARMInstrInfo.td | 4 +- lib/Target/CppBackend/CPPBackend.cpp | 15 ++- lib/Target/PowerPC/PPCISelLowering.cpp | 6 +- lib/Target/Sparc/SparcISelLowering.cpp | 10 +- lib/Target/SystemZ/SystemZISelLowering.cpp | 6 +- lib/Target/X86/X86ISelLowering.cpp | 8 +- lib/Target/XCore/XCoreISelLowering.cpp | 10 +- lib/Transforms/IPO/GlobalOpt.cpp | 5 +- lib/Transforms/IPO/MergeFunctions.cpp | 25 ++-- .../Instrumentation/MemorySanitizer.cpp | 44 +++--- .../Instrumentation/SanitizerCoverage.cpp | 2 +- .../Instrumentation/ThreadSanitizer.cpp | 18 +-- lib/Transforms/Scalar/EarlyCSE.cpp | 2 +- lib/Transforms/Scalar/LowerAtomic.cpp | 4 +- lib/Transforms/Utils/GlobalStatus.cpp | 12 +- unittests/Analysis/AliasAnalysisTest.cpp | 8 +- 37 files changed, 386 insertions(+), 312 deletions(-) diff --git a/docs/Atomics.rst b/docs/Atomics.rst index 112ea23b9e95..ff6674804462 100644 --- a/docs/Atomics.rst +++ b/docs/Atomics.rst @@ -367,7 +367,7 @@ Predicates for optimizer writers to query: that they return true for any operation which is volatile or at least Monotonic. -* ``isAtLeastAcquire()``/``isAtLeastRelease()``: These are predicates on +* ``isStrongerThan`` / ``isAtLeastOrStrongerThan``: These are predicates on orderings. 
They can be useful for passes that are aware of atomics, for example to do DSE across a single atomic access, but not across a release-acquire pair (see MemoryDependencyAnalysis for an example of this) diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 5b7b849a8036..eb8967ded875 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -1127,13 +1127,15 @@ class AtomicSDNode : public MemSDNode { AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { // This must match encodeMemSDNodeFlags() in SelectionDAG.cpp. - assert((SuccessOrdering & 15) == SuccessOrdering && + assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) == + SuccessOrdering && "Ordering may not require more than 4 bits!"); - assert((FailureOrdering & 15) == FailureOrdering && + assert((AtomicOrdering)((unsigned)FailureOrdering & 15) == + FailureOrdering && "Ordering may not require more than 4 bits!"); assert((SynchScope & 1) == SynchScope && "SynchScope may not require more than 1 bit!"); - SubclassData |= SuccessOrdering << 8; + SubclassData |= (unsigned)SuccessOrdering << 8; SubclassData |= SynchScope << 12; this->FailureOrdering = FailureOrdering; assert(getSuccessOrdering() == SuccessOrdering && diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h index 5b5834d341a4..4fd5cadb3f98 100644 --- a/include/llvm/IR/Instructions.h +++ b/include/llvm/IR/Instructions.h @@ -36,10 +36,16 @@ class ConstantRange; class DataLayout; class LLVMContext; -enum AtomicOrdering { +/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and +/// Unordered, which are both below the C++ orders. See docs/Atomics.rst for +/// details. +/// +/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst +/// \-->consume-->acquire--/ +enum class AtomicOrdering { NotAtomic = 0, Unordered = 1, - Monotonic = 2, + Monotonic = 2, // Equivalent to C++'s relaxed. // Consume = 3, // Not specified yet. Acquire = 4, Release = 5, @@ -47,26 +53,68 @@ enum AtomicOrdering { SequentiallyConsistent = 7 }; +/// String used by LLVM IR to represent atomic ordering. +static inline const char *toIRString(AtomicOrdering ao) { + static const char *names[8] = {"not_atomic", "unordered", "monotonic", + "consume", "acquire", "release", + "acq_rel", "seq_cst"}; + return names[(size_t)ao]; +} + +/// Returns true if ao is stronger than other as defined by the AtomicOrdering +/// lattice, which is based on C++'s definition. 
+static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) { + static const bool lookup[8][8] = { + // NA UN RX CO AC RE AR SC + /* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0}, + /* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0}, + /* relaxed */ {1, 1, 0, 0, 0, 0, 0, 0}, + /* consume */ {1, 1, 1, 0, 0, 0, 0, 0}, + /* acquire */ {1, 1, 1, 1, 0, 0, 0, 0}, + /* release */ {1, 1, 1, 0, 0, 0, 0, 0}, + /* acq_rel */ {1, 1, 1, 1, 1, 1, 0, 0}, + /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 0}, + }; + return lookup[(size_t)ao][(size_t)other]; +} + +static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao, + AtomicOrdering other) { + static const bool lookup[8][8] = { + // NA UN RX CO AC RE AR SC + /* NotAtomic */ {1, 0, 0, 0, 0, 0, 0, 0}, + /* Unordered */ {1, 1, 0, 0, 0, 0, 0, 0}, + /* relaxed */ {1, 1, 1, 0, 0, 0, 0, 0}, + /* consume */ {1, 1, 1, 1, 0, 0, 0, 0}, + /* acquire */ {1, 1, 1, 1, 1, 0, 0, 0}, + /* release */ {1, 1, 1, 0, 0, 1, 0, 0}, + /* acq_rel */ {1, 1, 1, 1, 1, 1, 1, 0}, + /* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 1}, + }; + return lookup[(size_t)ao][(size_t)other]; +} + +static inline bool isStrongerThanUnordered(AtomicOrdering Ord) { + return isStrongerThan(Ord, AtomicOrdering::Unordered); +} + +static inline bool isStrongerThanMonotonic(AtomicOrdering Ord) { + return isStrongerThan(Ord, AtomicOrdering::Monotonic); +} + +static inline bool isAcquireOrStronger(AtomicOrdering Ord) { + return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Acquire); +} + +static inline bool isReleaseOrStronger(AtomicOrdering Ord) { + return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Release); +} + enum SynchronizationScope { SingleThread = 0, CrossThread = 1 }; -/// Returns true if the ordering is at least as strong as acquire -/// (i.e. acquire, acq_rel or seq_cst) -inline bool isAtLeastAcquire(AtomicOrdering Ord) { - return (Ord == Acquire || - Ord == AcquireRelease || - Ord == SequentiallyConsistent); -} - -/// Returns true if the ordering is at least as strong as release -/// (i.e. release, acq_rel or seq_cst) -inline bool isAtLeastRelease(AtomicOrdering Ord) { -return (Ord == Release || - Ord == AcquireRelease || - Ord == SequentiallyConsistent); -} //===----------------------------------------------------------------------===// // AllocaInst Class @@ -269,7 +317,7 @@ class LoadInst : public UnaryInstruction { /// AcquireRelease. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | - (Ordering << 7)); + ((unsigned)Ordering << 7)); } SynchronizationScope getSynchScope() const { @@ -292,7 +340,9 @@ class LoadInst : public UnaryInstruction { bool isSimple() const { return !isAtomic() && !isVolatile(); } bool isUnordered() const { - return getOrdering() <= Unordered && !isVolatile(); + return (getOrdering() == AtomicOrdering::NotAtomic || + getOrdering() == AtomicOrdering::Unordered) && + !isVolatile(); } Value *getPointerOperand() { return getOperand(0); } @@ -390,7 +440,7 @@ class StoreInst : public Instruction { /// AcquireRelease. 
void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) | - (Ordering << 7)); + ((unsigned)Ordering << 7)); } SynchronizationScope getSynchScope() const { @@ -413,7 +463,9 @@ class StoreInst : public Instruction { bool isSimple() const { return !isAtomic() && !isVolatile(); } bool isUnordered() const { - return getOrdering() <= Unordered && !isVolatile(); + return (getOrdering() == AtomicOrdering::NotAtomic || + getOrdering() == AtomicOrdering::Unordered) && + !isVolatile(); } Value *getValueOperand() { return getOperand(0); } @@ -489,7 +541,7 @@ class FenceInst : public Instruction { /// AcquireRelease, or SequentiallyConsistent. void setOrdering(AtomicOrdering Ordering) { setInstructionSubclassData((getSubclassDataFromInstruction() & 1) | - (Ordering << 1)); + ((unsigned)Ordering << 1)); } SynchronizationScope getSynchScope() const { @@ -584,17 +636,17 @@ class AtomicCmpXchgInst : public Instruction { /// Set the ordering constraint on this cmpxchg. void setSuccessOrdering(AtomicOrdering Ordering) { - assert(Ordering != NotAtomic && + assert(Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."); setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) | - (Ordering << 2)); + ((unsigned)Ordering << 2)); } void setFailureOrdering(AtomicOrdering Ordering) { - assert(Ordering != NotAtomic && + assert(Ordering != AtomicOrdering::NotAtomic && "CmpXchg instructions can only be atomic."); setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) | - (Ordering << 5)); + ((unsigned)Ordering << 5)); } /// Specify whether this cmpxchg is atomic and orders other operations with @@ -646,15 +698,16 @@ class AtomicCmpXchgInst : public Instruction { static AtomicOrdering getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { switch (SuccessOrdering) { - default: llvm_unreachable("invalid cmpxchg success ordering"); - case Release: - case Monotonic: - return Monotonic; - case AcquireRelease: - case Acquire: - return Acquire; - case SequentiallyConsistent: - return SequentiallyConsistent; + default: + llvm_unreachable("invalid cmpxchg success ordering"); + case AtomicOrdering::Release: + case AtomicOrdering::Monotonic: + return AtomicOrdering::Monotonic; + case AtomicOrdering::AcquireRelease: + case AtomicOrdering::Acquire: + return AtomicOrdering::Acquire; + case AtomicOrdering::SequentiallyConsistent: + return AtomicOrdering::SequentiallyConsistent; } } @@ -770,10 +823,10 @@ class AtomicRMWInst : public Instruction { /// Set the ordering constraint on this RMW. 
void setOrdering(AtomicOrdering Ordering) { - assert(Ordering != NotAtomic && + assert(Ordering != AtomicOrdering::NotAtomic && "atomicrmw instructions can only be atomic."); setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) | - (Ordering << 2)); + ((unsigned)Ordering << 2)); } /// Specify whether this RMW orders other operations with respect to all diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index 8528d468aa6f..c0a762be0dc7 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -1108,7 +1108,7 @@ class TargetLoweringBase { virtual Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { - if (isAtLeastRelease(Ord) && IsStore) + if (isReleaseOrStronger(Ord) && IsStore) return Builder.CreateFence(Ord); else return nullptr; @@ -1117,7 +1117,7 @@ class TargetLoweringBase { virtual Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { - if (isAtLeastAcquire(Ord)) + if (isAcquireOrStronger(Ord)) return Builder.CreateFence(Ord); else return nullptr; diff --git a/lib/Analysis/AliasAnalysis.cpp b/lib/Analysis/AliasAnalysis.cpp index 453bc3dbe674..117f8cb123f4 100644 --- a/lib/Analysis/AliasAnalysis.cpp +++ b/lib/Analysis/AliasAnalysis.cpp @@ -389,7 +389,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet, ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX, const MemoryLocation &Loc) { // Acquire/Release cmpxchg has properties that matter for arbitrary addresses. - if (CX->getSuccessOrdering() > Monotonic) + if (isStrongerThanMonotonic(CX->getSuccessOrdering())) return MRI_ModRef; // If the cmpxchg address does not alias the location, it does not access it. @@ -402,7 +402,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX, ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW, const MemoryLocation &Loc) { // Acquire/Release atomicrmw has properties that matter for arbitrary addresses. - if (RMW->getOrdering() > Monotonic) + if (isStrongerThanMonotonic(RMW->getOrdering())) return MRI_ModRef; // If the atomicrmw address does not alias the location, it does not access it. 
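[Editorial note, not part of the patch] A minimal sketch of how a caller migrates from the old integer comparison on AtomicOrdering to the new lattice predicates added to include/llvm/IR/Instructions.h above. The helper name isInterestingLoad is hypothetical and used only for illustration; everything else is the API introduced by this change.

#include "llvm/IR/Instructions.h"

using namespace llvm;

// Before this patch, callers wrote `LI->getOrdering() > Monotonic`, relying
// on the enum's underlying integer values. With the enum class, the intent
// is spelled out through the lattice predicates instead.
static bool isInterestingLoad(const LoadInst *LI) { // hypothetical helper
  // Anything stronger than monotonic (acquire, acq_rel, seq_cst) also orders
  // accesses to other addresses, so treat it conservatively.
  if (isStrongerThanMonotonic(LI->getOrdering()))
    return false;
  // Unordered and monotonic atomic loads only constrain their own address.
  return isAtLeastOrStrongerThan(LI->getOrdering(), AtomicOrdering::Unordered);
}

The hunks below, e.g. AliasSetTracker::add, use the same isStrongerThanMonotonic predicate in place of the old `> Monotonic` comparison.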
diff --git a/lib/Analysis/AliasSetTracker.cpp b/lib/Analysis/AliasSetTracker.cpp index e3a055f2d1ba..3cafb46f2897 100644 --- a/lib/Analysis/AliasSetTracker.cpp +++ b/lib/Analysis/AliasSetTracker.cpp @@ -300,7 +300,7 @@ bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) { bool AliasSetTracker::add(LoadInst *LI) { - if (LI->getOrdering() > Monotonic) return addUnknown(LI); + if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI); AAMDNodes AAInfo; LI->getAAMetadata(AAInfo); @@ -316,7 +316,7 @@ bool AliasSetTracker::add(LoadInst *LI) { } bool AliasSetTracker::add(StoreInst *SI) { - if (SI->getOrdering() > Monotonic) return addUnknown(SI); + if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI); AAMDNodes AAInfo; SI->getAAMetadata(AAInfo); diff --git a/lib/Analysis/MemoryDependenceAnalysis.cpp b/lib/Analysis/MemoryDependenceAnalysis.cpp index ce6519abb4ea..e23cd819f07b 100644 --- a/lib/Analysis/MemoryDependenceAnalysis.cpp +++ b/lib/Analysis/MemoryDependenceAnalysis.cpp @@ -93,7 +93,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc, Loc = MemoryLocation::get(LI); return MRI_Ref; } - if (LI->getOrdering() == Monotonic) { + if (LI->getOrdering() == AtomicOrdering::Monotonic) { Loc = MemoryLocation::get(LI); return MRI_ModRef; } @@ -106,7 +106,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc, Loc = MemoryLocation::get(SI); return MRI_Mod; } - if (SI->getOrdering() == Monotonic) { + if (SI->getOrdering() == AtomicOrdering::Monotonic) { Loc = MemoryLocation::get(SI); return MRI_ModRef; } @@ -518,11 +518,11 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom( // A Monotonic (or higher) load is OK if the query inst is itself not // atomic. // FIXME: This is overly conservative. - if (LI->isAtomic() && LI->getOrdering() > Unordered) { + if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) { if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) || isOtherMemAccess(QueryInst)) return MemDepResult::getClobber(LI); - if (LI->getOrdering() != Monotonic) + if (LI->getOrdering() != AtomicOrdering::Monotonic) return MemDepResult::getClobber(LI); } @@ -588,7 +588,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom( if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) || isOtherMemAccess(QueryInst)) return MemDepResult::getClobber(SI); - if (SI->getOrdering() != Monotonic) + if (SI->getOrdering() != AtomicOrdering::Monotonic) return MemDepResult::getClobber(SI); } @@ -644,9 +644,9 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom( // loads. DSE uses this to find preceeding stores to delete and thus we // can't bypass the fence if the query instruction is a store. if (FenceInst *FI = dyn_cast(Inst)) - if (isLoad && FI->getOrdering() == Release) + if (isLoad && FI->getOrdering() == AtomicOrdering::Release) continue; - + // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer. ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc); // If necessary, perform additional analysis. 
@@ -1708,4 +1708,3 @@ bool MemoryDependenceWrapperPass::runOnFunction(Function &F) { MemDep.emplace(AA, AC, TLI, DT); return false; } - diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp index c8543206e211..bf1d2f005ee0 100644 --- a/lib/AsmParser/LLParser.cpp +++ b/lib/AsmParser/LLParser.cpp @@ -1810,12 +1810,16 @@ bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope, bool LLParser::ParseOrdering(AtomicOrdering &Ordering) { switch (Lex.getKind()) { default: return TokError("Expected ordering on atomic instruction"); - case lltok::kw_unordered: Ordering = Unordered; break; - case lltok::kw_monotonic: Ordering = Monotonic; break; - case lltok::kw_acquire: Ordering = Acquire; break; - case lltok::kw_release: Ordering = Release; break; - case lltok::kw_acq_rel: Ordering = AcquireRelease; break; - case lltok::kw_seq_cst: Ordering = SequentiallyConsistent; break; + case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break; + case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break; + // Not specified yet: + // case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break; + case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break; + case lltok::kw_release: Ordering = AtomicOrdering::Release; break; + case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break; + case lltok::kw_seq_cst: + Ordering = AtomicOrdering::SequentiallyConsistent; + break; } Lex.Lex(); return false; @@ -5884,7 +5888,7 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) { unsigned Alignment = 0; bool AteExtraComma = false; bool isAtomic = false; - AtomicOrdering Ordering = NotAtomic; + AtomicOrdering Ordering = AtomicOrdering::NotAtomic; SynchronizationScope Scope = CrossThread; if (Lex.getKind() == lltok::kw_atomic) { @@ -5911,7 +5915,8 @@ int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) { return Error(Loc, "load operand must be a pointer to a first class type"); if (isAtomic && !Alignment) return Error(Loc, "atomic load must have explicit non-zero alignment"); - if (Ordering == Release || Ordering == AcquireRelease) + if (Ordering == AtomicOrdering::Release || + Ordering == AtomicOrdering::AcquireRelease) return Error(Loc, "atomic load cannot use Release ordering"); if (Ty != cast(Val->getType())->getElementType()) @@ -5932,7 +5937,7 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) { unsigned Alignment = 0; bool AteExtraComma = false; bool isAtomic = false; - AtomicOrdering Ordering = NotAtomic; + AtomicOrdering Ordering = AtomicOrdering::NotAtomic; SynchronizationScope Scope = CrossThread; if (Lex.getKind() == lltok::kw_atomic) { @@ -5961,7 +5966,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) { return Error(Loc, "stored value and pointer type do not match"); if (isAtomic && !Alignment) return Error(Loc, "atomic store must have explicit non-zero alignment"); - if (Ordering == Acquire || Ordering == AcquireRelease) + if (Ordering == AtomicOrdering::Acquire || + Ordering == AtomicOrdering::AcquireRelease) return Error(Loc, "atomic store cannot use Acquire ordering"); Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope); @@ -5974,8 +5980,8 @@ int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) { int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) { Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc; bool AteExtraComma = false; - AtomicOrdering SuccessOrdering = NotAtomic; - AtomicOrdering 
FailureOrdering = NotAtomic; + AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic; + AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic; SynchronizationScope Scope = CrossThread; bool isVolatile = false; bool isWeak = false; @@ -5995,12 +6001,16 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) { ParseOrdering(FailureOrdering)) return true; - if (SuccessOrdering == Unordered || FailureOrdering == Unordered) + if (SuccessOrdering == AtomicOrdering::Unordered || + FailureOrdering == AtomicOrdering::Unordered) return TokError("cmpxchg cannot be unordered"); - if (SuccessOrdering < FailureOrdering) - return TokError("cmpxchg must be at least as ordered on success as failure"); - if (FailureOrdering == Release || FailureOrdering == AcquireRelease) - return TokError("cmpxchg failure ordering cannot include release semantics"); + if (isStrongerThan(FailureOrdering, SuccessOrdering)) + return TokError("cmpxchg failure argument shall be no stronger than the " + "success argument"); + if (FailureOrdering == AtomicOrdering::Release || + FailureOrdering == AtomicOrdering::AcquireRelease) + return TokError( + "cmpxchg failure ordering cannot include release semantics"); if (!Ptr->getType()->isPointerTy()) return Error(PtrLoc, "cmpxchg operand must be a pointer"); if (cast(Ptr->getType())->getElementType() != Cmp->getType()) @@ -6023,7 +6033,7 @@ int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) { int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) { Value *Ptr, *Val; LocTy PtrLoc, ValLoc; bool AteExtraComma = false; - AtomicOrdering Ordering = NotAtomic; + AtomicOrdering Ordering = AtomicOrdering::NotAtomic; SynchronizationScope Scope = CrossThread; bool isVolatile = false; AtomicRMWInst::BinOp Operation; @@ -6053,7 +6063,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) { ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering)) return true; - if (Ordering == Unordered) + if (Ordering == AtomicOrdering::Unordered) return TokError("atomicrmw cannot be unordered"); if (!Ptr->getType()->isPointerTy()) return Error(PtrLoc, "atomicrmw operand must be a pointer"); @@ -6076,14 +6086,14 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) { /// ParseFence /// ::= 'fence' 'singlethread'? 
AtomicOrdering int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) { - AtomicOrdering Ordering = NotAtomic; + AtomicOrdering Ordering = AtomicOrdering::NotAtomic; SynchronizationScope Scope = CrossThread; if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering)) return true; - if (Ordering == Unordered) + if (Ordering == AtomicOrdering::Unordered) return TokError("fence cannot be unordered"); - if (Ordering == Monotonic) + if (Ordering == AtomicOrdering::Monotonic) return TokError("fence cannot be monotonic"); Inst = new FenceInst(Context, Ordering, Scope); diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp index 85a331cc00dd..0e74fe38d634 100644 --- a/lib/Bitcode/Reader/BitcodeReader.cpp +++ b/lib/Bitcode/Reader/BitcodeReader.cpp @@ -808,14 +808,14 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) { static AtomicOrdering getDecodedOrdering(unsigned Val) { switch (Val) { - case bitc::ORDERING_NOTATOMIC: return NotAtomic; - case bitc::ORDERING_UNORDERED: return Unordered; - case bitc::ORDERING_MONOTONIC: return Monotonic; - case bitc::ORDERING_ACQUIRE: return Acquire; - case bitc::ORDERING_RELEASE: return Release; - case bitc::ORDERING_ACQREL: return AcquireRelease; + case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic; + case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered; + case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic; + case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire; + case bitc::ORDERING_RELEASE: return AtomicOrdering::Release; + case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease; default: // Map unknown orderings to sequentially-consistent. - case bitc::ORDERING_SEQCST: return SequentiallyConsistent; + case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent; } } @@ -4936,10 +4936,11 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) { Ty = cast(Op->getType())->getElementType(); AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); - if (Ordering == NotAtomic || Ordering == Release || - Ordering == AcquireRelease) + if (Ordering == AtomicOrdering::NotAtomic || + Ordering == AtomicOrdering::Release || + Ordering == AtomicOrdering::AcquireRelease) return error("Invalid record"); - if (Ordering != NotAtomic && Record[OpNum] == 0) + if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); @@ -4992,11 +4993,12 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) { typeCheckLoadStoreInst(Val->getType(), Ptr->getType())) return EC; AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); - if (Ordering == NotAtomic || Ordering == Acquire || - Ordering == AcquireRelease) + if (Ordering == AtomicOrdering::NotAtomic || + Ordering == AtomicOrdering::Acquire || + Ordering == AtomicOrdering::AcquireRelease) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); - if (Ordering != NotAtomic && Record[OpNum] == 0) + if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0) return error("Invalid record"); unsigned Align; @@ -5022,7 +5024,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) { Record.size() < OpNum + 3 || Record.size() > OpNum + 5) return error("Invalid record"); AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]); - if (SuccessOrdering == NotAtomic || SuccessOrdering == 
Unordered) + if (SuccessOrdering == AtomicOrdering::NotAtomic || + SuccessOrdering == AtomicOrdering::Unordered) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]); @@ -5067,7 +5070,8 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) { Operation > AtomicRMWInst::LAST_BINOP) return error("Invalid record"); AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]); - if (Ordering == NotAtomic || Ordering == Unordered) + if (Ordering == AtomicOrdering::NotAtomic || + Ordering == AtomicOrdering::Unordered) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]); I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope); @@ -5079,8 +5083,9 @@ std::error_code BitcodeReader::parseFunctionBody(Function *F) { if (2 != Record.size()) return error("Invalid record"); AtomicOrdering Ordering = getDecodedOrdering(Record[0]); - if (Ordering == NotAtomic || Ordering == Unordered || - Ordering == Monotonic) + if (Ordering == AtomicOrdering::NotAtomic || + Ordering == AtomicOrdering::Unordered || + Ordering == AtomicOrdering::Monotonic) return error("Invalid record"); SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]); I = new FenceInst(Context, Ordering, SynchScope); diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp index 99046d993a85..cbaaea8bbc64 100644 --- a/lib/Bitcode/Writer/BitcodeWriter.cpp +++ b/lib/Bitcode/Writer/BitcodeWriter.cpp @@ -133,13 +133,13 @@ static unsigned GetEncodedRMWOperation(AtomicRMWInst::BinOp Op) { static unsigned GetEncodedOrdering(AtomicOrdering Ordering) { switch (Ordering) { - case NotAtomic: return bitc::ORDERING_NOTATOMIC; - case Unordered: return bitc::ORDERING_UNORDERED; - case Monotonic: return bitc::ORDERING_MONOTONIC; - case Acquire: return bitc::ORDERING_ACQUIRE; - case Release: return bitc::ORDERING_RELEASE; - case AcquireRelease: return bitc::ORDERING_ACQREL; - case SequentiallyConsistent: return bitc::ORDERING_SEQCST; + case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC; + case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED; + case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC; + case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE; + case AtomicOrdering::Release: return bitc::ORDERING_RELEASE; + case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL; + case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST; } llvm_unreachable("Invalid ordering"); } diff --git a/lib/CodeGen/AtomicExpandPass.cpp b/lib/CodeGen/AtomicExpandPass.cpp index 40140e41a03f..8c0c0f4acba6 100644 --- a/lib/CodeGen/AtomicExpandPass.cpp +++ b/lib/CodeGen/AtomicExpandPass.cpp @@ -101,37 +101,37 @@ bool AtomicExpand::runOnFunction(Function &F) { assert((LI || SI || RMWI || CASI) && "Unknown atomic instruction"); if (TLI->shouldInsertFencesForAtomic(I)) { - auto FenceOrdering = Monotonic; + auto FenceOrdering = AtomicOrdering::Monotonic; bool IsStore, IsLoad; - if (LI && isAtLeastAcquire(LI->getOrdering())) { + if (LI && isAcquireOrStronger(LI->getOrdering())) { FenceOrdering = LI->getOrdering(); - LI->setOrdering(Monotonic); + LI->setOrdering(AtomicOrdering::Monotonic); IsStore = false; IsLoad = true; - } else if (SI && isAtLeastRelease(SI->getOrdering())) { + } else if (SI && isReleaseOrStronger(SI->getOrdering())) { FenceOrdering = SI->getOrdering(); - SI->setOrdering(Monotonic); + SI->setOrdering(AtomicOrdering::Monotonic); IsStore = 
true; IsLoad = false; - } else if (RMWI && (isAtLeastRelease(RMWI->getOrdering()) || - isAtLeastAcquire(RMWI->getOrdering()))) { + } else if (RMWI && (isReleaseOrStronger(RMWI->getOrdering()) || + isAcquireOrStronger(RMWI->getOrdering()))) { FenceOrdering = RMWI->getOrdering(); - RMWI->setOrdering(Monotonic); + RMWI->setOrdering(AtomicOrdering::Monotonic); IsStore = IsLoad = true; } else if (CASI && !TLI->shouldExpandAtomicCmpXchgInIR(CASI) && - (isAtLeastRelease(CASI->getSuccessOrdering()) || - isAtLeastAcquire(CASI->getSuccessOrdering()))) { + (isReleaseOrStronger(CASI->getSuccessOrdering()) || + isAcquireOrStronger(CASI->getSuccessOrdering()))) { // If a compare and swap is lowered to LL/SC, we can do smarter fence // insertion, with a stronger one on the success path than on the // failure path. As a result, fence insertion is directly done by // expandAtomicCmpXchg in that case. FenceOrdering = CASI->getSuccessOrdering(); - CASI->setSuccessOrdering(Monotonic); - CASI->setFailureOrdering(Monotonic); + CASI->setSuccessOrdering(AtomicOrdering::Monotonic); + CASI->setFailureOrdering(AtomicOrdering::Monotonic); IsStore = IsLoad = true; } - if (FenceOrdering != Monotonic) { + if (FenceOrdering != AtomicOrdering::Monotonic) { MadeChange |= bracketInstWithFences(I, FenceOrdering, IsStore, IsLoad); } } @@ -520,7 +520,7 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { // should preserve the ordering. bool ShouldInsertFencesForAtomic = TLI->shouldInsertFencesForAtomic(CI); AtomicOrdering MemOpOrder = - ShouldInsertFencesForAtomic ? Monotonic : SuccessOrder; + ShouldInsertFencesForAtomic ? AtomicOrdering::Monotonic : SuccessOrder; // In implementations which use a barrier to achieve release semantics, we can // delay emitting this barrier until we know a store is actually going to be @@ -532,8 +532,9 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) { // minimal loop. Unfortunately, this puts too much stress on later // optimisations so we avoid emitting the extra logic in those cases too. bool HasReleasedLoadBB = !CI->isWeak() && ShouldInsertFencesForAtomic && - SuccessOrder != Monotonic && - SuccessOrder != Acquire && !F->optForMinSize(); + SuccessOrder != AtomicOrdering::Monotonic && + SuccessOrder != AtomicOrdering::Acquire && + !F->optForMinSize(); // There's no overhead for sinking the release barrier in a weak cmpxchg, so // do it even on minsize. @@ -767,8 +768,9 @@ bool llvm::expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun CreateCmpXchg) { assert(AI); - AtomicOrdering MemOpOrder = - AI->getOrdering() == Unordered ? Monotonic : AI->getOrdering(); + AtomicOrdering MemOpOrder = AI->getOrdering() == AtomicOrdering::Unordered + ? 
AtomicOrdering::Monotonic + : AI->getOrdering(); Value *Addr = AI->getPointerOperand(); BasicBlock *BB = AI->getParent(); Function *F = BB->getParent(); diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index f6b4262ffd73..5ad42936169f 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -3903,7 +3903,7 @@ void SelectionDAGBuilder::visitFence(const FenceInst &I) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue Ops[3]; Ops[0] = getRoot(); - Ops[1] = DAG.getConstant(I.getOrdering(), dl, + Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl, TLI.getPointerTy(DAG.getDataLayout())); Ops[2] = DAG.getConstant(I.getSynchScope(), dl, TLI.getPointerTy(DAG.getDataLayout())); diff --git a/lib/IR/AsmWriter.cpp b/lib/IR/AsmWriter.cpp index a347bc60654d..0dd49a43022c 100644 --- a/lib/IR/AsmWriter.cpp +++ b/lib/IR/AsmWriter.cpp @@ -2110,7 +2110,7 @@ void AssemblyWriter::writeOperand(const Value *Operand, bool PrintType) { void AssemblyWriter::writeAtomic(AtomicOrdering Ordering, SynchronizationScope SynchScope) { - if (Ordering == NotAtomic) + if (Ordering == AtomicOrdering::NotAtomic) return; switch (SynchScope) { @@ -2118,46 +2118,22 @@ void AssemblyWriter::writeAtomic(AtomicOrdering Ordering, case CrossThread: break; } - switch (Ordering) { - default: Out << " "; break; - case Unordered: Out << " unordered"; break; - case Monotonic: Out << " monotonic"; break; - case Acquire: Out << " acquire"; break; - case Release: Out << " release"; break; - case AcquireRelease: Out << " acq_rel"; break; - case SequentiallyConsistent: Out << " seq_cst"; break; - } + Out << " " << toIRString(Ordering); } void AssemblyWriter::writeAtomicCmpXchg(AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, SynchronizationScope SynchScope) { - assert(SuccessOrdering != NotAtomic && FailureOrdering != NotAtomic); + assert(SuccessOrdering != AtomicOrdering::NotAtomic && + FailureOrdering != AtomicOrdering::NotAtomic); switch (SynchScope) { case SingleThread: Out << " singlethread"; break; case CrossThread: break; } - switch (SuccessOrdering) { - default: Out << " "; break; - case Unordered: Out << " unordered"; break; - case Monotonic: Out << " monotonic"; break; - case Acquire: Out << " acquire"; break; - case Release: Out << " release"; break; - case AcquireRelease: Out << " acq_rel"; break; - case SequentiallyConsistent: Out << " seq_cst"; break; - } - - switch (FailureOrdering) { - default: Out << " "; break; - case Unordered: Out << " unordered"; break; - case Monotonic: Out << " monotonic"; break; - case Acquire: Out << " acquire"; break; - case Release: Out << " release"; break; - case AcquireRelease: Out << " acq_rel"; break; - case SequentiallyConsistent: Out << " seq_cst"; break; - } + Out << " " << toIRString(SuccessOrdering); + Out << " " << toIRString(FailureOrdering); } void AssemblyWriter::writeParamOperand(const Value *Operand, diff --git a/lib/IR/Core.cpp b/lib/IR/Core.cpp index b2c2740218e8..0bdd2d6483c0 100644 --- a/lib/IR/Core.cpp +++ b/lib/IR/Core.cpp @@ -2602,14 +2602,15 @@ LLVMValueRef LLVMBuildStore(LLVMBuilderRef B, LLVMValueRef Val, static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) { switch (Ordering) { - case LLVMAtomicOrderingNotAtomic: return NotAtomic; - case LLVMAtomicOrderingUnordered: return Unordered; - case LLVMAtomicOrderingMonotonic: return Monotonic; - case LLVMAtomicOrderingAcquire: return Acquire; - case 
LLVMAtomicOrderingRelease: return Release; - case LLVMAtomicOrderingAcquireRelease: return AcquireRelease; + case LLVMAtomicOrderingNotAtomic: return AtomicOrdering::NotAtomic; + case LLVMAtomicOrderingUnordered: return AtomicOrdering::Unordered; + case LLVMAtomicOrderingMonotonic: return AtomicOrdering::Monotonic; + case LLVMAtomicOrderingAcquire: return AtomicOrdering::Acquire; + case LLVMAtomicOrderingRelease: return AtomicOrdering::Release; + case LLVMAtomicOrderingAcquireRelease: + return AtomicOrdering::AcquireRelease; case LLVMAtomicOrderingSequentiallyConsistent: - return SequentiallyConsistent; + return AtomicOrdering::SequentiallyConsistent; } llvm_unreachable("Invalid LLVMAtomicOrdering value!"); @@ -2617,13 +2618,14 @@ static AtomicOrdering mapFromLLVMOrdering(LLVMAtomicOrdering Ordering) { static LLVMAtomicOrdering mapToLLVMOrdering(AtomicOrdering Ordering) { switch (Ordering) { - case NotAtomic: return LLVMAtomicOrderingNotAtomic; - case Unordered: return LLVMAtomicOrderingUnordered; - case Monotonic: return LLVMAtomicOrderingMonotonic; - case Acquire: return LLVMAtomicOrderingAcquire; - case Release: return LLVMAtomicOrderingRelease; - case AcquireRelease: return LLVMAtomicOrderingAcquireRelease; - case SequentiallyConsistent: + case AtomicOrdering::NotAtomic: return LLVMAtomicOrderingNotAtomic; + case AtomicOrdering::Unordered: return LLVMAtomicOrderingUnordered; + case AtomicOrdering::Monotonic: return LLVMAtomicOrderingMonotonic; + case AtomicOrdering::Acquire: return LLVMAtomicOrderingAcquire; + case AtomicOrdering::Release: return LLVMAtomicOrderingRelease; + case AtomicOrdering::AcquireRelease: + return LLVMAtomicOrderingAcquireRelease; + case AtomicOrdering::SequentiallyConsistent: return LLVMAtomicOrderingSequentiallyConsistent; } diff --git a/lib/IR/Instruction.cpp b/lib/IR/Instruction.cpp index 4b33d2e66ea1..e2b5e2777fab 100644 --- a/lib/IR/Instruction.cpp +++ b/lib/IR/Instruction.cpp @@ -461,9 +461,9 @@ bool Instruction::isAtomic() const { case Instruction::Fence: return true; case Instruction::Load: - return cast(this)->getOrdering() != NotAtomic; + return cast(this)->getOrdering() != AtomicOrdering::NotAtomic; case Instruction::Store: - return cast(this)->getOrdering() != NotAtomic; + return cast(this)->getOrdering() != AtomicOrdering::NotAtomic; } } diff --git a/lib/IR/Instructions.cpp b/lib/IR/Instructions.cpp index 9c2f765687da..fcc9cc4ab2e6 100644 --- a/lib/IR/Instructions.cpp +++ b/lib/IR/Instructions.cpp @@ -1209,13 +1209,13 @@ LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, Instruction *InsertBef) - : LoadInst(Ty, Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, - InsertBef) {} + : LoadInst(Ty, Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, + CrossThread, InsertBef) {} LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, BasicBlock *InsertAE) - : LoadInst(Ptr, Name, isVolatile, Align, NotAtomic, CrossThread, InsertAE) { -} + : LoadInst(Ptr, Name, isVolatile, Align, AtomicOrdering::NotAtomic, + CrossThread, InsertAE) {} LoadInst::LoadInst(Type *Ty, Value *Ptr, const Twine &Name, bool isVolatile, unsigned Align, AtomicOrdering Order, @@ -1247,7 +1247,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, Instruction *InsertBef) Load, Ptr, InsertBef) { setVolatile(false); setAlignment(0); - setAtomic(NotAtomic); + setAtomic(AtomicOrdering::NotAtomic); AssertOK(); if (Name && Name[0]) setName(Name); } @@ 
-1257,7 +1257,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, BasicBlock *InsertAE) Load, Ptr, InsertAE) { setVolatile(false); setAlignment(0); - setAtomic(NotAtomic); + setAtomic(AtomicOrdering::NotAtomic); AssertOK(); if (Name && Name[0]) setName(Name); } @@ -1268,7 +1268,7 @@ LoadInst::LoadInst(Type *Ty, Value *Ptr, const char *Name, bool isVolatile, assert(Ty == cast(Ptr->getType())->getElementType()); setVolatile(isVolatile); setAlignment(0); - setAtomic(NotAtomic); + setAtomic(AtomicOrdering::NotAtomic); AssertOK(); if (Name && Name[0]) setName(Name); } @@ -1279,7 +1279,7 @@ LoadInst::LoadInst(Value *Ptr, const char *Name, bool isVolatile, Load, Ptr, InsertAE) { setVolatile(isVolatile); setAlignment(0); - setAtomic(NotAtomic); + setAtomic(AtomicOrdering::NotAtomic); AssertOK(); if (Name && Name[0]) setName(Name); } @@ -1324,13 +1324,13 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, Instruction *InsertBefore) - : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread, - InsertBefore) {} + : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, + CrossThread, InsertBefore) {} StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, BasicBlock *InsertAtEnd) - : StoreInst(val, addr, isVolatile, Align, NotAtomic, CrossThread, - InsertAtEnd) {} + : StoreInst(val, addr, isVolatile, Align, AtomicOrdering::NotAtomic, + CrossThread, InsertAtEnd) {} StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, unsigned Align, AtomicOrdering Order, @@ -1398,13 +1398,15 @@ void AtomicCmpXchgInst::Init(Value *Ptr, Value *Cmp, Value *NewVal, assert(getOperand(2)->getType() == cast(getOperand(0)->getType())->getElementType() && "Ptr must be a pointer to NewVal type!"); - assert(SuccessOrdering != NotAtomic && + assert(SuccessOrdering != AtomicOrdering::NotAtomic && "AtomicCmpXchg instructions must be atomic!"); - assert(FailureOrdering != NotAtomic && + assert(FailureOrdering != AtomicOrdering::NotAtomic && "AtomicCmpXchg instructions must be atomic!"); - assert(SuccessOrdering >= FailureOrdering && - "AtomicCmpXchg success ordering must be at least as strong as fail"); - assert(FailureOrdering != Release && FailureOrdering != AcquireRelease && + assert(!isStrongerThan(FailureOrdering, SuccessOrdering) && + "AtomicCmpXchg failure argument shall be no stronger than the success " + "argument"); + assert(FailureOrdering != AtomicOrdering::Release && + FailureOrdering != AtomicOrdering::AcquireRelease && "AtomicCmpXchg failure ordering cannot include release semantics"); } @@ -1454,7 +1456,7 @@ void AtomicRMWInst::Init(BinOp Operation, Value *Ptr, Value *Val, assert(getOperand(1)->getType() == cast(getOperand(0)->getType())->getElementType() && "Ptr must be a pointer to Val type!"); - assert(Ordering != NotAtomic && + assert(Ordering != AtomicOrdering::NotAtomic && "AtomicRMW instructions must be atomic!"); } diff --git a/lib/IR/Verifier.cpp b/lib/IR/Verifier.cpp index 212e2bf38cdf..24e92e14dfa5 100644 --- a/lib/IR/Verifier.cpp +++ b/lib/IR/Verifier.cpp @@ -2919,7 +2919,8 @@ void Verifier::visitLoadInst(LoadInst &LI) { Assert(LI.getAlignment() <= Value::MaximumAlignment, "huge alignment values are unsupported", &LI); if (LI.isAtomic()) { - Assert(LI.getOrdering() != Release && LI.getOrdering() != AcquireRelease, + Assert(LI.getOrdering() != AtomicOrdering::Release && + LI.getOrdering() != AtomicOrdering::AcquireRelease, "Load cannot have Release ordering", 
&LI); Assert(LI.getAlignment() != 0, "Atomic load must specify explicit alignment", &LI); @@ -2946,7 +2947,8 @@ void Verifier::visitStoreInst(StoreInst &SI) { Assert(SI.getAlignment() <= Value::MaximumAlignment, "huge alignment values are unsupported", &SI); if (SI.isAtomic()) { - Assert(SI.getOrdering() != Acquire && SI.getOrdering() != AcquireRelease, + Assert(SI.getOrdering() != AtomicOrdering::Acquire && + SI.getOrdering() != AtomicOrdering::AcquireRelease, "Store cannot have Acquire ordering", &SI); Assert(SI.getAlignment() != 0, "Atomic store must specify explicit alignment", &SI); @@ -3022,19 +3024,20 @@ void Verifier::visitAllocaInst(AllocaInst &AI) { void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { // FIXME: more conditions??? - Assert(CXI.getSuccessOrdering() != NotAtomic, + Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic, "cmpxchg instructions must be atomic.", &CXI); - Assert(CXI.getFailureOrdering() != NotAtomic, + Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic, "cmpxchg instructions must be atomic.", &CXI); - Assert(CXI.getSuccessOrdering() != Unordered, + Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered, "cmpxchg instructions cannot be unordered.", &CXI); - Assert(CXI.getFailureOrdering() != Unordered, + Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered, "cmpxchg instructions cannot be unordered.", &CXI); - Assert(CXI.getSuccessOrdering() >= CXI.getFailureOrdering(), - "cmpxchg instructions be at least as constrained on success as fail", + Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()), + "cmpxchg instructions failure argument shall be no stronger than the " + "success argument", &CXI); - Assert(CXI.getFailureOrdering() != Release && - CXI.getFailureOrdering() != AcquireRelease, + Assert(CXI.getFailureOrdering() != AtomicOrdering::Release && + CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease, "cmpxchg failure ordering cannot include release semantics", &CXI); PointerType *PTy = dyn_cast(CXI.getOperand(0)->getType()); @@ -3053,9 +3056,9 @@ void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) { } void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { - Assert(RMWI.getOrdering() != NotAtomic, + Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic, "atomicrmw instructions must be atomic.", &RMWI); - Assert(RMWI.getOrdering() != Unordered, + Assert(RMWI.getOrdering() != AtomicOrdering::Unordered, "atomicrmw instructions cannot be unordered.", &RMWI); PointerType *PTy = dyn_cast(RMWI.getOperand(0)->getType()); Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI); @@ -3074,10 +3077,12 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) { void Verifier::visitFenceInst(FenceInst &FI) { const AtomicOrdering Ordering = FI.getOrdering(); - Assert(Ordering == Acquire || Ordering == Release || - Ordering == AcquireRelease || Ordering == SequentiallyConsistent, - "fence instructions may only have " - "acquire, release, acq_rel, or seq_cst ordering.", + Assert(Ordering == AtomicOrdering::Acquire || + Ordering == AtomicOrdering::Release || + Ordering == AtomicOrdering::AcquireRelease || + Ordering == AtomicOrdering::SequentiallyConsistent, + "fence instructions may only have acquire, release, acq_rel, or " + "seq_cst ordering.", &FI); visitInstruction(FI); } diff --git a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 134f107698d0..e43919960419 100644 --- a/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ 
b/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -608,7 +608,7 @@ static bool isWorthFoldingADDlow(SDValue N) { // ldar and stlr have much more restrictive addressing modes (just a // register). - if (cast(Use)->getOrdering() > Monotonic) + if (isStrongerThanMonotonic(cast(Use)->getOrdering())) return false; } diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp index 2d3ddaa060f5..2efe8473832c 100644 --- a/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10132,7 +10132,7 @@ Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); Type *ValTy = cast(Addr->getType())->getElementType(); - bool IsAcquire = isAtLeastAcquire(Ord); + bool IsAcquire = isAcquireOrStronger(Ord); // Since i128 isn't legal and intrinsics don't get type-lowered, the ldrexd // intrinsic must return {i64, i64} and we have to recombine them into a @@ -10174,7 +10174,7 @@ Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); - bool IsRelease = isAtLeastRelease(Ord); + bool IsRelease = isReleaseOrStronger(Ord); // Since the intrinsics must have legal type, the i128 intrinsics take two // parameters: "i64, i64". We must marshal Val into the appropriate form diff --git a/lib/Target/AArch64/AArch64InstrAtomics.td b/lib/Target/AArch64/AArch64InstrAtomics.td index 4923a1161dfc..a88e7e8491a8 100644 --- a/lib/Target/AArch64/AArch64InstrAtomics.td +++ b/lib/Target/AArch64/AArch64InstrAtomics.td @@ -29,7 +29,7 @@ def : Pat<(atomic_fence (imm), (imm)), (DMB (i32 0xb))>; class acquiring_load : PatFrag<(ops node:$ptr), (base node:$ptr), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - return isAtLeastAcquire(Ordering); + return isAcquireOrStronger(Ordering); }]>; // An atomic load operation that does not need either acquire or release @@ -37,7 +37,7 @@ class acquiring_load class relaxed_load : PatFrag<(ops node:$ptr), (base node:$ptr), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - return !isAtLeastAcquire(Ordering); + return !isAcquireOrStronger(Ordering); }]>; // 8-bit loads @@ -112,15 +112,16 @@ def : Pat<(relaxed_load class releasing_store : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - assert(Ordering != AcquireRelease && "unexpected store ordering"); - return isAtLeastRelease(Ordering); + assert(Ordering != AtomicOrdering::AcquireRelease && + "unexpected store ordering"); + return isReleaseOrStronger(Ordering); }]>; // An atomic store operation that doesn't actually need to be atomic on AArch64. class relaxed_store : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - return !isAtLeastRelease(Ordering); + return !isReleaseOrStronger(Ordering); }]>; // 8-bit stores diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index 00446b61ed7c..4c006d862fac 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -3011,7 +3011,7 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG, if (Subtarget->isMClass()) { // Only a full system barrier exists in the M-class architectures. 
Domain = ARM_MB::SY; - } else if (Subtarget->isSwift() && Ord == Release) { + } else if (Subtarget->isSwift() && Ord == AtomicOrdering::Release) { // Swift happens to implement ISHST barriers in a way that's compatible with // Release semantics but weaker than ISH so we'd be fools not to use // it. Beware: other processors probably don't! @@ -6932,13 +6932,13 @@ void ARMTargetLowering::ExpandDIV_Windows( } static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) { - // Monotonic load/store is legal for all targets - if (cast(Op)->getOrdering() <= Monotonic) - return Op; + if (isStrongerThanMonotonic(cast(Op)->getOrdering())) + // Acquire/Release load/store is not legal for targets without a dmb or + // equivalent available. + return SDValue(); - // Acquire/Release load/store is not legal for targets without a - // dmb or equivalent available. - return SDValue(); + // Monotonic load/store is legal for all targets. + return Op; } static void ReplaceREADCYCLECOUNTER(SDNode *N, @@ -12076,18 +12076,18 @@ Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { switch (Ord) { - case NotAtomic: - case Unordered: + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: llvm_unreachable("Invalid fence: unordered/non-atomic"); - case Monotonic: - case Acquire: + case AtomicOrdering::Monotonic: + case AtomicOrdering::Acquire: return nullptr; // Nothing to do - case SequentiallyConsistent: + case AtomicOrdering::SequentiallyConsistent: if (!IsStore) return nullptr; // Nothing to do /*FALLTHROUGH*/ - case Release: - case AcquireRelease: + case AtomicOrdering::Release: + case AtomicOrdering::AcquireRelease: if (Subtarget->isSwift()) return makeDMB(Builder, ARM_MB::ISHST); // FIXME: add a comment with a link to documentation justifying this. @@ -12101,15 +12101,15 @@ Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { switch (Ord) { - case NotAtomic: - case Unordered: + case AtomicOrdering::NotAtomic: + case AtomicOrdering::Unordered: llvm_unreachable("Invalid fence: unordered/not-atomic"); - case Monotonic: - case Release: + case AtomicOrdering::Monotonic: + case AtomicOrdering::Release: return nullptr; // Nothing to do - case Acquire: - case AcquireRelease: - case SequentiallyConsistent: + case AtomicOrdering::Acquire: + case AtomicOrdering::AcquireRelease: + case AtomicOrdering::SequentiallyConsistent: return makeDMB(Builder, ARM_MB::ISH); } llvm_unreachable("Unknown fence ordering in emitTrailingFence"); @@ -12204,7 +12204,7 @@ Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); Type *ValTy = cast(Addr->getType())->getElementType(); - bool IsAcquire = isAtLeastAcquire(Ord); + bool IsAcquire = isAcquireOrStronger(Ord); // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd // intrinsic must return {i32, i32} and we have to recombine them into a @@ -12248,7 +12248,7 @@ Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val, Value *Addr, AtomicOrdering Ord) const { Module *M = Builder.GetInsertBlock()->getParent()->getParent(); - bool IsRelease = isAtLeastRelease(Ord); + bool IsRelease = isReleaseOrStronger(Ord); // Since the intrinsics must have legal type, the i64 intrinsics take two // parameters: "i32, i32". 
We must marshal Val into the appropriate form diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index ca3b9d95f458..d116e9d08a33 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -4761,7 +4761,7 @@ def : ARMPat<(stlex_2 (and GPR:$Rt, 0xffff), addr_offset_none:$addr), class acquiring_load : PatFrag<(ops node:$ptr), (base node:$ptr), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - return isAtLeastAcquire(Ordering); + return isAcquireOrStronger(Ordering); }]>; def atomic_load_acquire_8 : acquiring_load; @@ -4771,7 +4771,7 @@ def atomic_load_acquire_32 : acquiring_load; class releasing_store : PatFrag<(ops node:$ptr, node:$val), (base node:$ptr, node:$val), [{ AtomicOrdering Ordering = cast(N)->getOrdering(); - return isAtLeastRelease(Ordering); + return isReleaseOrStronger(Ordering); }]>; def atomic_store_release_8 : releasing_store; diff --git a/lib/Target/CppBackend/CPPBackend.cpp b/lib/Target/CppBackend/CPPBackend.cpp index cfa1f2ce6196..a086e710d162 100644 --- a/lib/Target/CppBackend/CPPBackend.cpp +++ b/lib/Target/CppBackend/CPPBackend.cpp @@ -1091,13 +1091,14 @@ std::string CppWriter::getOpName(const Value* V) { static StringRef ConvertAtomicOrdering(AtomicOrdering Ordering) { switch (Ordering) { - case NotAtomic: return "NotAtomic"; - case Unordered: return "Unordered"; - case Monotonic: return "Monotonic"; - case Acquire: return "Acquire"; - case Release: return "Release"; - case AcquireRelease: return "AcquireRelease"; - case SequentiallyConsistent: return "SequentiallyConsistent"; + case AtomicOrdering::NotAtomic: return "NotAtomic"; + case AtomicOrdering::Unordered: return "Unordered"; + case AtomicOrdering::Monotonic: return "Monotonic"; + case AtomicOrdering::Acquire: return "Acquire"; + case AtomicOrdering::Release: return "Release"; + case AtomicOrdering::AcquireRelease: return "AcquireRelease"; + case AtomicOrdering::SequentiallyConsistent: + return "SequentiallyConsistent"; } llvm_unreachable("Unknown ordering"); } diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 70d00c2682ad..0aedb419201b 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -8323,9 +8323,9 @@ static Instruction* callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) { Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { - if (Ord == SequentiallyConsistent) + if (Ord == AtomicOrdering::SequentiallyConsistent) return callIntrinsic(Builder, Intrinsic::ppc_sync); - if (isAtLeastRelease(Ord)) + if (isReleaseOrStronger(Ord)) return callIntrinsic(Builder, Intrinsic::ppc_lwsync); return nullptr; } @@ -8333,7 +8333,7 @@ Instruction* PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord, bool IsStore, bool IsLoad) const { - if (IsLoad && isAtLeastAcquire(Ord)) + if (IsLoad && isAcquireOrStronger(Ord)) return callIntrinsic(Builder, Intrinsic::ppc_lwsync); // FIXME: this is too conservative, a dependent branch + isync is enough. 
@@ -8333,7 +8333,7 @@ Instruction* PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                   AtomicOrdering Ord, bool IsStore,
                                                   bool IsLoad) const {
-  if (IsLoad && isAtLeastAcquire(Ord))
+  if (IsLoad && isAcquireOrStronger(Ord))
     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
   // FIXME: this is too conservative, a dependent branch + isync is enough.
   // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp
index ebb779243240..b0fa6a19a9a2 100644
--- a/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/lib/Target/Sparc/SparcISelLowering.cpp
@@ -2929,12 +2929,12 @@ static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
 }
 static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
-  // Monotonic load/stores are legal.
-  if (cast(Op)->getOrdering() <= Monotonic)
-    return Op;
-
-  // Otherwise, expand with a fence.
+  if (isStrongerThanMonotonic(cast(Op)->getOrdering()))
+    // Expand with a fence.
     return SDValue();
+
+  // Monotonic load/stores are legal.
+  return Op;
 }
 SDValue SparcTargetLowering::
diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp
index 9848c2a571a8..310110632472 100644
--- a/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -3130,9 +3130,11 @@ SDValue SystemZTargetLowering::lowerATOMIC_FENCE(SDValue Op,
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     return SDValue(DAG.getMachineNode(SystemZ::Serialize, DL, MVT::Other,
-                                      Op.getOperand(0)), 0);
+                                      Op.getOperand(0)),
+                   0);
   }
   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index eb3f0350846d..8ff00194b251 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -20464,7 +20464,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
   // lowered to just a load without a fence. A mfence flushes the store buffer,
   // making the optimization clearly correct.
-  // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
+  // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
   // otherwise, we might be able to be more aggressive on relaxed idempotent
   // rmw. In practice, they do not look useful, so we don't try to be
   // especially clever.
@@ -20503,7 +20503,8 @@ static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
   // The only fence that needs an instruction is a sequentially-consistent
   // cross-thread fence.
-  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
+  if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
+      FenceScope == CrossThread) {
     if (Subtarget.hasMFence())
       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
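Both the SystemZ and X86 fence lowerings above test for the one case that needs a real instruction: a sequentially-consistent, cross-thread fence; everything weaker or single-threaded stays a compiler-only barrier. The check in isolation looks roughly like this (a hedged sketch; the wrapper function is invented, the operands mirror the hunks above):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Sketch: only a seq_cst fence with cross-thread scope needs a hardware
// barrier (mfence on x86, serialize on SystemZ); other fences lower to no-ops.
static bool fenceNeedsInstruction(AtomicOrdering FenceOrdering,
                                  SynchronizationScope FenceScope) {
  return FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
         FenceScope == CrossThread;
}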
@@ -20986,7 +20987,8 @@ static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
   // FIXME: On 32-bit, store -> fist or movq would be more efficient
   //        (The only way to get a 16-byte store is cmpxchg16b)
   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
-  if (cast(Node)->getOrdering() == SequentiallyConsistent ||
+  if (cast(Node)->getOrdering() ==
+          AtomicOrdering::SequentiallyConsistent ||
       !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
     SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                  cast(Node)->getMemoryVT(),
diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp
index 90da23b8cdb5..6dbaa84a6806 100644
--- a/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/lib/Target/XCore/XCoreISelLowering.cpp
@@ -970,8 +970,9 @@ SDValue XCoreTargetLowering::
 LowerATOMIC_LOAD(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast(Op);
   assert(N->getOpcode() == ISD::ATOMIC_LOAD && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic load must be aligned");
@@ -1000,8 +1001,9 @@ SDValue XCoreTargetLowering::
 LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) const {
   AtomicSDNode *N = cast(Op);
   assert(N->getOpcode() == ISD::ATOMIC_STORE && "Bad Atomic OP");
-  assert(N->getOrdering() <= Monotonic &&
-         "setInsertFencesForAtomic(true) and yet greater than Monotonic");
+  assert((N->getOrdering() == AtomicOrdering::Unordered ||
+          N->getOrdering() == AtomicOrdering::Monotonic) &&
+         "setInsertFencesForAtomic(true) expects unordered / monotonic");
   if (N->getMemoryVT() == MVT::i32) {
     if (N->getAlignment() < 4)
       report_fatal_error("atomic store must be aligned");
diff --git a/lib/Transforms/IPO/GlobalOpt.cpp b/lib/Transforms/IPO/GlobalOpt.cpp
index f6458f50696b..e793d1b7d7be 100644
--- a/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/lib/Transforms/IPO/GlobalOpt.cpp
@@ -1503,7 +1503,7 @@ static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
   // into multiple malloc'd arrays, one for each field. This is basically
   // SRoA for malloc'd memory.
-  if (Ordering != NotAtomic)
+  if (Ordering != AtomicOrdering::NotAtomic)
     return false;
   // If this is an allocation of a fixed size array of structs, analyze as a
@@ -1982,7 +1982,7 @@ bool GlobalOpt::processInternalGlobal(GlobalVariable *GV,
   // Otherwise, if the global was not a boolean, we can shrink it to be a
   // boolean.
   if (Constant *SOVConstant = dyn_cast(GS.StoredOnceValue)) {
-    if (GS.Ordering == NotAtomic) {
+    if (GS.Ordering == AtomicOrdering::NotAtomic) {
       if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
         ++NumShrunkToBool;
         return true;
@@ -2581,4 +2581,3 @@ bool GlobalOpt::runOnModule(Module &M) {
   return Changed;
 }
-
diff --git a/lib/Transforms/IPO/MergeFunctions.cpp b/lib/Transforms/IPO/MergeFunctions.cpp
index 7024c68c3775..719603a38d89 100644
--- a/lib/Transforms/IPO/MergeFunctions.cpp
+++ b/lib/Transforms/IPO/MergeFunctions.cpp
@@ -401,6 +401,7 @@ class FunctionComparator {
   int cmpTypes(Type *TyL, Type *TyR) const;
   int cmpNumbers(uint64_t L, uint64_t R) const;
+  int cmpOrderings(AtomicOrdering L, AtomicOrdering R) const;
   int cmpAPInts(const APInt &L, const APInt &R) const;
   int cmpAPFloats(const APFloat &L, const APFloat &R) const;
   int cmpInlineAsm(const InlineAsm *L, const InlineAsm *R) const;
@@ -477,6 +478,12 @@ int FunctionComparator::cmpNumbers(uint64_t L, uint64_t R) const {
   return 0;
 }
+int FunctionComparator::cmpOrderings(AtomicOrdering L, AtomicOrdering R) const {
+  if ((int)L < (int)R) return -1;
+  if ((int)L > (int)R) return 1;
+  return 0;
+}
+
 int FunctionComparator::cmpAPInts(const APInt &L, const APInt &R) const {
   if (int Res = cmpNumbers(L.getBitWidth(), R.getBitWidth()))
     return Res;
@@ -939,7 +946,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
           cmpNumbers(LI->getAlignment(), cast(R)->getAlignment()))
     return Res;
   if (int Res =
-      cmpNumbers(LI->getOrdering(), cast(R)->getOrdering()))
+      cmpOrderings(LI->getOrdering(), cast(R)->getOrdering()))
     return Res;
   if (int Res =
           cmpNumbers(LI->getSynchScope(), cast(R)->getSynchScope()))
@@ -955,7 +962,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
           cmpNumbers(SI->getAlignment(), cast(R)->getAlignment()))
     return Res;
   if (int Res =
-      cmpNumbers(SI->getOrdering(), cast(R)->getOrdering()))
+      cmpOrderings(SI->getOrdering(), cast(R)->getOrdering()))
     return Res;
   return cmpNumbers(SI->getSynchScope(), cast(R)->getSynchScope());
 }
@@ -1009,7 +1016,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
 }
 if (const FenceInst *FI = dyn_cast(L)) {
   if (int Res =
-      cmpNumbers(FI->getOrdering(), cast(R)->getOrdering()))
+      cmpOrderings(FI->getOrdering(), cast(R)->getOrdering()))
     return Res;
   return cmpNumbers(FI->getSynchScope(), cast(R)->getSynchScope());
 }
@@ -1021,11 +1028,13 @@ int FunctionComparator::cmpOperations(const Instruction *L,
   if (int Res = cmpNumbers(CXI->isWeak(), cast(R)->isWeak()))
     return Res;
-  if (int Res = cmpNumbers(CXI->getSuccessOrdering(),
-                           cast(R)->getSuccessOrdering()))
+  if (int Res =
+          cmpOrderings(CXI->getSuccessOrdering(),
+                       cast(R)->getSuccessOrdering()))
     return Res;
-  if (int Res = cmpNumbers(CXI->getFailureOrdering(),
-                           cast(R)->getFailureOrdering()))
+  if (int Res =
+          cmpOrderings(CXI->getFailureOrdering(),
+                       cast(R)->getFailureOrdering()))
     return Res;
   return cmpNumbers(CXI->getSynchScope(),
                     cast(R)->getSynchScope());
@@ -1037,7 +1046,7 @@ int FunctionComparator::cmpOperations(const Instruction *L,
   if (int Res = cmpNumbers(RMWI->isVolatile(),
                            cast(R)->isVolatile()))
     return Res;
-  if (int Res = cmpNumbers(RMWI->getOrdering(),
+  if (int Res = cmpOrderings(RMWI->getOrdering(),
                            cast(R)->getOrdering()))
     return Res;
   return cmpNumbers(RMWI->getSynchScope(),
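cmpNumbers takes uint64_t, and a scoped enum no longer converts to it implicitly, which is why the dedicated cmpOrderings overload above exists. A sketch of the same three-way comparison written against the underlying type (illustrative only; the in-tree version is the one in the hunk above):

#include <cstdint>
#include "llvm/IR/Instructions.h"

// Illustrative three-way compare over the enum's numeric representation. The
// values only establish a stable total order for the function comparator;
// this is not the C++ "stronger than" lattice relation.
static int compareOrderings(llvm::AtomicOrdering L, llvm::AtomicOrdering R) {
  auto LV = static_cast<uint64_t>(L), RV = static_cast<uint64_t>(R);
  return LV < RV ? -1 : LV > RV ? 1 : 0;
}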
diff --git a/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 1931e7d8bc1e..ed3715a3c4fa 100644
--- a/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1222,34 +1222,34 @@ struct MemorySanitizerVisitor : public InstVisitor {
   AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
     switch (a) {
-      case NotAtomic:
-        return NotAtomic;
-      case Unordered:
-      case Monotonic:
-      case Release:
-        return Release;
-      case Acquire:
-      case AcquireRelease:
-        return AcquireRelease;
-      case SequentiallyConsistent:
-        return SequentiallyConsistent;
+      case AtomicOrdering::NotAtomic:
+        return AtomicOrdering::NotAtomic;
+      case AtomicOrdering::Unordered:
+      case AtomicOrdering::Monotonic:
+      case AtomicOrdering::Release:
+        return AtomicOrdering::Release;
+      case AtomicOrdering::Acquire:
+      case AtomicOrdering::AcquireRelease:
+        return AtomicOrdering::AcquireRelease;
+      case AtomicOrdering::SequentiallyConsistent:
+        return AtomicOrdering::SequentiallyConsistent;
     }
     llvm_unreachable("Unknown ordering");
   }
   AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
     switch (a) {
-      case NotAtomic:
-        return NotAtomic;
-      case Unordered:
-      case Monotonic:
-      case Acquire:
-        return Acquire;
-      case Release:
-      case AcquireRelease:
-        return AcquireRelease;
-      case SequentiallyConsistent:
-        return SequentiallyConsistent;
+      case AtomicOrdering::NotAtomic:
+        return AtomicOrdering::NotAtomic;
+      case AtomicOrdering::Unordered:
+      case AtomicOrdering::Monotonic:
+      case AtomicOrdering::Acquire:
+        return AtomicOrdering::Acquire;
+      case AtomicOrdering::Release:
+      case AtomicOrdering::AcquireRelease:
+        return AtomicOrdering::AcquireRelease;
+      case AtomicOrdering::SequentiallyConsistent:
+        return AtomicOrdering::SequentiallyConsistent;
     }
     llvm_unreachable("Unknown ordering");
   }
diff --git a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
index 1fac003feeb5..f7bf44c662be 100644
--- a/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
+++ b/lib/Transforms/Instrumentation/SanitizerCoverage.cpp
@@ -551,7 +551,7 @@ void SanitizerCoverageModule::InjectCoverageAtBlock(Function &F, BasicBlock &BB,
     IRB.CreateCall(SanCovWithCheckFunction, GuardP);
   } else {
     LoadInst *Load = IRB.CreateLoad(GuardP);
-    Load->setAtomic(Monotonic);
+    Load->setAtomic(AtomicOrdering::Monotonic);
     Load->setAlignment(4);
     SetNoSanitizeMetadata(Load);
     Value *Cmp =
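addReleaseOrdering and addAcquireOrdering above round an ordering up to the nearest one that also orders the shadow access the sanitizer emits next to it. A hedged sketch of how such a bump is applied when instrumenting an atomic load (simplified, not the exact MemorySanitizer code; bumpToAcquire stands in for addAcquireOrdering):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Simplified illustration: strengthen an instrumented atomic load so the
// shadow read that follows it cannot be observed out of order with it.
static void strengthenAtomicLoad(LoadInst &LI,
                                 AtomicOrdering (*bumpToAcquire)(AtomicOrdering)) {
  if (LI.isAtomic())
    LI.setAtomic(bumpToAcquire(LI.getOrdering()), LI.getSynchScope());
}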
diff --git a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 38d87cb1c584..dad42a81068a 100644
--- a/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -480,14 +480,16 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
 static ConstantInt *createOrdering(IRBuilder<> *IRB, AtomicOrdering ord) {
   uint32_t v = 0;
   switch (ord) {
-    case NotAtomic: llvm_unreachable("unexpected atomic ordering!");
-    case Unordered: // Fall-through.
-    case Monotonic: v = 0; break;
-    // case Consume: v = 1; break; // Not specified yet.
-    case Acquire: v = 2; break;
-    case Release: v = 3; break;
-    case AcquireRelease: v = 4; break;
-    case SequentiallyConsistent: v = 5; break;
+    case AtomicOrdering::NotAtomic:
+      llvm_unreachable("unexpected atomic ordering!");
+    case AtomicOrdering::Unordered: // Fall-through.
+    case AtomicOrdering::Monotonic: v = 0; break;
+    // Not specified yet:
+    // case AtomicOrdering::Consume: v = 1; break;
+    case AtomicOrdering::Acquire: v = 2; break;
+    case AtomicOrdering::Release: v = 3; break;
+    case AtomicOrdering::AcquireRelease: v = 4; break;
+    case AtomicOrdering::SequentiallyConsistent: v = 5; break;
   }
   return IRB->getInt32(v);
 }
diff --git a/lib/Transforms/Scalar/EarlyCSE.cpp b/lib/Transforms/Scalar/EarlyCSE.cpp
index e5bbad9f73fe..3707cecd0141 100644
--- a/lib/Transforms/Scalar/EarlyCSE.cpp
+++ b/lib/Transforms/Scalar/EarlyCSE.cpp
@@ -673,7 +673,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
     // to advance the generation. We do need to prevent DSE across the fence,
     // but that's handled above.
     if (FenceInst *FI = dyn_cast(Inst))
-      if (FI->getOrdering() == Release) {
+      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 41511bcb7b04..47d374248dc8 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -100,12 +100,12 @@ static bool LowerFenceInst(FenceInst *FI) {
 }
 static bool LowerLoadInst(LoadInst *LI) {
-  LI->setAtomic(NotAtomic);
+  LI->setAtomic(AtomicOrdering::NotAtomic);
   return true;
 }
 static bool LowerStoreInst(StoreInst *SI) {
-  SI->setAtomic(NotAtomic);
+  SI->setAtomic(AtomicOrdering::NotAtomic);
   return true;
 }
diff --git a/lib/Transforms/Utils/GlobalStatus.cpp b/lib/Transforms/Utils/GlobalStatus.cpp
index 3893a752503b..8be42aed9724 100644
--- a/lib/Transforms/Utils/GlobalStatus.cpp
+++ b/lib/Transforms/Utils/GlobalStatus.cpp
@@ -20,11 +20,11 @@ using namespace llvm;
 /// and release, then return AcquireRelease.
 ///
 static AtomicOrdering strongerOrdering(AtomicOrdering X, AtomicOrdering Y) {
-  if (X == Acquire && Y == Release)
-    return AcquireRelease;
-  if (Y == Acquire && X == Release)
-    return AcquireRelease;
-  return (AtomicOrdering)std::max(X, Y);
+  if (X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release)
+    return AtomicOrdering::AcquireRelease;
+  if (Y == AtomicOrdering::Acquire && X == AtomicOrdering::Release)
+    return AtomicOrdering::AcquireRelease;
+  return (AtomicOrdering)std::max((unsigned)X, (unsigned)Y);
 }
 /// It is safe to destroy a constant iff it is only used by constants itself.
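strongerOrdering above still leans on the numeric encoding: acquire and release are incomparable in the lattice, so their join is acq_rel, and every other pair falls back to std::max over the underlying value, which now needs explicit casts because the scoped enum no longer converts. A hedged sketch of an equivalent helper written with static_cast (illustrative, not the in-tree function):

#include <algorithm>
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustration only: merge two orderings observed on the same global.
// Acquire joined with Release is AcquireRelease; otherwise the larger
// encoding happens to be the stronger ordering for this enum's values.
static AtomicOrdering mergeOrderings(AtomicOrdering X, AtomicOrdering Y) {
  if ((X == AtomicOrdering::Acquire && Y == AtomicOrdering::Release) ||
      (X == AtomicOrdering::Release && Y == AtomicOrdering::Acquire))
    return AtomicOrdering::AcquireRelease;
  return static_cast<AtomicOrdering>(
      std::max(static_cast<unsigned>(X), static_cast<unsigned>(Y)));
}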
@@ -185,4 +185,4 @@ GlobalStatus::GlobalStatus()
     : IsCompared(false), IsLoaded(false), StoredType(NotStored),
       StoredOnceValue(nullptr), AccessingFunction(nullptr),
       HasMultipleAccessingFunctions(false), HasNonInstructionUser(false),
-      Ordering(NotAtomic) {}
+      Ordering(AtomicOrdering::NotAtomic) {}
diff --git a/unittests/Analysis/AliasAnalysisTest.cpp b/unittests/Analysis/AliasAnalysisTest.cpp
index 2d0eaca692c3..3ef635cf3b0b 100644
--- a/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/unittests/Analysis/AliasAnalysisTest.cpp
@@ -179,12 +179,12 @@ TEST_F(AliasAnalysisTest, getModRefInfo) {
   auto *Load1 = new LoadInst(Addr, "load", BB);
   auto *Add1 = BinaryOperator::CreateAdd(Value, Value, "add", BB);
   auto *VAArg1 = new VAArgInst(Addr, PtrType, "vaarg", BB);
-  auto *CmpXChg1 = new AtomicCmpXchgInst(Addr, ConstantInt::get(IntType, 0),
-                                         ConstantInt::get(IntType, 1),
-                                         Monotonic, Monotonic, CrossThread, BB);
+  auto *CmpXChg1 = new AtomicCmpXchgInst(
+      Addr, ConstantInt::get(IntType, 0), ConstantInt::get(IntType, 1),
+      AtomicOrdering::Monotonic, AtomicOrdering::Monotonic, CrossThread, BB);
   auto *AtomicRMW = new AtomicRMWInst(AtomicRMWInst::Xchg, Addr,
                                       ConstantInt::get(IntType, 1),
-                                      Monotonic, CrossThread, BB);
+                                      AtomicOrdering::Monotonic, CrossThread, BB);
   ReturnInst::Create(C, nullptr, BB);
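With the enum scoped, test code spells the ordering out in full when constructing atomic instructions, as the updated unit test shows. A hedged sketch of the same construction in isolation (the module and function setup is boilerplate invented for the example; the constructor signatures are the ones used above):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
using namespace llvm;

// Builds a trivial function containing a monotonic cmpxchg and xchg,
// mirroring the updated unit test. Names and setup are illustrative only.
static void buildAtomicFixture(LLVMContext &C) {
  Module M("fixture", C);
  auto *Int32 = Type::getInt32Ty(C);
  auto *FTy = FunctionType::get(Type::getVoidTy(C), /*isVarArg=*/false);
  auto *F = Function::Create(FTy, Function::ExternalLinkage, "f", &M);
  auto *BB = BasicBlock::Create(C, "entry", F);
  IRBuilder<> IRB(BB);
  auto *Addr = IRB.CreateAlloca(Int32);
  new AtomicCmpXchgInst(Addr, ConstantInt::get(Int32, 0),
                        ConstantInt::get(Int32, 1), AtomicOrdering::Monotonic,
                        AtomicOrdering::Monotonic, CrossThread, BB);
  new AtomicRMWInst(AtomicRMWInst::Xchg, Addr, ConstantInt::get(Int32, 1),
                    AtomicOrdering::Monotonic, CrossThread, BB);
  IRB.CreateRetVoid();
}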