Skip to content

Commit

Permalink
NFC: make AtomicOrdering an enum class
Browse files Browse the repository at this point in the history
Summary:
In the context of http://wg21.link/lwg2445 C++ uses the concept of
'stronger' ordering but doesn't define it properly. This should be fixed
in C++17 barring a small question that's still open.

The code currently plays fast and loose with the AtomicOrdering
enum. Using an enum class is one step towards tightening things. I later
also want to tighten related enums, such as clang's
AtomicOrderingKind (which should be shared with LLVM as a 'C++ ABI'
enum).

This change touches a few lines of code which can be improved later, I'd
like to keep it as NFC for now as it's already quite complex. I have
related changes for clang.

As a follow-up I'll add:
  bool operator<(AtomicOrdering, AtomicOrdering) = delete;
  bool operator>(AtomicOrdering, AtomicOrdering) = delete;
  bool operator<=(AtomicOrdering, AtomicOrdering) = delete;
  bool operator>=(AtomicOrdering, AtomicOrdering) = delete;
This is separate so that clang and LLVM changes don't need to be in sync.

Reviewers: jyknight, reames

Subscribers: jyknight, llvm-commits

Differential Revision: http://reviews.llvm.org/D18775

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@265602 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
jfbastien committed Apr 6, 2016
1 parent a154e6b commit b36d1a8
Show file tree
Hide file tree
Showing 37 changed files with 386 additions and 312 deletions.
2 changes: 1 addition & 1 deletion docs/Atomics.rst
Original file line number Diff line number Diff line change
Expand Up @@ -367,7 +367,7 @@ Predicates for optimizer writers to query:
that they return true for any operation which is volatile or at least
Monotonic.

* ``isAtLeastAcquire()``/``isAtLeastRelease()``: These are predicates on
* ``isStrongerThan`` / ``isAtLeastOrStrongerThan``: These are predicates on
orderings. They can be useful for passes that are aware of atomics, for
example to do DSE across a single atomic access, but not across a
release-acquire pair (see MemoryDependenceAnalysis for an example of this)
Expand Down
8 changes: 5 additions & 3 deletions include/llvm/CodeGen/SelectionDAGNodes.h
Original file line number Diff line number Diff line change
Expand Up @@ -1127,13 +1127,15 @@ class AtomicSDNode : public MemSDNode {
AtomicOrdering FailureOrdering,
SynchronizationScope SynchScope) {
// This must match encodeMemSDNodeFlags() in SelectionDAG.cpp.
assert((SuccessOrdering & 15) == SuccessOrdering &&
assert((AtomicOrdering)((unsigned)SuccessOrdering & 15) ==
SuccessOrdering &&
"Ordering may not require more than 4 bits!");
assert((FailureOrdering & 15) == FailureOrdering &&
assert((AtomicOrdering)((unsigned)FailureOrdering & 15) ==
FailureOrdering &&
"Ordering may not require more than 4 bits!");
assert((SynchScope & 1) == SynchScope &&
"SynchScope may not require more than 1 bit!");
SubclassData |= SuccessOrdering << 8;
SubclassData |= (unsigned)SuccessOrdering << 8;
SubclassData |= SynchScope << 12;
this->FailureOrdering = FailureOrdering;
assert(getSuccessOrdering() == SuccessOrdering &&
Expand Down
127 changes: 90 additions & 37 deletions include/llvm/IR/Instructions.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,37 +36,85 @@ class ConstantRange;
class DataLayout;
class LLVMContext;

enum AtomicOrdering {
/// C++ defines ordering as a lattice. LLVM supplements this with NotAtomic and
/// Unordered, which are both below the C++ orders. See docs/Atomics.rst for
/// details.
///
/// not_atomic-->unordered-->relaxed-->release--------------->acq_rel-->seq_cst
/// \-->consume-->acquire--/
// NOTE: the numeric values are significant — orderings are packed into
// bitfields elsewhere (e.g. AtomicSDNode asserts they fit in 4 bits and
// shifts them into SubclassData), so enumerators must not be renumbered.
enum class AtomicOrdering {
NotAtomic = 0, // Plain, non-atomic memory access.
Unordered = 1, // Atomic but with no ordering guarantees; below C++'s orders.
Monotonic = 2, // Equivalent to C++'s relaxed.
// Consume = 3,  // Not specified yet.
Acquire = 4,
Release = 5,
AcquireRelease = 6,
SequentiallyConsistent = 7
};

/// Maps an atomic ordering to the keyword LLVM IR uses to spell it.
static inline const char *toIRString(AtomicOrdering ao) {
  // Indexed by the ordering's fixed numeric value; slot 3 holds the
  // not-yet-specified "consume" ordering.
  static const char *const IRNames[8] = {
      "not_atomic", "unordered", "monotonic", "consume",
      "acquire",    "release",   "acq_rel",   "seq_cst"};
  return IRNames[static_cast<size_t>(ao)];
}

/// Returns true if ao is stronger than other as defined by the AtomicOrdering
/// lattice, which is based on C++'s definition.
///
/// "Stronger" is strict: no ordering is stronger than itself, and acquire and
/// release are incomparable (see the lattice diagram above AtomicOrdering:
/// neither row marks the other's column below).
static inline bool isStrongerThan(AtomicOrdering ao, AtomicOrdering other) {
// Row = ao, column = other; an entry is 1 exactly when the row's ordering is
// strictly above the column's in the lattice. Indices are the enumerators'
// fixed numeric values (2 = relaxed/Monotonic, 3 = the reserved consume slot).
static const bool lookup[8][8] = {
// NA UN RX CO AC RE AR SC
/* NotAtomic */ {0, 0, 0, 0, 0, 0, 0, 0},
/* Unordered */ {1, 0, 0, 0, 0, 0, 0, 0},
/* relaxed */ {1, 1, 0, 0, 0, 0, 0, 0},
/* consume */ {1, 1, 1, 0, 0, 0, 0, 0},
/* acquire */ {1, 1, 1, 1, 0, 0, 0, 0},
/* release */ {1, 1, 1, 0, 0, 0, 0, 0},
/* acq_rel */ {1, 1, 1, 1, 1, 1, 0, 0},
/* seq_cst */ {1, 1, 1, 1, 1, 1, 1, 0},
};
return lookup[(size_t)ao][(size_t)other];
}

/// Returns true if ao is at least as strong as other in the AtomicOrdering
/// lattice: the same ordering, or one strictly stronger.
static inline bool isAtLeastOrStrongerThan(AtomicOrdering ao,
                                           AtomicOrdering other) {
  // Row = ao, column = other. This is isStrongerThan's table with the
  // diagonal set, i.e. the reflexive closure of the strict relation.
  static const bool AtLeastAsStrong[8][8] = {
      // NA UN RX CO AC RE AR SC
      {1, 0, 0, 0, 0, 0, 0, 0}, // NotAtomic
      {1, 1, 0, 0, 0, 0, 0, 0}, // Unordered
      {1, 1, 1, 0, 0, 0, 0, 0}, // relaxed
      {1, 1, 1, 1, 0, 0, 0, 0}, // consume
      {1, 1, 1, 1, 1, 0, 0, 0}, // acquire
      {1, 1, 1, 0, 0, 1, 0, 0}, // release
      {1, 1, 1, 1, 1, 1, 1, 0}, // acq_rel
      {1, 1, 1, 1, 1, 1, 1, 1}, // seq_cst
  };
  return AtLeastAsStrong[static_cast<size_t>(ao)][static_cast<size_t>(other)];
}

/// True if Ord is strictly stronger than unordered, i.e. it is one of the
/// C++-level orderings (monotonic/relaxed or above).
static inline bool isStrongerThanUnordered(AtomicOrdering Ord) {
return isStrongerThan(Ord, AtomicOrdering::Unordered);
}

/// True if Ord is strictly stronger than monotonic (acquire, release,
/// acq_rel or seq_cst — plus consume, once specified).
static inline bool isStrongerThanMonotonic(AtomicOrdering Ord) {
return isStrongerThan(Ord, AtomicOrdering::Monotonic);
}

/// True if Ord is at least as strong as acquire (acquire, acq_rel or
/// seq_cst).
static inline bool isAcquireOrStronger(AtomicOrdering Ord) {
return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Acquire);
}

/// True if Ord is at least as strong as release (release, acq_rel or
/// seq_cst).
static inline bool isReleaseOrStronger(AtomicOrdering Ord) {
return isAtLeastOrStrongerThan(Ord, AtomicOrdering::Release);
}

/// Scope an atomic operation synchronizes with. Encoded in a single bit
/// elsewhere (AtomicSDNode asserts "SynchScope may not require more than
/// 1 bit"), so the values must stay 0/1.
enum SynchronizationScope {
SingleThread = 0,
CrossThread = 1
};

/// Returns true if the ordering is at least as strong as acquire
/// (i.e. acquire, acq_rel or seq_cst)
/// NOTE(review): legacy predicate written against the old unscoped
/// enumerators; replaced in this change by isAcquireOrStronger().
inline bool isAtLeastAcquire(AtomicOrdering Ord) {
return (Ord == Acquire ||
Ord == AcquireRelease ||
Ord == SequentiallyConsistent);
}

/// Returns true if the ordering is at least as strong as release
/// (i.e. release, acq_rel or seq_cst)
/// NOTE(review): legacy predicate written against the old unscoped
/// enumerators; replaced in this change by isReleaseOrStronger().
inline bool isAtLeastRelease(AtomicOrdering Ord) {
return (Ord == Release ||
Ord == AcquireRelease ||
Ord == SequentiallyConsistent);
}

//===----------------------------------------------------------------------===//
// AllocaInst Class
Expand Down Expand Up @@ -269,7 +317,7 @@ class LoadInst : public UnaryInstruction {
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
((unsigned)Ordering << 7));
}

SynchronizationScope getSynchScope() const {
Expand All @@ -292,7 +340,9 @@ class LoadInst : public UnaryInstruction {

bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
return (getOrdering() == AtomicOrdering::NotAtomic ||
getOrdering() == AtomicOrdering::Unordered) &&
!isVolatile();
}

Value *getPointerOperand() { return getOperand(0); }
Expand Down Expand Up @@ -390,7 +440,7 @@ class StoreInst : public Instruction {
/// AcquireRelease.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 7)) |
(Ordering << 7));
((unsigned)Ordering << 7));
}

SynchronizationScope getSynchScope() const {
Expand All @@ -413,7 +463,9 @@ class StoreInst : public Instruction {

bool isSimple() const { return !isAtomic() && !isVolatile(); }
bool isUnordered() const {
return getOrdering() <= Unordered && !isVolatile();
return (getOrdering() == AtomicOrdering::NotAtomic ||
getOrdering() == AtomicOrdering::Unordered) &&
!isVolatile();
}

Value *getValueOperand() { return getOperand(0); }
Expand Down Expand Up @@ -489,7 +541,7 @@ class FenceInst : public Instruction {
/// AcquireRelease, or SequentiallyConsistent.
void setOrdering(AtomicOrdering Ordering) {
setInstructionSubclassData((getSubclassDataFromInstruction() & 1) |
(Ordering << 1));
((unsigned)Ordering << 1));
}

SynchronizationScope getSynchScope() const {
Expand Down Expand Up @@ -584,17 +636,17 @@ class AtomicCmpXchgInst : public Instruction {

/// Set the ordering constraint on this cmpxchg.
void setSuccessOrdering(AtomicOrdering Ordering) {
assert(Ordering != NotAtomic &&
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~0x1c) |
(Ordering << 2));
((unsigned)Ordering << 2));
}

void setFailureOrdering(AtomicOrdering Ordering) {
assert(Ordering != NotAtomic &&
assert(Ordering != AtomicOrdering::NotAtomic &&
"CmpXchg instructions can only be atomic.");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~0xe0) |
(Ordering << 5));
((unsigned)Ordering << 5));
}

/// Specify whether this cmpxchg is atomic and orders other operations with
Expand Down Expand Up @@ -646,15 +698,16 @@ class AtomicCmpXchgInst : public Instruction {
static AtomicOrdering
getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
switch (SuccessOrdering) {
default: llvm_unreachable("invalid cmpxchg success ordering");
case Release:
case Monotonic:
return Monotonic;
case AcquireRelease:
case Acquire:
return Acquire;
case SequentiallyConsistent:
return SequentiallyConsistent;
default:
llvm_unreachable("invalid cmpxchg success ordering");
case AtomicOrdering::Release:
case AtomicOrdering::Monotonic:
return AtomicOrdering::Monotonic;
case AtomicOrdering::AcquireRelease:
case AtomicOrdering::Acquire:
return AtomicOrdering::Acquire;
case AtomicOrdering::SequentiallyConsistent:
return AtomicOrdering::SequentiallyConsistent;
}
}

Expand Down Expand Up @@ -770,10 +823,10 @@ class AtomicRMWInst : public Instruction {

/// Set the ordering constraint on this RMW.
void setOrdering(AtomicOrdering Ordering) {
assert(Ordering != NotAtomic &&
assert(Ordering != AtomicOrdering::NotAtomic &&
"atomicrmw instructions can only be atomic.");
setInstructionSubclassData((getSubclassDataFromInstruction() & ~(7 << 2)) |
(Ordering << 2));
((unsigned)Ordering << 2));
}

/// Specify whether this RMW orders other operations with respect to all
Expand Down
4 changes: 2 additions & 2 deletions include/llvm/Target/TargetLowering.h
Original file line number Diff line number Diff line change
Expand Up @@ -1108,7 +1108,7 @@ class TargetLoweringBase {
virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (isAtLeastRelease(Ord) && IsStore)
if (isReleaseOrStronger(Ord) && IsStore)
return Builder.CreateFence(Ord);
else
return nullptr;
Expand All @@ -1117,7 +1117,7 @@ class TargetLoweringBase {
virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
AtomicOrdering Ord, bool IsStore,
bool IsLoad) const {
if (isAtLeastAcquire(Ord))
if (isAcquireOrStronger(Ord))
return Builder.CreateFence(Ord);
else
return nullptr;
Expand Down
4 changes: 2 additions & 2 deletions lib/Analysis/AliasAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -389,7 +389,7 @@ ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
const MemoryLocation &Loc) {
// Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
if (CX->getSuccessOrdering() > Monotonic)
if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
return MRI_ModRef;

// If the cmpxchg address does not alias the location, it does not access it.
Expand All @@ -402,7 +402,7 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc) {
// Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
if (RMW->getOrdering() > Monotonic)
if (isStrongerThanMonotonic(RMW->getOrdering()))
return MRI_ModRef;

// If the atomicrmw address does not alias the location, it does not access it.
Expand Down
4 changes: 2 additions & 2 deletions lib/Analysis/AliasSetTracker.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,7 @@ bool AliasSetTracker::add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo) {


bool AliasSetTracker::add(LoadInst *LI) {
if (LI->getOrdering() > Monotonic) return addUnknown(LI);
if (isStrongerThanMonotonic(LI->getOrdering())) return addUnknown(LI);

AAMDNodes AAInfo;
LI->getAAMetadata(AAInfo);
Expand All @@ -316,7 +316,7 @@ bool AliasSetTracker::add(LoadInst *LI) {
}

bool AliasSetTracker::add(StoreInst *SI) {
if (SI->getOrdering() > Monotonic) return addUnknown(SI);
if (isStrongerThanMonotonic(SI->getOrdering())) return addUnknown(SI);

AAMDNodes AAInfo;
SI->getAAMetadata(AAInfo);
Expand Down
15 changes: 7 additions & 8 deletions lib/Analysis/MemoryDependenceAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
Loc = MemoryLocation::get(LI);
return MRI_Ref;
}
if (LI->getOrdering() == Monotonic) {
if (LI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(LI);
return MRI_ModRef;
}
Expand All @@ -106,7 +106,7 @@ static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
Loc = MemoryLocation::get(SI);
return MRI_Mod;
}
if (SI->getOrdering() == Monotonic) {
if (SI->getOrdering() == AtomicOrdering::Monotonic) {
Loc = MemoryLocation::get(SI);
return MRI_ModRef;
}
Expand Down Expand Up @@ -518,11 +518,11 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// A Monotonic (or higher) load is OK if the query inst is itself not
// atomic.
// FIXME: This is overly conservative.
if (LI->isAtomic() && LI->getOrdering() > Unordered) {
if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(LI);
if (LI->getOrdering() != Monotonic)
if (LI->getOrdering() != AtomicOrdering::Monotonic)
return MemDepResult::getClobber(LI);
}

Expand Down Expand Up @@ -588,7 +588,7 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(SI);
if (SI->getOrdering() != Monotonic)
if (SI->getOrdering() != AtomicOrdering::Monotonic)
return MemDepResult::getClobber(SI);
}

Expand Down Expand Up @@ -644,9 +644,9 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
// loads. DSE uses this to find preceding stores to delete and thus we
// can't bypass the fence if the query instruction is a store.
if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
if (isLoad && FI->getOrdering() == Release)
if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
continue;

// See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
ModRefInfo MR = AA.getModRefInfo(Inst, MemLoc);
// If necessary, perform additional analysis.
Expand Down Expand Up @@ -1708,4 +1708,3 @@ bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
MemDep.emplace(AA, AC, TLI, DT);
return false;
}

Loading

0 comments on commit b36d1a8

Please sign in to comment.