Revert r296575 "[SLP] Fixes the bug due to absence of in order uses of scalars which needs to be available"

It caused miscompiles, e.g. in Chromium (PR32109).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@296654 91177308-0d34-0410-b5e6-96231b3b80d8
zmodem committed Mar 1, 2017
1 parent 0863625 commit 4024478
Showing 5 changed files with 81 additions and 138 deletions.
7 changes: 2 additions & 5 deletions include/llvm/Analysis/LoopAccessAnalysis.h
@@ -660,15 +660,12 @@ int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
 /// \brief Try to sort an array of loads / stores.
 ///
 /// An array of loads / stores can only be sorted if all pointer operands
-/// refer to the same object, and the differences between these pointers
+/// refer to the same object, and the differences between these pointers
 /// are known to be constant. If that is the case, this returns true, and the
 /// sorted array is returned in \p Sorted. Otherwise, this returns false, and
 /// \p Sorted is invalid.
-// If \p Mask is not null, it also returns the \p Mask which is the shuffle
-// mask for actual memory access order.
 bool sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
-                     ScalarEvolution &SE, SmallVectorImpl<Value *> &Sorted,
-                     SmallVectorImpl<unsigned> *Mask = nullptr);
+                     ScalarEvolution &SE, SmallVectorImpl<Value *> &Sorted);
 
 /// \brief Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
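For context, here is a caller-side sketch of the interface this revert restores. It is an illustration, not code from the commit: trySortLoads is a hypothetical helper, and the VL, DL, and SE arguments are assumed to be supplied by the enclosing vectorizer pass.

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// Hypothetical helper: returns the loads in VL sorted by increasing constant
// pointer offset, or an empty vector if sortMemAccesses cannot prove the
// offsets constant (in which case \p Sorted is invalid, per the comment in
// the header above).
static SmallVector<Value *, 4> trySortLoads(ArrayRef<Value *> VL,
                                            const DataLayout &DL,
                                            ScalarEvolution &SE) {
  SmallVector<Value *, 4> Sorted;
  if (!sortMemAccesses(VL, DL, SE, Sorted))
    Sorted.clear();
  return Sorted;
}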
31 changes: 8 additions & 23 deletions lib/Analysis/LoopAccessAnalysis.cpp
@@ -1040,8 +1040,7 @@ static unsigned getAddressSpaceOperand(Value *I) {
 
 bool llvm::sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                            ScalarEvolution &SE,
-                           SmallVectorImpl<Value *> &Sorted,
-                           SmallVectorImpl<unsigned> *Mask) {
+                           SmallVectorImpl<Value *> &Sorted) {
   SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
   OffValPairs.reserve(VL.size());
   Sorted.reserve(VL.size());
@@ -1051,6 +1050,7 @@ bool llvm::sortMemAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
   Value *Ptr0 = getPointerOperand(VL[0]);
   const SCEV *Scev0 = SE.getSCEV(Ptr0);
   Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+
   for (auto *Val : VL) {
     // The only kind of access we care about here is load.
     if (!isa<LoadInst>(Val))
@@ -1077,29 +1077,14 @@
     OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
   }
 
-  SmallVector<unsigned, 4> UseOrder(VL.size());
-  for (unsigned i = 0; i < VL.size(); i++)
-    UseOrder[i] = i;
-
-  // Sort the memory accesses and keep the order of their uses in UseOrder.
-  std::sort(UseOrder.begin(), UseOrder.end(),
-            [&OffValPairs](unsigned Left, unsigned Right) {
-              return OffValPairs[Left].first < OffValPairs[Right].first;
+  std::sort(OffValPairs.begin(), OffValPairs.end(),
+            [](const std::pair<int64_t, Value *> &Left,
+               const std::pair<int64_t, Value *> &Right) {
+              return Left.first < Right.first;
             });
 
-  for (unsigned i = 0; i < VL.size(); i++)
-    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
-
-  // Sort UseOrder to compute the Mask.
-  if (Mask) {
-    Mask->reserve(VL.size());
-    for (unsigned i = 0; i < VL.size(); i++)
-      Mask->emplace_back(i);
-    std::sort(Mask->begin(), Mask->end(),
-              [&UseOrder](unsigned Left, unsigned Right) {
-                return UseOrder[Left] < UseOrder[Right];
-              });
-  }
+  for (auto &it : OffValPairs)
+    Sorted.push_back(it.second);
 
   return true;
 }
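To make the reverted logic concrete, here is a self-contained sketch in plain C++ (not LLVM code; the offsets are made up for illustration) of the two sorts the removed block performed: UseOrder orders access indices by offset, and Mask inverts that permutation to produce the shuffle mask for the actual memory access order.

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  // Hypothetical byte offsets of three loads, in use (IR) order.
  std::vector<long> Offsets = {8, 0, 4};

  // UseOrder[k] = index of the access with the k-th smallest offset,
  // mirroring the reverted std::sort over indices.
  std::vector<unsigned> UseOrder = {0, 1, 2};
  std::sort(UseOrder.begin(), UseOrder.end(),
            [&](unsigned L, unsigned R) { return Offsets[L] < Offsets[R]; });
  // UseOrder is now {1, 2, 0}: offsets 0, 4, 8.

  // Mask sorts indices by their value in UseOrder, which inverts the
  // permutation: Mask[i] = position of original access i in sorted order.
  std::vector<unsigned> Mask = {0, 1, 2};
  std::sort(Mask.begin(), Mask.end(),
            [&](unsigned L, unsigned R) { return UseOrder[L] < UseOrder[R]; });

  for (unsigned I : Mask)
    std::printf("%u ", I); // prints "2 0 1"
  std::printf("\n");
  return 0;
}

The post-revert code instead sorts the (offset, value) pairs directly and no longer exposes a mask to callers.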
