Skip to content

Commit

Permalink
Preserve the assumption cache more often
Browse files Browse the repository at this point in the history
We were clearing the assumption cache out in LoopUnswitch and
InlineFunction instead of attempting to preserve it.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@278860 91177308-0d34-0410-b5e6-96231b3b80d8
  • Loading branch information
majnemer committed Aug 16, 2016
1 parent 764b87b commit 4184eb5
Show file tree
Hide file tree
Showing 2 changed files with 29 additions and 18 deletions.
13 changes: 7 additions & 6 deletions lib/Transforms/Scalar/LoopUnswitch.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1078,10 +1078,6 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
F->getBasicBlockList(),
NewBlocks[0]->getIterator(), F->end());

// FIXME: We could register any cloned assumptions instead of clearing the
// whole function's cache.
AC->clear();

// Now we create the new Loop object for the versioned loop.
Loop *NewLoop = CloneLoop(L, L->getParentLoop(), VMap, LI, LPM);

Expand Down Expand Up @@ -1131,10 +1127,15 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
}

// Rewrite the code to refer to itself.
for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i)
for (Instruction &I : *NewBlocks[i])
for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i) {
for (Instruction &I : *NewBlocks[i]) {
RemapInstruction(&I, VMap,
RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);
if (auto *II = dyn_cast<IntrinsicInst>(&I))
if (II->getIntrinsicID() == Intrinsic::assume)
AC->registerAssumption(II);
}
}

// Rewrite the original preheader to select between versions of the loop.
BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
Expand Down
34 changes: 22 additions & 12 deletions lib/Transforms/Utils/InlineFunction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1055,6 +1055,9 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
if (!PreserveAlignmentAssumptions)
return;
AssumptionCache *AC = IFI.GetAssumptionCache
? &(*IFI.GetAssumptionCache)(*CS.getCaller())
: nullptr;
auto &DL = CS.getCaller()->getParent()->getDataLayout();

// To avoid inserting redundant assumptions, we should check for assumptions
Expand All @@ -1077,13 +1080,13 @@ static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
// If we can already prove the asserted alignment in the context of the
// caller, then don't bother inserting the assumption.
Value *Arg = CS.getArgument(I->getArgNo());
if (getKnownAlignment(Arg, DL, CS.getInstruction(),
&(*IFI.GetAssumptionCache)(*CS.getCaller()),
&DT) >= Align)
if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align)
continue;

IRBuilder<>(CS.getInstruction())
.CreateAlignmentAssumption(DL, Arg, Align);
CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
.CreateAlignmentAssumption(DL, Arg, Align);
if (AC)
AC->registerAssumption(NewAssumption);
}
}
}
Expand Down Expand Up @@ -1194,12 +1197,13 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
if (ByValAlignment <= 1) // 0 = unspecified, 1 = no particular alignment.
return Arg;

AssumptionCache *AC =
IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
const DataLayout &DL = Caller->getParent()->getDataLayout();

// If the pointer is already known to be sufficiently aligned, or if we can
// round it up to a larger alignment, then we don't need a temporary.
if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
&(*IFI.GetAssumptionCache)(*Caller)) >=
if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
ByValAlignment)
return Arg;

Expand Down Expand Up @@ -1609,10 +1613,15 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// Propagate llvm.mem.parallel_loop_access if necessary.
PropagateParallelLoopAccessMetadata(CS, VMap);

// FIXME: We could register any cloned assumptions instead of clearing the
// whole function's cache.
// Register any cloned assumptions.
if (IFI.GetAssumptionCache)
(*IFI.GetAssumptionCache)(*Caller).clear();
for (BasicBlock &NewBlock :
make_range(FirstNewBlock->getIterator(), Caller->end()))
for (Instruction &I : NewBlock) {
if (auto *II = dyn_cast<IntrinsicInst>(&I))
if (II->getIntrinsicID() == Intrinsic::assume)
(*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
}
}

// If there are any alloca instructions in the block that used to be the entry
Expand Down Expand Up @@ -2130,9 +2139,10 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
if (PHI) {
AssumptionCache *AC =
IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
auto &DL = Caller->getParent()->getDataLayout();
if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
&(*IFI.GetAssumptionCache)(*Caller))) {
if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
}
Expand Down

0 comments on commit 4184eb5

Please sign in to comment.