Preserve the assumption cache more often

We were clearing it out in LoopUnswitch and InlineFunction instead of
attempting to preserve it.

llvm-svn: 278860
David Majnemer
2016-08-16 22:07:32 +00:00
parent e47df1ac62
commit 744a8753db
2 changed files with 29 additions and 18 deletions
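
In short: rather than dropping the caller's entire AssumptionCache whenever a function is inlined, the inliner now walks only the freshly cloned blocks and registers each cloned llvm.assume individually. A condensed sketch of that pattern, lifted from the InlineFunction hunk below (2016-era C++ API; IFI, FirstNewBlock, and Caller are locals of the surrounding function):

    // Register each cloned llvm.assume with the caller's cache instead of
    // clearing the whole cache and forcing a rescan.
    if (IFI.GetAssumptionCache)
      for (BasicBlock &NewBlock :
           make_range(FirstNewBlock->getIterator(), Caller->end()))
        for (Instruction &I : NewBlock)
          if (auto *II = dyn_cast<IntrinsicInst>(&I))
            if (II->getIntrinsicID() == Intrinsic::assume)
              (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);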

lib/Transforms/Utils/InlineFunction.cpp

@@ -1055,6 +1055,9 @@ static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
 static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
   if (!PreserveAlignmentAssumptions)
     return;
+  AssumptionCache *AC = IFI.GetAssumptionCache
+                            ? &(*IFI.GetAssumptionCache)(*CS.getCaller())
+                            : nullptr;
   auto &DL = CS.getCaller()->getParent()->getDataLayout();
 
   // To avoid inserting redundant assumptions, we should check for assumptions
@@ -1077,13 +1080,13 @@ static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
       // If we can already prove the asserted alignment in the context of the
       // caller, then don't bother inserting the assumption.
       Value *Arg = CS.getArgument(I->getArgNo());
-      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
-                            &(*IFI.GetAssumptionCache)(*CS.getCaller()),
-                            &DT) >= Align)
+      if (getKnownAlignment(Arg, DL, CS.getInstruction(), AC, &DT) >= Align)
         continue;
 
-      IRBuilder<>(CS.getInstruction())
-          .CreateAlignmentAssumption(DL, Arg, Align);
+      CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
+                                    .CreateAlignmentAssumption(DL, Arg, Align);
+      if (AC)
+        AC->registerAssumption(NewAssumption);
     }
   }
 }
@@ -1194,12 +1197,13 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
   if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
     return Arg;
 
+  AssumptionCache *AC =
+      IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
   const DataLayout &DL = Caller->getParent()->getDataLayout();
 
   // If the pointer is already known to be sufficiently aligned, or if we can
   // round it up to a larger alignment, then we don't need a temporary.
-  if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
-                                 &(*IFI.GetAssumptionCache)(*Caller)) >=
+  if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall, AC) >=
       ByValAlignment)
     return Arg;
@@ -1609,10 +1613,15 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
     // Propagate llvm.mem.parallel_loop_access if necessary.
     PropagateParallelLoopAccessMetadata(CS, VMap);
 
-    // FIXME: We could register any cloned assumptions instead of clearing the
-    // whole function's cache.
+    // Register any cloned assumptions.
     if (IFI.GetAssumptionCache)
-      (*IFI.GetAssumptionCache)(*Caller).clear();
+      for (BasicBlock &NewBlock :
+           make_range(FirstNewBlock->getIterator(), Caller->end()))
+        for (Instruction &I : NewBlock) {
+          if (auto *II = dyn_cast<IntrinsicInst>(&I))
+            if (II->getIntrinsicID() == Intrinsic::assume)
+              (*IFI.GetAssumptionCache)(*Caller).registerAssumption(II);
+        }
   }
 
   // If there are any alloca instructions in the block that used to be the entry
@@ -2130,9 +2139,10 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
   // the entries are the same or undef).  If so, remove the PHI so it doesn't
   // block other optimizations.
   if (PHI) {
+    AssumptionCache *AC =
+        IFI.GetAssumptionCache ? &(*IFI.GetAssumptionCache)(*Caller) : nullptr;
     auto &DL = Caller->getParent()->getDataLayout();
-    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
-                                       &(*IFI.GetAssumptionCache)(*Caller))) {
+    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr, AC)) {
       PHI->replaceAllUsesWith(V);
       PHI->eraseFromParent();
     }
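
The same bookkeeping applies when the inliner materializes a brand-new assumption: the llvm.assume call returned by CreateAlignmentAssumption is handed to the cache so that later queries such as getKnownAlignment observe it. Condensed from the alignment hunks above; note the null guard, since IFI.GetAssumptionCache is an optional callback and the cache may be absent:

    AssumptionCache *AC = IFI.GetAssumptionCache
                              ? &(*IFI.GetAssumptionCache)(*CS.getCaller())
                              : nullptr;
    CallInst *NewAssumption = IRBuilder<>(CS.getInstruction())
                                  .CreateAlignmentAssumption(DL, Arg, Align);
    if (AC)  // the cache is optional; guard every use
      AC->registerAssumption(NewAssumption);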