[SLP] Revert everything that has to do with memory access sorting.
This reverts r293386, r294027, r294029 and r296411.

Turns out the SLP tree isn't actually a "tree" and we don't handle accessing
the same packet of loads in several different orders well, causing miscompiles.
Revert until we can fix this properly.

llvm-svn: 297493
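For context, a minimal sketch of the failure pattern (a hypothetical reduction, assumed for illustration, not the original reproducer): two bundles consume the same packet of loads in different lane orders, and a load entry that carries only a single shuffle mask cannot serve both orders.

    // Hypothetical reduction (assumed; not from the original report). The
    // four loads of p[0..3] form one load packet. The stores to a[] consume
    // them in memory order, while the stores to b[] consume the same loads
    // in a jumbled order. A load TreeEntry recording just one NeedToShuffle
    // mask cannot represent both use orders, which is how the reverted
    // sorting support could emit a wrong shuffle and miscompile.
    void f(int *p, int *a, int *b) {
      a[0] = p[0]; a[1] = p[1]; a[2] = p[2]; a[3] = p[3]; // in-order use
      b[0] = p[2]; b[1] = p[3]; b[2] = p[0]; b[3] = p[1]; // jumbled use
    }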
@@ -428,10 +428,8 @@ private:
   /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
   bool canReuseExtract(ArrayRef<Value *> VL, unsigned Opcode) const;
 
-  /// Vectorize a single entry in the tree. VL icontains all isomorphic scalars
-  /// in order of its usage in a user program, for example ADD1, ADD2 and so on
-  /// or LOAD1 , LOAD2 etc.
-  Value *vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E);
+  /// Vectorize a single entry in the tree.
+  Value *vectorizeTree(TreeEntry *E);
 
   /// Vectorize a single entry in the tree, starting in \p VL.
   Value *vectorizeTree(ArrayRef<Value *> VL);
@@ -473,7 +471,7 @@ private:
   struct TreeEntry {
     TreeEntry(std::vector<TreeEntry> &Container)
         : Scalars(), VectorizedValue(nullptr), NeedToGather(0),
-          NeedToShuffle(0), Container(Container) {}
+          Container(Container) {}
 
     /// \returns true if the scalars in VL are equal to this entry.
     bool isSame(ArrayRef<Value *> VL) const {
@@ -481,17 +479,6 @@ private:
       return std::equal(VL.begin(), VL.end(), Scalars.begin());
     }
 
-    /// \returns true if the scalars in VL are found in this tree entry.
-    bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
-                        ScalarEvolution &SE) const {
-      assert(VL.size() == Scalars.size() && "Invalid size");
-      SmallVector<Value *, 8> List;
-      if (!sortMemAccesses(VL, DL, SE, List))
-        return false;
-
-      return std::equal(List.begin(), List.end(), Scalars.begin());
-    }
-
     /// A vector of scalars.
     ValueList Scalars;
 
@@ -501,9 +488,6 @@ private:
     /// Do we need to gather this sequence ?
     bool NeedToGather;
 
-    /// Do we need to shuffle the load ?
-    bool NeedToShuffle;
-
     /// Points back to the VectorizableTree.
     ///
     /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
@@ -519,13 +503,12 @@ private:
 
   /// Create a new VectorizableTree entry.
   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
-                          bool NeedToShuffle, int &UserTreeIdx) {
+                          int &UserTreeIdx) {
     VectorizableTree.emplace_back(VectorizableTree);
     int idx = VectorizableTree.size() - 1;
     TreeEntry *Last = &VectorizableTree[idx];
     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
     Last->NeedToGather = !Vectorized;
-    Last->NeedToShuffle = NeedToShuffle;
     if (Vectorized) {
       for (int i = 0, e = VL.size(); i != e; ++i) {
         assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
@@ -1129,21 +1112,21 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
 
   if (Depth == RecursionMaxDepth) {
     DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
-    newTreeEntry(VL, false, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
 
   // Don't handle vectors.
   if (VL[0]->getType()->isVectorTy()) {
     DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
-    newTreeEntry(VL, false, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
 
   if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
     if (SI->getValueOperand()->getType()->isVectorTy()) {
       DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
       return;
     }
   unsigned Opcode = getSameOpcode(VL);
@@ -1160,7 +1143,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
   // If all of the operands are identical or constant we have a simple solution.
   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !Opcode) {
     DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
-    newTreeEntry(VL, false, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
 
@@ -1172,7 +1155,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     if (EphValues.count(VL[i])) {
       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
             ") is ephemeral.\n");
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
       return;
     }
   }
@@ -1185,7 +1168,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
       if (E->Scalars[i] != VL[i]) {
         DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
-        newTreeEntry(VL, false, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx);
         return;
       }
     }
@@ -1201,7 +1184,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
      return;
    }
  }
@@ -1211,7 +1194,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
     if (MustGather.count(VL[i])) {
       DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
       return;
     }
   }
@@ -1225,7 +1208,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     // Don't go into unreachable blocks. They may contain instructions with
     // dependency cycles which confuse the final scheduling.
     DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
-    newTreeEntry(VL, false, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
 
@@ -1234,7 +1217,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     for (unsigned j = i+1; j < e; ++j)
       if (VL[i] == VL[j]) {
         DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
-        newTreeEntry(VL, false, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx);
         return;
       }
 
@@ -1249,7 +1232,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     assert((!BS.getScheduleData(VL[0]) ||
             !BS.getScheduleData(VL[0])->isPartOfBundle()) &&
            "tryScheduleBundle should cancelScheduling on failure");
-    newTreeEntry(VL, false, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx);
     return;
   }
   DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
@@ -1266,12 +1249,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           if (Term) {
             DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
             BS.cancelScheduling(VL);
-            newTreeEntry(VL, false, false, UserTreeIdx);
+            newTreeEntry(VL, false, UserTreeIdx);
             return;
           }
         }
 
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
 
       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
@@ -1293,7 +1276,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       } else {
         BS.cancelScheduling(VL);
       }
-      newTreeEntry(VL, Reuse, false, UserTreeIdx);
+      newTreeEntry(VL, Reuse, UserTreeIdx);
       return;
     }
     case Instruction::Load: {
@@ -1309,7 +1292,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       if (DL->getTypeSizeInBits(ScalarTy) !=
           DL->getTypeAllocSizeInBits(ScalarTy)) {
         BS.cancelScheduling(VL);
-        newTreeEntry(VL, false, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx);
         DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
         return;
       }
@@ -1320,13 +1303,15 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         LoadInst *L = cast<LoadInst>(VL[i]);
         if (!L->isSimple()) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
           return;
         }
       }
 
       // Check if the loads are consecutive, reversed, or neither.
+      // TODO: What we really want is to sort the loads, but for now, check
+      // the two likely directions.
       bool Consecutive = true;
       bool ReverseConsecutive = true;
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
@@ -1340,7 +1325,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
 
       if (Consecutive) {
         ++NumLoadsWantToKeepOrder;
-        newTreeEntry(VL, true, false, UserTreeIdx);
+        newTreeEntry(VL, true, UserTreeIdx);
         DEBUG(dbgs() << "SLP: added a vector of loads.\n");
         return;
       }
@@ -1354,26 +1339,8 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           break;
         }
 
-      if (VL.size() > 2 && !ReverseConsecutive) {
-        bool ShuffledLoads = true;
-        SmallVector<Value *, 8> Sorted;
-        if (sortMemAccesses(VL, *DL, *SE, Sorted)) {
-          auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
-          for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
-            if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
-              ShuffledLoads = false;
-              break;
-            }
-          }
-          if (ShuffledLoads) {
-            newTreeEntry(NewVL, true, true, UserTreeIdx);
-            return;
-          }
-        }
-      }
-
       BS.cancelScheduling(VL);
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
 
       if (ReverseConsecutive) {
         ++NumLoadsWantToChangeOrder;
@@ -1396,16 +1363,16 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     case Instruction::FPTrunc:
     case Instruction::BitCast: {
       Type *SrcTy = VL0->getOperand(0)->getType();
-      for (Value *Val : VL) {
-        Type *Ty = cast<Instruction>(Val)->getOperand(0)->getType();
+      for (unsigned i = 0; i < VL.size(); ++i) {
+        Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
         if (Ty != SrcTy || !isValidElementType(Ty)) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
           return;
         }
       }
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of casts.\n");
 
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1428,13 +1395,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         if (Cmp->getPredicate() != P0 ||
             Cmp->getOperand(0)->getType() != ComparedTy) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
           return;
         }
       }
 
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of compares.\n");
 
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1466,7 +1433,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     case Instruction::And:
     case Instruction::Or:
     case Instruction::Xor: {
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
 
       // Sort operands of the instructions so that each side is more likely to
@@ -1491,11 +1458,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     }
     case Instruction::GetElementPtr: {
       // We don't combine GEPs with complicated (nested) indexing.
-      for (Value *Val : VL) {
-        if (cast<Instruction>(Val)->getNumOperands() != 2) {
+      for (unsigned j = 0; j < VL.size(); ++j) {
+        if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
           DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           return;
         }
       }
@@ -1503,29 +1470,29 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       // We can't combine several GEPs into one vector if they operate on
       // different types.
       Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
-      for (Value *Val : VL) {
-        Type *CurTy = cast<Instruction>(Val)->getOperand(0)->getType();
+      for (unsigned j = 0; j < VL.size(); ++j) {
+        Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
         if (Ty0 != CurTy) {
           DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           return;
         }
       }
 
       // We don't combine GEPs with non-constant indexes.
-      for (Value *Val : VL) {
-        auto Op = cast<Instruction>(Val)->getOperand(1);
+      for (unsigned j = 0; j < VL.size(); ++j) {
+        auto Op = cast<Instruction>(VL[j])->getOperand(1);
         if (!isa<ConstantInt>(Op)) {
           DEBUG(
               dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           return;
         }
       }
 
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
       for (unsigned i = 0, e = 2; i < e; ++i) {
         ValueList Operands;
@@ -1542,12 +1509,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
           return;
         }
 
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a vector of stores.\n");
 
       ValueList Operands;
@@ -1565,7 +1532,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
       if (!isTriviallyVectorizable(ID)) {
         BS.cancelScheduling(VL);
-        newTreeEntry(VL, false, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx);
         DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
         return;
       }
@@ -1579,7 +1546,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                        << "\n");
           return;
@@ -1590,7 +1557,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           Value *A1J = CI2->getArgOperand(1);
           if (A1I != A1J) {
             BS.cancelScheduling(VL);
-            newTreeEntry(VL, false, false, UserTreeIdx);
+            newTreeEntry(VL, false, UserTreeIdx);
             DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                          << " argument "<< A1I<<"!=" << A1J
                          << "\n");
@@ -1603,14 +1570,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
           BS.cancelScheduling(VL);
-          newTreeEntry(VL, false, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx);
           DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI << "!="
                        << *VL[i] << '\n');
           return;
         }
       }
 
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
         ValueList Operands;
         // Prepare the operand vector.
@@ -1627,11 +1594,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       // then do not vectorize this instruction.
       if (!isAltShuffle) {
         BS.cancelScheduling(VL);
-        newTreeEntry(VL, false, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx);
         DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
         return;
       }
-      newTreeEntry(VL, true, false, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx);
       DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
 
       // Reorder operands if reordering would enable vectorization.
@@ -1655,7 +1622,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     }
     default:
       BS.cancelScheduling(VL);
-      newTreeEntry(VL, false, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx);
       DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
       return;
   }
@@ -1894,10 +1861,6 @@ int BoUpSLP::getEntryCost(TreeEntry *E) {
           TTI->getMemoryOpCost(Instruction::Load, ScalarTy, alignment, 0);
       int VecLdCost = TTI->getMemoryOpCost(Instruction::Load,
                                            VecTy, alignment, 0);
-      if (E->NeedToShuffle) {
-        VecLdCost += TTI->getShuffleCost(
-            TargetTransformInfo::SK_PermuteSingleSrc, VecTy, 0);
-      }
       return VecLdCost - ScalarLdCost;
     }
     case Instruction::Store: {
@@ -2469,8 +2432,8 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
   if (ScalarToTreeEntry.count(VL[0])) {
     int Idx = ScalarToTreeEntry[VL[0]];
     TreeEntry *E = &VectorizableTree[Idx];
-    if (E->isSame(VL) || (E->NeedToShuffle && E->isFoundJumbled(VL, *DL, *SE)))
-      return vectorizeTree(VL, E);
+    if (E->isSame(VL))
+      return vectorizeTree(E);
   }
 
   Type *ScalarTy = VL[0]->getType();
@@ -2481,10 +2444,10 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
   return Gather(VL, VecTy);
 }
 
-Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E) {
+Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
   IRBuilder<>::InsertPointGuard Guard(Builder);
 
-  if (E->VectorizedValue && !E->NeedToShuffle) {
+  if (E->VectorizedValue) {
     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
     return E->VectorizedValue;
   }
@@ -2718,35 +2681,7 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, TreeEntry *E) {
       LI->setAlignment(Alignment);
       E->VectorizedValue = LI;
       ++NumVectorInstructions;
-      propagateMetadata(LI, E->Scalars);
-
-      // As program order of scalar loads are jumbled, the vectorized 'load'
-      // must be followed by a 'shuffle' with the required jumbled mask.
-      if (!VL.empty() && (E->NeedToShuffle)) {
-        assert(VL.size() == E->Scalars.size() &&
-               "Equal number of scalars expected");
-        SmallVector<Constant *, 8> Mask;
-        for (Value *Val : VL) {
-          if (ScalarToTreeEntry.count(Val)) {
-            int Idx = ScalarToTreeEntry[Val];
-            TreeEntry *E = &VectorizableTree[Idx];
-            for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
-              if (E->Scalars[Lane] == Val) {
-                Mask.push_back(Builder.getInt32(Lane));
-                break;
-              }
-            }
-          }
-        }
-
-        // Generate shuffle for jumbled memory access
-        Value *Undef = UndefValue::get(VecTy);
-        Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
-                                                  ConstantVector::get(Mask));
-        return Shuf;
-      }
-
-      return LI;
+      return propagateMetadata(LI, E->Scalars);
     }
     case Instruction::Store: {
       StoreInst *SI = cast<StoreInst>(VL0);
@@ -2927,7 +2862,7 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
   }
 
   Builder.SetInsertPoint(&F->getEntryBlock().front());
-  auto *VectorRoot = vectorizeTree(ArrayRef<Value *>(), &VectorizableTree[0]);
+  auto *VectorRoot = vectorizeTree(&VectorizableTree[0]);
 
   // If the vectorized tree can be rewritten in a smaller type, we truncate the
   // vectorized root. InstCombine will then rewrite the entire expression. We