[SLP] Vectorize jumbled memory loads.

Summary:
This patch tries to vectorize loads of consecutive memory accesses that are
accessed in a non-consecutive (jumbled) order. An earlier attempt was made with
patch D26905, which was reverted due to a basic issue with representing the
'use mask' of jumbled accesses.

This patch fixes the mask representation by recording the 'use mask' in the user tree entry.
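For example, given loads of a[i+2], a[i+0], a[i+1] and a[i+3] in program
order, the bundle can be vectorized as one consecutive load of a[i+0..3]
followed by a shufflevector with mask <2,0,1,3> that restores program order.
A minimal standalone sketch of the mask computation, in plain C++ and
independent of the LLVM data structures in the patch (the function name is
illustrative only):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Given the element offsets of each load in program order, compute the
// shuffle mask that maps the lanes of the sorted (consecutive) vector load
// back to program order. Offsets {2, 0, 1, 3} yield mask {2, 0, 1, 3}.
std::vector<unsigned> computeUseMask(const std::vector<int64_t> &Offsets) {
  // UseOrder[j] is the program-order position of the j-th smallest address.
  std::vector<unsigned> UseOrder(Offsets.size());
  std::iota(UseOrder.begin(), UseOrder.end(), 0u);
  std::sort(UseOrder.begin(), UseOrder.end(),
            [&](unsigned L, unsigned R) { return Offsets[L] < Offsets[R]; });
  // The mask is the inverse permutation: lane i of the shuffled result takes
  // the sorted lane that holds the value program position i expects.
  std::vector<unsigned> Mask(Offsets.size());
  for (unsigned j = 0; j < UseOrder.size(); ++j)
    Mask[UseOrder[j]] = j;
  return Mask;
}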

Change-Id: I9fe7f5045f065d84c126fa307ef6ebe0787296df

Reviewers: mkuper, loladiro, Ayal, zvi, danielcdh

Reviewed By: Ayal

Subscribers: hans, mzolotukhin

Differential Revision: https://reviews.llvm.org/D36130

llvm-svn: 314806
Author: Mohammad Shahid
Date: 2017-10-03 15:28:48 +00:00
Parent: 80a5a5c1e0
Commit: 9bab937b54
7 changed files with 373 additions and 136 deletions

---------- changed file 1 of 7 ----------

@@ -667,6 +667,21 @@ int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
                      const ValueToValueMap &StridesMap = ValueToValueMap(),
                      bool Assume = false, bool ShouldCheckWrap = true);
 
+/// \brief Attempt to sort the 'loads' in \p VL and return the sorted values in
+/// \p Sorted.
+///
+/// Returns 'false' if sorting is not legal or feasible; otherwise returns
+/// 'true'. If \p Mask is not null, it also returns in \p Mask the shuffle mask
+/// for the actual memory access order.
+///
+/// For example, for a given VL of memory accesses in program order, a[i+2],
+/// a[i+0], a[i+1] and a[i+3], this function sorts the VL, saves the sorted
+/// values in 'Sorted' as a[i+0], a[i+1], a[i+2], a[i+3], and saves the mask
+/// for the actual memory accesses in program order in 'Mask' as <2,0,1,3>.
+bool sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+                      ScalarEvolution &SE, SmallVectorImpl<Value *> &Sorted,
+                      SmallVectorImpl<unsigned> *Mask = nullptr);
+
 /// \brief Returns true if the memory operations \p A and \p B are consecutive.
 /// This is a simple API that does not depend on the analysis pass.
 bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
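To make the documented contract concrete, here is a tiny self-contained check
of the mask semantics (the integer values are illustrative stand-ins for the
loaded scalars, not LLVM IR):

#include <cassert>
#include <vector>

int main() {
  // Program-order loads a[i+2], a[i+0], a[i+1], a[i+3], encoded as
  // value = 10 * offset so the permutation is visible.
  std::vector<int> VL = {20, 0, 10, 30};
  std::vector<int> Sorted = {0, 10, 20, 30}; // what sortLoadAccesses returns
  std::vector<unsigned> Mask = {2, 0, 1, 3}; // the returned use mask
  // The mask maps sorted lanes back to program order.
  for (unsigned i = 0; i < VL.size(); ++i)
    assert(VL[i] == Sorted[Mask[i]]);
  return 0;
}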

---------- changed file 2 of 7 ----------

@@ -1107,6 +1107,77 @@ static unsigned getAddressSpaceOperand(Value *I) {
   return -1;
 }
 
+// TODO: This API can be improved by using the permutation of the given width
+// as the accesses are entered into the map.
+bool llvm::sortLoadAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
+                            ScalarEvolution &SE,
+                            SmallVectorImpl<Value *> &Sorted,
+                            SmallVectorImpl<unsigned> *Mask) {
+  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
+  OffValPairs.reserve(VL.size());
+  Sorted.reserve(VL.size());
+
+  // Walk over the pointers, and map each of them to an offset relative to
+  // the first pointer in the array.
+  Value *Ptr0 = getPointerOperand(VL[0]);
+  const SCEV *Scev0 = SE.getSCEV(Ptr0);
+  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);
+  PointerType *PtrTy = dyn_cast<PointerType>(Ptr0->getType());
+  uint64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
+
+  for (auto *Val : VL) {
+    // The only kind of access we care about here is load.
+    if (!isa<LoadInst>(Val))
+      return false;
+
+    Value *Ptr = getPointerOperand(Val);
+    assert(Ptr && "Expected value to have a pointer operand.");
+    // If a pointer refers to a different underlying object, bail - the
+    // pointers are by definition incomparable.
+    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
+    if (CurrObj != Obj0)
+      return false;
+
+    const SCEVConstant *Diff =
+        dyn_cast<SCEVConstant>(SE.getMinusSCEV(SE.getSCEV(Ptr), Scev0));
+    // The pointers may not have a constant offset from each other, or SCEV
+    // may just not be smart enough to figure out that they do. Regardless,
+    // there's nothing we can do.
+    if (!Diff || static_cast<unsigned>(Diff->getAPInt().abs().getSExtValue()) >
+                     (VL.size() - 1) * Size)
+      return false;
+
+    OffValPairs.emplace_back(Diff->getAPInt().getSExtValue(), Val);
+  }
+
+  SmallVector<unsigned, 4> UseOrder(VL.size());
+  for (unsigned i = 0; i < VL.size(); i++)
+    UseOrder[i] = i;
+
+  // Sort the memory accesses and keep the order of their uses in UseOrder.
+  std::sort(UseOrder.begin(), UseOrder.end(),
+            [&OffValPairs](unsigned Left, unsigned Right) {
+              return OffValPairs[Left].first < OffValPairs[Right].first;
+            });
+
+  for (unsigned i = 0; i < VL.size(); i++)
+    Sorted.emplace_back(OffValPairs[UseOrder[i]].second);
+
+  // Sort UseOrder to compute the Mask.
+  if (Mask) {
+    Mask->reserve(VL.size());
+    for (unsigned i = 0; i < VL.size(); i++)
+      Mask->emplace_back(i);
+    std::sort(Mask->begin(), Mask->end(),
+              [&UseOrder](unsigned Left, unsigned Right) {
+                return UseOrder[Left] < UseOrder[Right];
+              });
+  }
+
+  return true;
+}
+
 /// Returns true if the memory operations \p A and \p B are consecutive.
 bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                                ScalarEvolution &SE, bool CheckType) {
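A note on the implementation above: the second std::sort only inverts the
UseOrder permutation, so the TODO could be addressed by a direct O(n)
inversion. A sketch of the equivalent formulation, assuming the usual
llvm/ADT includes (this helper is hypothetical, not part of the patch):

// UseOrder[j] is the program-order index of the j-th smallest address; since
// it is a permutation, sorting the identity range by UseOrder, as done
// above, is the same as inverting the permutation directly.
SmallVector<unsigned, 4> invertUseOrder(ArrayRef<unsigned> UseOrder) {
  SmallVector<unsigned, 4> Mask(UseOrder.size());
  for (unsigned j = 0, e = UseOrder.size(); j != e; ++j)
    Mask[UseOrder[j]] = j;
  return Mask;
}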

---------- changed file 3 of 7 ----------

@@ -637,17 +637,23 @@ private:
   int getEntryCost(TreeEntry *E);
 
   /// This is the recursive part of buildTree.
-  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int);
+  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth, int UserIndx = -1,
+                     int OpdNum = 0);
 
   /// \returns True if the ExtractElement/ExtractValue instructions in VL can
   /// be vectorized to use the original vector (or aggregate "bitcast" to a vector).
   bool canReuseExtract(ArrayRef<Value *> VL, Value *OpValue) const;
 
-  /// Vectorize a single entry in the tree.
-  Value *vectorizeTree(TreeEntry *E);
+  /// Vectorize a single entry in the tree. \p OpdNum indicates the ordinality
+  /// of the operand corresponding to this tree entry \p E for the user tree
+  /// entry indicated by \p UserIndx; in other words,
+  /// "E == TreeEntry[UserIndx].getOperand(OpdNum)".
+  Value *vectorizeTree(TreeEntry *E, int OpdNum = 0, int UserIndx = -1);
 
-  /// Vectorize a single entry in the tree, starting in \p VL.
-  Value *vectorizeTree(ArrayRef<Value *> VL);
+  /// Vectorize a single entry in the tree, starting in \p VL. \p OpdNum
+  /// indicates the ordinality of the operand corresponding to the \p VL of
+  /// scalar values for the user indicated by \p UserIndx this \p VL feeds into.
+  Value *vectorizeTree(ArrayRef<Value *> VL, int OpdNum = 0, int UserIndx = -1);
 
   /// \returns the pointer to the vectorized value if \p VL is already
   /// vectorized, or NULL. They may happen in cycles.
@@ -685,7 +691,7 @@ private:
                              SmallVectorImpl<Value *> &Left,
                              SmallVectorImpl<Value *> &Right);
   struct TreeEntry {
-    TreeEntry(std::vector<TreeEntry> &Container) : Container(Container) {}
+    TreeEntry(std::vector<TreeEntry> &Container)
+        : ShuffleMask(), Container(Container) {}
 
     /// \returns true if the scalars in VL are equal to this entry.
     bool isSame(ArrayRef<Value *> VL) const {
@@ -693,6 +699,16 @@ private:
       return std::equal(VL.begin(), VL.end(), Scalars.begin());
     }
 
+    /// \returns true if the scalars in VL are found in this tree entry.
+    bool isFoundJumbled(ArrayRef<Value *> VL, const DataLayout &DL,
+                        ScalarEvolution &SE) const {
+      assert(VL.size() == Scalars.size() && "Invalid size");
+      SmallVector<Value *, 8> List;
+      if (!sortLoadAccesses(VL, DL, SE, List))
+        return false;
+      return std::equal(List.begin(), List.end(), Scalars.begin());
+    }
+
     /// A vector of scalars.
     ValueList Scalars;
@@ -702,6 +718,14 @@ private:
     /// Do we need to gather this sequence ?
     bool NeedToGather = false;
 
+    /// Records optional shuffle mask for the uses of jumbled memory accesses.
+    /// For example, a non-empty ShuffleMask[1] represents the permutation of
+    /// lanes that operand #1 of this vectorized instruction should undergo
+    /// before feeding this vectorized instruction, whereas an empty
+    /// ShuffleMask[0] indicates that the lanes of operand #0 of this
+    /// vectorized instruction need not be permuted at all.
+    SmallVector<unsigned, 4> ShuffleMask[3];
+
     /// Points back to the VectorizableTree.
     ///
     /// Only used for Graphviz right now. Unfortunately GraphTrait::NodeRef has
@@ -717,12 +741,25 @@ private:
   /// Create a new VectorizableTree entry.
   TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized,
-                          int &UserTreeIdx) {
+                          int &UserTreeIdx, const InstructionsState &S,
+                          ArrayRef<unsigned> ShuffleMask = None,
+                          int OpdNum = 0) {
+    assert((!Vectorized || S.Opcode != 0) &&
+           "Vectorized TreeEntry without opcode");
     VectorizableTree.emplace_back(VectorizableTree);
     int idx = VectorizableTree.size() - 1;
     TreeEntry *Last = &VectorizableTree[idx];
     Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
     Last->NeedToGather = !Vectorized;
+
+    TreeEntry *UserEntry = &VectorizableTree[UserTreeIdx];
+    if (!ShuffleMask.empty()) {
+      assert(UserEntry->ShuffleMask[OpdNum].empty() && "Mask already present!");
+      UserEntry->ShuffleMask[OpdNum].insert(
+          UserEntry->ShuffleMask[OpdNum].begin(), ShuffleMask.begin(),
+          ShuffleMask.end());
+    }
     if (Vectorized) {
       for (int i = 0, e = VL.size(); i != e; ++i) {
         assert(!getTreeEntry(VL[i]) && "Scalar already in tree!");
@@ -1373,34 +1410,34 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
 }
 
 void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
-                            int UserTreeIdx) {
+                            int UserTreeIdx, int OpdNum) {
   assert((allConstant(VL) || allSameType(VL)) && "Invalid types!");
 
   InstructionsState S = getSameOpcode(VL);
 
   if (Depth == RecursionMaxDepth) {
     DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
-    newTreeEntry(VL, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx, S);
     return;
   }
 
   // Don't handle vectors.
   if (S.OpValue->getType()->isVectorTy()) {
     DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
-    newTreeEntry(VL, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx, S);
     return;
   }
 
   if (StoreInst *SI = dyn_cast<StoreInst>(S.OpValue))
     if (SI->getValueOperand()->getType()->isVectorTy()) {
       DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
-      newTreeEntry(VL, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       return;
     }
 
   // If all of the operands are identical or constant we have a simple solution.
   if (allConstant(VL) || isSplat(VL) || !allSameBlock(VL) || !S.Opcode) {
     DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
-    newTreeEntry(VL, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx, S);
     return;
   }
@@ -1412,7 +1449,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     if (EphValues.count(VL[i])) {
       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
             ") is ephemeral.\n");
-      newTreeEntry(VL, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx, S);
      return;
    }
  }
@@ -1423,7 +1460,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
       if (E->Scalars[i] != VL[i]) {
         DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
-        newTreeEntry(VL, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx, S);
         return;
       }
     }
@@ -1442,7 +1479,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     if (getTreeEntry(I)) {
       DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
             ") is already in tree.\n");
-      newTreeEntry(VL, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       return;
     }
   }
@@ -1452,7 +1489,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
   for (unsigned i = 0, e = VL.size(); i != e; ++i) {
     if (MustGather.count(VL[i])) {
       DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
-      newTreeEntry(VL, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       return;
     }
   }
@@ -1466,7 +1503,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     // Don't go into unreachable blocks. They may contain instructions with
     // dependency cycles which confuse the final scheduling.
     DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
-    newTreeEntry(VL, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx, S);
     return;
   }
@@ -1475,7 +1512,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     for (unsigned j = i + 1; j < e; ++j)
       if (VL[i] == VL[j]) {
         DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
-        newTreeEntry(VL, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx, S);
         return;
       }
@@ -1490,7 +1527,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     assert((!BS.getScheduleData(VL0) ||
             !BS.getScheduleData(VL0)->isPartOfBundle()) &&
            "tryScheduleBundle should cancelScheduling on failure");
-    newTreeEntry(VL, false, UserTreeIdx);
+    newTreeEntry(VL, false, UserTreeIdx, S);
     return;
   }
   DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");
@@ -1509,12 +1546,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           if (Term) {
             DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
             BS.cancelScheduling(VL, VL0);
-            newTreeEntry(VL, false, UserTreeIdx);
+            newTreeEntry(VL, false, UserTreeIdx, S);
             return;
           }
         }
 
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");
 
       for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
@@ -1524,7 +1561,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           Operands.push_back(cast<PHINode>(j)->getIncomingValueForBlock(
               PH->getIncomingBlock(i)));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1536,7 +1573,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       } else {
         BS.cancelScheduling(VL, VL0);
       }
-      newTreeEntry(VL, Reuse, UserTreeIdx);
+      newTreeEntry(VL, Reuse, UserTreeIdx, S);
       return;
     }
     case Instruction::Load: {
@@ -1551,7 +1588,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       if (DL->getTypeSizeInBits(ScalarTy) !=
           DL->getTypeAllocSizeInBits(ScalarTy)) {
         BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx, S);
         DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
         return;
       }
@@ -1562,15 +1599,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         LoadInst *L = cast<LoadInst>(VL[i]);
         if (!L->isSimple()) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
           return;
         }
       }
 
       // Check if the loads are consecutive, reversed, or neither.
-      // TODO: What we really want is to sort the loads, but for now, check
-      // the two likely directions.
       bool Consecutive = true;
       bool ReverseConsecutive = true;
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
@@ -1584,7 +1619,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
 
       if (Consecutive) {
         ++NumLoadsWantToKeepOrder;
-        newTreeEntry(VL, true, UserTreeIdx);
+        newTreeEntry(VL, true, UserTreeIdx, S);
         DEBUG(dbgs() << "SLP: added a vector of loads.\n");
         return;
       }
@@ -1598,15 +1633,41 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           break;
         }
 
-      BS.cancelScheduling(VL, VL0);
-      newTreeEntry(VL, false, UserTreeIdx);
-
       if (ReverseConsecutive) {
-        ++NumLoadsWantToChangeOrder;
         DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
-      } else {
-        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+        ++NumLoadsWantToChangeOrder;
+        BS.cancelScheduling(VL, VL0);
+        newTreeEntry(VL, false, UserTreeIdx, S);
+        return;
       }
+
+      if (VL.size() > 2) {
+        bool ShuffledLoads = true;
+        SmallVector<Value *, 8> Sorted;
+        SmallVector<unsigned, 4> Mask;
+        if (sortLoadAccesses(VL, *DL, *SE, Sorted, &Mask)) {
+          auto NewVL = makeArrayRef(Sorted.begin(), Sorted.end());
+          for (unsigned i = 0, e = NewVL.size() - 1; i < e; ++i) {
+            if (!isConsecutiveAccess(NewVL[i], NewVL[i + 1], *DL, *SE)) {
+              ShuffledLoads = false;
+              break;
+            }
+          }
+          // TODO: Tracking how many loads want to have an arbitrary shuffled
+          // order would be useful.
+          if (ShuffledLoads) {
+            DEBUG(dbgs() << "SLP: added a vector of loads which needs "
+                            "permutation of loaded lanes.\n");
+            newTreeEntry(NewVL, true, UserTreeIdx, S,
+                         makeArrayRef(Mask.begin(), Mask.end()), OpdNum);
+            return;
+          }
+        }
+      }
+
+      DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+      BS.cancelScheduling(VL, VL0);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       return;
     }
     case Instruction::ZExt:
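In offset terms, the new load path above accepts a bundle only when sorting
makes the accesses consecutive; a plain-C++ sketch of that gate (illustrative
only, outside the LLVM types):

#include <algorithm>
#include <cstdint>
#include <vector>

// Jumbled vectorization fires only for bundles wider than 2 (a 2-wide jumble
// is just the reversed case handled earlier) whose sorted element offsets
// form a single stride-1 run; anything else is gathered.
bool canVectorizeJumbled(std::vector<int64_t> Offsets) {
  if (Offsets.size() <= 2)
    return false;
  std::sort(Offsets.begin(), Offsets.end());
  for (size_t i = 0; i + 1 < Offsets.size(); ++i)
    if (Offsets[i + 1] != Offsets[i] + 1)
      return false; // a gap: not consecutive even after sorting
  return true;
}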
@@ -1626,12 +1687,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
         if (Ty != SrcTy || !isValidElementType(Ty)) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
           return;
         }
       }
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of casts.\n");
 
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1640,7 +1701,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1654,13 +1715,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         if (Cmp->getPredicate() != P0 ||
             Cmp->getOperand(0)->getType() != ComparedTy) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
           return;
         }
       }
 
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of compares.\n");
 
       for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
@@ -1669,7 +1730,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1692,7 +1753,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
     case Instruction::And:
     case Instruction::Or:
     case Instruction::Xor:
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
 
       // Sort operands of the instructions so that each side is more likely to
@@ -1701,7 +1762,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         ValueList Left, Right;
         reorderInputsAccordingToOpcode(S.Opcode, VL, Left, Right);
         buildTree_rec(Left, Depth + 1, UserTreeIdx);
-        buildTree_rec(Right, Depth + 1, UserTreeIdx);
+        buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
         return;
       }
@@ -1711,7 +1772,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
@@ -1721,7 +1782,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
           DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           return;
         }
       }
@@ -1734,7 +1795,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         if (Ty0 != CurTy) {
           DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           return;
         }
       }
@@ -1746,12 +1807,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           DEBUG(
               dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           return;
         }
       }
 
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
       for (unsigned i = 0, e = 2; i < e; ++i) {
         ValueList Operands;
@@ -1759,7 +1820,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1768,12 +1829,12 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
           return;
         }
 
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: added a vector of stores.\n");
 
       ValueList Operands;
@@ -1791,7 +1852,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       Intrinsic::ID ID = getVectorIntrinsicIDForCall(CI, TLI);
       if (!isTriviallyVectorizable(ID)) {
         BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx, S);
         DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
         return;
       }
@@ -1805,7 +1866,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
             getVectorIntrinsicIDForCall(CI2, TLI) != ID ||
             !CI->hasIdenticalOperandBundleSchema(*CI2)) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
                        << "\n");
           return;
@@ -1816,7 +1877,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           Value *A1J = CI2->getArgOperand(1);
           if (A1I != A1J) {
             BS.cancelScheduling(VL, VL0);
-            newTreeEntry(VL, false, UserTreeIdx);
+            newTreeEntry(VL, false, UserTreeIdx, S);
             DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
                          << " argument " << A1I << "!=" << A1J
                          << "\n");
@@ -1829,14 +1890,14 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
                         CI->op_begin() + CI->getBundleOperandsEndIndex(),
                         CI2->op_begin() + CI2->getBundleOperandsStartIndex())) {
           BS.cancelScheduling(VL, VL0);
-          newTreeEntry(VL, false, UserTreeIdx);
+          newTreeEntry(VL, false, UserTreeIdx, S);
           DEBUG(dbgs() << "SLP: mismatched bundle operands in calls:" << *CI
                        << "!=" << *VL[i] << '\n');
           return;
         }
       }
 
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
       for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
         ValueList Operands;
         // Prepare the operand vector.
@@ -1844,7 +1905,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
           CallInst *CI2 = dyn_cast<CallInst>(j);
           Operands.push_back(CI2->getArgOperand(i));
         }
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
     }
@@ -1853,11 +1914,11 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
       // then do not vectorize this instruction.
      if (!S.IsAltShuffle) {
        BS.cancelScheduling(VL, VL0);
-        newTreeEntry(VL, false, UserTreeIdx);
+        newTreeEntry(VL, false, UserTreeIdx, S);
        DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
        return;
      }
-      newTreeEntry(VL, true, UserTreeIdx);
+      newTreeEntry(VL, true, UserTreeIdx, S);
      DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
 
      // Reorder operands if reordering would enable vectorization.
@@ -1865,7 +1926,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         ValueList Left, Right;
         reorderAltShuffleOperands(S.Opcode, VL, Left, Right);
         buildTree_rec(Left, Depth + 1, UserTreeIdx);
-        buildTree_rec(Right, Depth + 1, UserTreeIdx);
+        buildTree_rec(Right, Depth + 1, UserTreeIdx, 1);
         return;
       }
@@ -1875,13 +1936,13 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth,
         for (Value *j : VL)
           Operands.push_back(cast<Instruction>(j)->getOperand(i));
 
-        buildTree_rec(Operands, Depth + 1, UserTreeIdx);
+        buildTree_rec(Operands, Depth + 1, UserTreeIdx, i);
       }
       return;
 
     default:
       BS.cancelScheduling(VL, VL0);
-      newTreeEntry(VL, false, UserTreeIdx);
+      newTreeEntry(VL, false, UserTreeIdx, S);
       DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
       return;
   }
@@ -2719,12 +2780,15 @@ Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL, Value *OpValue) const {
   return nullptr;
 }
 
-Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
+Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL, int OpdNum, int UserIndx) {
   InstructionsState S = getSameOpcode(VL);
   if (S.Opcode) {
     if (TreeEntry *E = getTreeEntry(S.OpValue)) {
-      if (E->isSame(VL))
-        return vectorizeTree(E);
+      TreeEntry *UserTreeEntry =
+          UserIndx != -1 ? &VectorizableTree[UserIndx] : nullptr;
+      if (E->isSame(VL) ||
+          (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty() &&
+           E->isFoundJumbled(VL, *DL, *SE)))
+        return vectorizeTree(E, OpdNum, UserIndx);
     }
   }
@@ -2736,9 +2800,10 @@ Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
   return Gather(VL, VecTy);
 }
 
-Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
+Value *BoUpSLP::vectorizeTree(TreeEntry *E, int OpdNum, int UserIndx) {
   IRBuilder<>::InsertPointGuard Guard(Builder);
+  TreeEntry *UserTreeEntry = nullptr;
 
   if (E->VectorizedValue) {
     DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
     return E->VectorizedValue;
@@ -2758,6 +2823,10 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
     return V;
   }
 
+  assert(ScalarToTreeEntry.count(E->Scalars[0]) &&
+         "Expected user tree entry, missing!");
+  int CurrIndx = ScalarToTreeEntry[E->Scalars[0]];
+
   unsigned ShuffleOrOp = S.IsAltShuffle ?
           (unsigned) Instruction::ShuffleVector : S.Opcode;
   switch (ShuffleOrOp) {
@@ -2787,7 +2856,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
         Builder.SetInsertPoint(IBB->getTerminator());
         Builder.SetCurrentDebugLocation(PH->getDebugLoc());
-        Value *Vec = vectorizeTree(Operands);
+        Value *Vec = vectorizeTree(Operands, i, CurrIndx);
         NewPhi->addIncoming(Vec, IBB);
       }
@@ -2840,7 +2909,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *InVec = vectorizeTree(INVL);
+      Value *InVec = vectorizeTree(INVL, 0, CurrIndx);
 
       if (Value *V = alreadyVectorized(E->Scalars, VL0))
         return V;
@@ -2861,8 +2930,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *L = vectorizeTree(LHSV);
-      Value *R = vectorizeTree(RHSV);
+      Value *L = vectorizeTree(LHSV, 0, CurrIndx);
+      Value *R = vectorizeTree(RHSV, 1, CurrIndx);
 
       if (Value *V = alreadyVectorized(E->Scalars, VL0))
         return V;
@@ -2889,9 +2958,9 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *Cond = vectorizeTree(CondVec);
-      Value *True = vectorizeTree(TrueVec);
-      Value *False = vectorizeTree(FalseVec);
+      Value *Cond = vectorizeTree(CondVec, 0, CurrIndx);
+      Value *True = vectorizeTree(TrueVec, 1, CurrIndx);
+      Value *False = vectorizeTree(FalseVec, 2, CurrIndx);
 
       if (Value *V = alreadyVectorized(E->Scalars, VL0))
         return V;
@@ -2932,8 +3001,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL);
-      Value *RHS = vectorizeTree(RHSVL);
+      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
+      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
 
      if (Value *V = alreadyVectorized(E->Scalars, VL0))
        return V;
@@ -2954,7 +3023,17 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       // sink them all the way down past store instructions.
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      LoadInst *LI = cast<LoadInst>(VL0);
+      if (UserIndx != -1) {
+        UserTreeEntry = &VectorizableTree[UserIndx];
+      }
+
+      LoadInst *LI = nullptr;
+      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
+        LI = cast<LoadInst>(E->Scalars[0]);
+      } else {
+        LI = cast<LoadInst>(VL0);
+      }
       Type *ScalarLoadTy = LI->getType();
       unsigned AS = LI->getPointerAddressSpace();
@@ -2976,7 +3055,24 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       LI->setAlignment(Alignment);
       E->VectorizedValue = LI;
       ++NumVectorInstructions;
-      return propagateMetadata(LI, E->Scalars);
+      propagateMetadata(LI, E->Scalars);
+
+      if (UserTreeEntry && !UserTreeEntry->ShuffleMask[OpdNum].empty()) {
+        SmallVector<Constant *, 8> Mask;
+        for (unsigned Lane = 0, LE = UserTreeEntry->ShuffleMask[OpdNum].size();
+             Lane != LE; ++Lane) {
+          Mask.push_back(
+              Builder.getInt32(UserTreeEntry->ShuffleMask[OpdNum][Lane]));
+        }
+        // Generate shuffle for jumbled memory access.
+        Value *Undef = UndefValue::get(VecTy);
+        Value *Shuf = Builder.CreateShuffleVector((Value *)LI, Undef,
+                                                  ConstantVector::get(Mask));
+        E->VectorizedValue = Shuf;
+        ++NumVectorInstructions;
+        return Shuf;
+      }
+      return LI;
     }
     case Instruction::Store: {
       StoreInst *SI = cast<StoreInst>(VL0);
@@ -2989,7 +3085,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *VecValue = vectorizeTree(ScalarStoreValues);
+      Value *VecValue = vectorizeTree(ScalarStoreValues, 0, CurrIndx);
       Value *ScalarPtr = SI->getPointerOperand();
       Value *VecPtr = Builder.CreateBitCast(ScalarPtr, VecTy->getPointerTo(AS));
       StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
@@ -3015,7 +3111,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       for (Value *V : E->Scalars)
         Op0VL.push_back(cast<GetElementPtrInst>(V)->getOperand(0));
 
-      Value *Op0 = vectorizeTree(Op0VL);
+      Value *Op0 = vectorizeTree(Op0VL, 0, CurrIndx);
 
       std::vector<Value *> OpVecs;
       for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
@@ -3024,7 +3120,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
         for (Value *V : E->Scalars)
           OpVL.push_back(cast<GetElementPtrInst>(V)->getOperand(j));
 
-        Value *OpVec = vectorizeTree(OpVL);
+        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
         OpVecs.push_back(OpVec);
       }
@@ -3063,7 +3159,7 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
           OpVL.push_back(CEI->getArgOperand(j));
         }
 
-        Value *OpVec = vectorizeTree(OpVL);
+        Value *OpVec = vectorizeTree(OpVL, j, CurrIndx);
         DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
         OpVecs.push_back(OpVec);
       }
@@ -3094,8 +3190,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
       reorderAltShuffleOperands(S.Opcode, E->Scalars, LHSVL, RHSVL);
       setInsertPointAfterBundle(E->Scalars, VL0);
 
-      Value *LHS = vectorizeTree(LHSVL);
-      Value *RHS = vectorizeTree(RHSVL);
+      Value *LHS = vectorizeTree(LHSVL, 0, CurrIndx);
+      Value *RHS = vectorizeTree(RHSVL, 1, CurrIndx);
 
       if (Value *V = alreadyVectorized(E->Scalars, VL0))
         return V;
@@ -3195,9 +3291,14 @@ BoUpSLP::vectorizeTree(ExtraValueToDebugLocsMap &ExternallyUsedValues) {
       continue;
     TreeEntry *E = getTreeEntry(Scalar);
     assert(E && "Invalid scalar");
    assert(!E->NeedToGather && "Extracting from a gather list");
 
-    Value *Vec = E->VectorizedValue;
+    Value *Vec = dyn_cast<ShuffleVectorInst>(E->VectorizedValue);
+    if (Vec && isa<LoadInst>(cast<Instruction>(Vec)->getOperand(0))) {
+      Vec = cast<Instruction>(E->VectorizedValue)->getOperand(0);
+    } else {
+      Vec = E->VectorizedValue;
+    }
     assert(Vec && "Can't find vectorizable value");
 
     Value *Lane = Builder.getInt32(ExternalUse.Lane);
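The external-use handling above peels the shuffle back and extracts from the
wide load itself. The lane arithmetic it relies on, in a tiny self-contained
form (values are illustrative stand-ins, not from the patch): the Scalars of
a jumbled entry are kept in sorted memory order, matching the load's lanes,
while the shuffle re-permutes them into program order, so a scalar at sorted
lane L must be read from the load rather than the shuffle:

#include <cassert>
#include <vector>

int main() {
  std::vector<int> LoadVec = {0, 10, 20, 30}; // lanes in sorted memory order
  std::vector<unsigned> Mask = {2, 0, 1, 3};  // recorded use mask
  std::vector<int> ShufVec(4);
  for (unsigned i = 0; i < 4; ++i)
    ShufVec[i] = LoadVec[Mask[i]];            // program-order lanes
  // Extracting the scalar stored at sorted lane 0: correct from the load,
  // wrong from the shuffle whenever the mask actually permutes.
  assert(LoadVec[0] == 0);
  assert(ShufVec[0] == 20);
  return 0;
}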

---------- changed file 4 of 7 ----------

@@ -11,20 +11,16 @@
 define i32 @fn1() {
 ; CHECK-LABEL: @fn1(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 0), align 4
-; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 1), align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 2), align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @b, i64 0, i32 3), align 4
-; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP1]], i32 0
-; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 [[TMP2]], i32 1
-; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 [[TMP3]], i32 2
-; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 [[TMP0]], i32 3
-; CHECK-NEXT:    [[TMP8:%.*]] = icmp sgt <4 x i32> [[TMP7]], zeroinitializer
-; CHECK-NEXT:    [[TMP9:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1
-; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <4 x i32> [[TMP9]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2
-; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <4 x i32> [[TMP10]], i32 8, i32 3
-; CHECK-NEXT:    [[TMP12:%.*]] = select <4 x i1> [[TMP8]], <4 x i32> [[TMP11]], <4 x i32> <i32 6, i32 0, i32 0, i32 0>
-; CHECK-NEXT:    store <4 x i32> [[TMP12]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, <4 x i32>* bitcast ([4 x i32]* @b to <4 x i32>*), align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP0]], <4 x i32> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0>
+; CHECK-NEXT:    [[TMP2:%.*]] = icmp sgt <4 x i32> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> undef, i32 [[TMP3]], i32 0
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <4 x i32> [[TMP4]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 1
+; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> [[TMP5]], i32 ptrtoint (i32 ()* @fn1 to i32), i32 2
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> [[TMP6]], i32 8, i32 3
+; CHECK-NEXT:    [[TMP8:%.*]] = select <4 x i1> [[TMP2]], <4 x i32> [[TMP7]], <4 x i32> <i32 6, i32 0, i32 0, i32 0>
+; CHECK-NEXT:    store <4 x i32> [[TMP8]], <4 x i32>* bitcast ([4 x i32]* @a to <4 x i32>*), align 4
 ; CHECK-NEXT:    ret i32 0
 ;
 entry:

---------- changed file 5 of 7 ----------

@@ -0,0 +1,68 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -S -mtriple=x86_64-unknown -mattr=+avx -slp-vectorizer | FileCheck %s
;void jumble (int * restrict A, int * restrict B) {
; int tmp0 = A[10]*A[0];
; int tmp1 = A[11]*A[1];
; int tmp2 = A[12]*A[3];
; int tmp3 = A[13]*A[2];
; B[0] = tmp0;
; B[1] = tmp1;
; B[2] = tmp2;
; B[3] = tmp3;
;}
; Function Attrs: norecurse nounwind uwtable
define void @jumble(i32* noalias nocapture readonly %A, i32* noalias nocapture %B) {
; CHECK-LABEL: @jumble(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 10
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 11
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 12
; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 3
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 13
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[ARRAYIDX]] to <4 x i32>*
; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, <4 x i32>* [[TMP0]], align 4
; CHECK-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2
; CHECK-NEXT: [[TMP2:%.*]] = bitcast i32* [[A]] to <4 x i32>*
; CHECK-NEXT: [[TMP3:%.*]] = load <4 x i32>, <4 x i32>* [[TMP2]], align 4
; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i32> [[TMP3]], <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 2>
; CHECK-NEXT: [[TMP5:%.*]] = mul nsw <4 x i32> [[TMP4]], [[TMP1]]
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 1
; CHECK-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 2
; CHECK-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 3
; CHECK-NEXT: [[TMP6:%.*]] = bitcast i32* [[B]] to <4 x i32>*
; CHECK-NEXT: store <4 x i32> [[TMP5]], <4 x i32>* [[TMP6]], align 4
; CHECK-NEXT: ret void
;
entry:
%arrayidx = getelementptr inbounds i32, i32* %A, i64 10
%0 = load i32, i32* %arrayidx, align 4
%1 = load i32, i32* %A, align 4
%mul = mul nsw i32 %1, %0
%arrayidx2 = getelementptr inbounds i32, i32* %A, i64 11
%2 = load i32, i32* %arrayidx2, align 4
%arrayidx3 = getelementptr inbounds i32, i32* %A, i64 1
%3 = load i32, i32* %arrayidx3, align 4
%mul4 = mul nsw i32 %3, %2
%arrayidx5 = getelementptr inbounds i32, i32* %A, i64 12
%4 = load i32, i32* %arrayidx5, align 4
%arrayidx6 = getelementptr inbounds i32, i32* %A, i64 3
%5 = load i32, i32* %arrayidx6, align 4
%mul7 = mul nsw i32 %5, %4
%arrayidx8 = getelementptr inbounds i32, i32* %A, i64 13
%6 = load i32, i32* %arrayidx8, align 4
%arrayidx9 = getelementptr inbounds i32, i32* %A, i64 2
%7 = load i32, i32* %arrayidx9, align 4
%mul10 = mul nsw i32 %7, %6
store i32 %mul, i32* %B, align 4
%arrayidx12 = getelementptr inbounds i32, i32* %B, i64 1
store i32 %mul4, i32* %arrayidx12, align 4
%arrayidx13 = getelementptr inbounds i32, i32* %B, i64 2
store i32 %mul7, i32* %arrayidx13, align 4
%arrayidx14 = getelementptr inbounds i32, i32* %B, i64 3
store i32 %mul10, i32* %arrayidx14, align 4
ret void
}

---------- changed file 6 of 7 ----------

@@ -5,34 +5,27 @@
 define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
 ; CHECK-LABEL: @jumbled-load(
-; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* %in, i64 0
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
+; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
 ; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
 ; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
-; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* %inn, i64 0
-; CHECK-NEXT:    [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
 ; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
 ; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
-; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[LOAD_3]], [[LOAD_5]]
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_8]]
-; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[LOAD_4]], [[LOAD_7]]
-; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[LOAD_1]], [[LOAD_6]]
-; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* %out, i64 0
-; CHECK-NEXT:    store i32 [[MUL_1]], i32* [[GEP_7]], align 4
-; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* %out, i64 1
-; CHECK-NEXT:    store i32 [[MUL_2]], i32* [[GEP_8]], align 4
-; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* %out, i64 2
-; CHECK-NEXT:    store i32 [[MUL_3]], i32* [[GEP_9]], align 4
-; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* %out, i64 3
-; CHECK-NEXT:    store i32 [[MUL_4]], i32* [[GEP_10]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 2>
+; CHECK-NEXT:    [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]]
+; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
+; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
+; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
+; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 %in.addr = getelementptr inbounds i32, i32* %in, i64 0

---------- changed file 7 of 7 ----------

@@ -6,33 +6,26 @@
 define i32 @jumbled-load(i32* noalias nocapture %in, i32* noalias nocapture %inn, i32* noalias nocapture %out) {
 ; CHECK-LABEL: @jumbled-load(
 ; CHECK-NEXT:    [[IN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[IN:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD_1:%.*]] = load i32, i32* [[IN_ADDR]], align 4
 ; CHECK-NEXT:    [[GEP_1:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_2:%.*]] = load i32, i32* [[GEP_1]], align 4
 ; CHECK-NEXT:    [[GEP_2:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_3:%.*]] = load i32, i32* [[GEP_2]], align 4
 ; CHECK-NEXT:    [[GEP_3:%.*]] = getelementptr inbounds i32, i32* [[IN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_4:%.*]] = load i32, i32* [[GEP_3]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i32* [[IN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i32>, <4 x i32>* [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x i32> [[TMP2]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
 ; CHECK-NEXT:    [[INN_ADDR:%.*]] = getelementptr inbounds i32, i32* [[INN:%.*]], i64 0
-; CHECK-NEXT:    [[LOAD_5:%.*]] = load i32, i32* [[INN_ADDR]], align 4
 ; CHECK-NEXT:    [[GEP_4:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 1
-; CHECK-NEXT:    [[LOAD_6:%.*]] = load i32, i32* [[GEP_4]], align 4
 ; CHECK-NEXT:    [[GEP_5:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 2
-; CHECK-NEXT:    [[LOAD_7:%.*]] = load i32, i32* [[GEP_5]], align 4
 ; CHECK-NEXT:    [[GEP_6:%.*]] = getelementptr inbounds i32, i32* [[INN_ADDR]], i64 3
-; CHECK-NEXT:    [[LOAD_8:%.*]] = load i32, i32* [[GEP_6]], align 4
-; CHECK-NEXT:    [[MUL_1:%.*]] = mul i32 [[LOAD_1]], [[LOAD_5]]
-; CHECK-NEXT:    [[MUL_2:%.*]] = mul i32 [[LOAD_2]], [[LOAD_6]]
-; CHECK-NEXT:    [[MUL_3:%.*]] = mul i32 [[LOAD_3]], [[LOAD_7]]
-; CHECK-NEXT:    [[MUL_4:%.*]] = mul i32 [[LOAD_4]], [[LOAD_8]]
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[INN_ADDR]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <4 x i32> [[TMP5]], <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 0, i32 2>
+; CHECK-NEXT:    [[TMP7:%.*]] = mul <4 x i32> [[TMP3]], [[TMP6]]
 ; CHECK-NEXT:    [[GEP_7:%.*]] = getelementptr inbounds i32, i32* [[OUT:%.*]], i64 0
 ; CHECK-NEXT:    [[GEP_8:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 1
 ; CHECK-NEXT:    [[GEP_9:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 2
 ; CHECK-NEXT:    [[GEP_10:%.*]] = getelementptr inbounds i32, i32* [[OUT]], i64 3
-; CHECK-NEXT:    store i32 [[MUL_1]], i32* [[GEP_9]], align 4
-; CHECK-NEXT:    store i32 [[MUL_2]], i32* [[GEP_7]], align 4
-; CHECK-NEXT:    store i32 [[MUL_3]], i32* [[GEP_10]], align 4
-; CHECK-NEXT:    store i32 [[MUL_4]], i32* [[GEP_8]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[GEP_7]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP7]], <4 x i32>* [[TMP8]], align 4
 ; CHECK-NEXT:    ret i32 undef
 ;
 %in.addr = getelementptr inbounds i32, i32* %in, i64 0