[SLPVectorizer] Vectorize reverse-order loads in horizontal reductions
When vectorizing a tree rooted at a store bundle, we currently try to sort the stores before building the tree, so that the stores can be vectorized. For other trees, the order of the root bundle - which determines the order of all other bundles - is arbitrary. That is bad, since if a leaf bundle of consecutive loads happens to appear in the wrong order, we will not vectorize it.

This is partially mitigated when the root is a binary operator, by trying to build a "reversed" tree when that's considered profitable. This patch extends the workaround we have for binops to trees rooted in a horizontal reduction.

This fixes PR28474.

Differential Revision: https://reviews.llvm.org/D22554

llvm-svn: 276477
parent cefd5ab11e
commit cf125cd0ce
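For illustration, a rough C-level sketch (an assumption for exposition, not code from this patch) of the kind of horizontal reduction over consecutive loads that PR28474 describes; the new regression test below expresses the same pattern directly in IR:

// Hypothetical source shape for PR28474: a sum reduction over eight
// consecutive loads. Depending on how the reduction tree is gathered, the
// load bundle can be presented to the SLP vectorizer in reverse order,
// which previously caused the loads to be gathered instead of vectorized.
int reduce(const int *p) {
  int sum = 0;
  for (int i = 0; i < 8; ++i)
    sum += 42 * p[i];
  return sum;
}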
lib/Transforms/Vectorize/SLPVectorizer.cpp
@@ -878,10 +878,10 @@ private:
   /// List of users to ignore during scheduling and that don't need extracting.
   ArrayRef<Value *> UserIgnoreList;
 
-  // Number of load-bundles, which contain consecutive loads.
+  // Number of load bundles that contain consecutive loads.
   int NumLoadsWantToKeepOrder;
 
-  // Number of load-bundles of size 2, which are consecutive loads if reversed.
+  // Number of load bundles that contain consecutive loads in reversed order.
   int NumLoadsWantToChangeOrder;
 
   // Analysis and block reference.
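For context, these two counters feed BoUpSLP::shouldReorder(), which the callers changed further down query. Its body is not part of this diff; from memory it is roughly the following sketch:

// Sketch, not part of this diff: reordering the whole tree pays off when
// more load bundles would become consecutive after reversal than are
// consecutive already.
bool shouldReorder() const {
  return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
}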
@@ -1154,7 +1154,9 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
         DEBUG(dbgs() << "SLP: Gathering loads of non-packed type.\n");
         return;
       }
-      // Check if the loads are consecutive or of we need to swizzle them.
+
+      // Make sure all loads in the bundle are simple - we can't vectorize
+      // atomic or volatile loads.
       for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
         LoadInst *L = cast<LoadInst>(VL[i]);
         if (!L->isSimple()) {
@@ -1163,20 +1165,47 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
           DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
           return;
         }
+      }
+
+      // Check if the loads are consecutive, reversed, or neither.
+      // TODO: What we really want is to sort the loads, but for now, check
+      // the two likely directions.
+      bool Consecutive = true;
+      bool ReverseConsecutive = true;
+      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
         if (!isConsecutiveAccess(VL[i], VL[i + 1], *DL, *SE)) {
-          if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], *DL, *SE)) {
-            ++NumLoadsWantToChangeOrder;
-          }
-          BS.cancelScheduling(VL);
-          newTreeEntry(VL, false);
-          DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
-          return;
+          Consecutive = false;
+          break;
+        } else {
+          ReverseConsecutive = false;
         }
       }
-      ++NumLoadsWantToKeepOrder;
-      newTreeEntry(VL, true);
-      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+
+      if (Consecutive) {
+        ++NumLoadsWantToKeepOrder;
+        newTreeEntry(VL, true);
+        DEBUG(dbgs() << "SLP: added a vector of loads.\n");
+        return;
+      }
+
+      // If none of the load pairs were consecutive when checked in order,
+      // check the reverse order.
+      if (ReverseConsecutive)
+        for (unsigned i = VL.size() - 1; i > 0; --i)
+          if (!isConsecutiveAccess(VL[i], VL[i - 1], *DL, *SE)) {
+            ReverseConsecutive = false;
+            break;
+          }
+
+      BS.cancelScheduling(VL);
+      newTreeEntry(VL, false);
+
+      if (ReverseConsecutive) {
+        ++NumLoadsWantToChangeOrder;
+        DEBUG(dbgs() << "SLP: Gathering reversed loads.\n");
+      } else {
+        DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
+      }
       return;
     }
     case Instruction::ZExt:
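The forward/reverse classification above can be read in isolation. Here is a self-contained sketch of the same logic, with integer element indices standing in for the loads' pointer operands and for isConsecutiveAccess(); the names and the index representation are assumptions for illustration, not LLVM code:

#include <cstddef>
#include <vector>

// Standalone sketch of the ordering check above: "Idx[i] + 1 == Idx[j]"
// stands in for isConsecutiveAccess(VL[i], VL[j], *DL, *SE). Bundles are
// assumed to have at least two elements, as in the vectorizer.
// Returns +1 for a consecutive bundle, -1 for a reverse-consecutive one,
// and 0 for a bundle that would be gathered.
static int classifyBundle(const std::vector<int> &Idx) {
  bool Consecutive = true;
  bool ReverseConsecutive = true;
  for (std::size_t i = 0, e = Idx.size() - 1; i < e; ++i) {
    if (Idx[i] + 1 != Idx[i + 1]) { // pair not consecutive in forward order
      Consecutive = false;
      break;
    } else {
      ReverseConsecutive = false;   // a forward pair rules out full reversal
    }
  }

  if (Consecutive)
    return 1; // e.g. {0, 1, 2, 3}

  // The first loop breaks early, so ReverseConsecutive may still be stale;
  // confirm it with an explicit backward pass, exactly as the patch does.
  if (ReverseConsecutive)
    for (std::size_t i = Idx.size() - 1; i > 0; --i)
      if (Idx[i] + 1 != Idx[i - 1]) {
        ReverseConsecutive = false;
        break;
      }

  return ReverseConsecutive ? -1 : 0; // e.g. {3, 2, 1, 0} vs. {0, 2, 1}
}

With this sketch, classifyBundle({3, 2, 1, 0}) returns -1, which is the reverse-consecutive case the reduction path below now exploits by rebuilding the tree from the reversed bundle.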
@@ -3798,9 +3827,12 @@ bool SLPVectorizerPass::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
       BuildVectorSlice = BuildVector.slice(i, OpsWidth);
 
     R.buildTree(Ops, BuildVectorSlice);
-    // TODO: check if we can allow reordering also for other cases than
-    // tryToVectorizePair()
+    // TODO: check if we can allow reordering for more cases.
     if (allowReorder && R.shouldReorder()) {
+      // Conceptually, there is nothing actually preventing us from trying to
+      // reorder a larger list. In fact, we do exactly this when vectorizing
+      // reductions. However, at this point, we only expect to get here from
+      // tryToVectorizePair().
       assert(Ops.size() == 2);
       assert(BuildVectorSlice.empty());
       Value *ReorderedOps[] = { Ops[1], Ops[0] };
@@ -4078,7 +4110,12 @@ public:
     unsigned i = 0;
 
     for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
-      V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
+      auto VL = makeArrayRef(&ReducedVals[i], ReduxWidth);
+      V.buildTree(VL, ReductionOps);
+      if (V.shouldReorder()) {
+        SmallVector<Value *, 8> Reversed(VL.rbegin(), VL.rend());
+        V.buildTree(Reversed, ReductionOps);
+      }
       V.computeMinimumValueSizes();
 
       // Estimate cost.
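To make the retry concrete: in the new test below, the reduction is presumably matched from the last add inward (an assumption about traversal order, not stated in the patch), so the first buildTree sees the loads as {p+7, ..., p+0}. That bundle only bumps NumLoadsWantToChangeOrder, shouldReorder() returns true, and the second buildTree sees {p+0, ..., p+7}, which vectorizes into the single <8 x i32> load the test checks for. A minimal sketch of the retry pattern itself, with hypothetical names and a plain container instead of ArrayRef:

#include <algorithm>
#include <vector>

// Hypothetical illustration of the retry added above: build once, and if the
// tree reports that reversed order would expose more consecutive loads,
// rebuild from the reversed bundle.
template <typename SLPTree, typename Value>
void buildTreeMaybeReversed(SLPTree &T, std::vector<Value *> Bundle) {
  T.buildTree(Bundle);
  if (T.shouldReorder()) {
    std::reverse(Bundle.begin(), Bundle.end());
    T.buildTree(Bundle); // second attempt with the bundle reversed
  }
}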
test/Transforms/SLPVectorizer/X86/reduction_loads.ll (new file, 49 lines)
@@ -0,0 +1,49 @@
; RUN: opt < %s -slp-vectorizer -S -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse4.2 | FileCheck %s

; CHECK-LABEL: @test
; CHECK: [[CAST:%.*]] = bitcast i32* %p to <8 x i32>*
; CHECK: [[LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[CAST]], align 4
; CHECK: mul <8 x i32> <i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42, i32 42>, [[LOAD]]

define i32 @test(i32* nocapture readonly %p) {
entry:
  %arrayidx.1 = getelementptr inbounds i32, i32* %p, i64 1
  %arrayidx.2 = getelementptr inbounds i32, i32* %p, i64 2
  %arrayidx.3 = getelementptr inbounds i32, i32* %p, i64 3
  %arrayidx.4 = getelementptr inbounds i32, i32* %p, i64 4
  %arrayidx.5 = getelementptr inbounds i32, i32* %p, i64 5
  %arrayidx.6 = getelementptr inbounds i32, i32* %p, i64 6
  %arrayidx.7 = getelementptr inbounds i32, i32* %p, i64 7
  br label %for.body

for.body:
  %sum = phi i32 [ 0, %entry ], [ %add.7, %for.body ]
  %tmp = load i32, i32* %p, align 4
  %mul = mul i32 %tmp, 42
  %add = add i32 %mul, %sum
  %tmp5 = load i32, i32* %arrayidx.1, align 4
  %mul.1 = mul i32 %tmp5, 42
  %add.1 = add i32 %mul.1, %add
  %tmp6 = load i32, i32* %arrayidx.2, align 4
  %mul.2 = mul i32 %tmp6, 42
  %add.2 = add i32 %mul.2, %add.1
  %tmp7 = load i32, i32* %arrayidx.3, align 4
  %mul.3 = mul i32 %tmp7, 42
  %add.3 = add i32 %mul.3, %add.2
  %tmp8 = load i32, i32* %arrayidx.4, align 4
  %mul.4 = mul i32 %tmp8, 42
  %add.4 = add i32 %mul.4, %add.3
  %tmp9 = load i32, i32* %arrayidx.5, align 4
  %mul.5 = mul i32 %tmp9, 42
  %add.5 = add i32 %mul.5, %add.4
  %tmp10 = load i32, i32* %arrayidx.6, align 4
  %mul.6 = mul i32 %tmp10, 42
  %add.6 = add i32 %mul.6, %add.5
  %tmp11 = load i32, i32* %arrayidx.7, align 4
  %mul.7 = mul i32 %tmp11, 42
  %add.7 = add i32 %mul.7, %add.6
  br i1 true, label %for.end, label %for.body

for.end:
  ret i32 %add.7
}