//===- SLPVectorizer.cpp - A bottom up SLP Vectorizer ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass implements the Bottom Up SLP vectorizer. It detects consecutive
// stores that can be put together into vector-stores. Next, it attempts to
// construct a vectorizable tree using the use-def chains. If a profitable tree
// was found, the SLP vectorizer performs vectorization on the tree.
//
// The pass is inspired by the work described in the paper:
//  "Loop-Aware SLP in GCC" by Ira Rosen, Dorit Nuzman, Ayal Zaks.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Vectorize.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/VectorUtils.h"
#include <algorithm>
#include <map>
#include <memory>

using namespace llvm;

#define SV_NAME "slp-vectorizer"
#define DEBUG_TYPE "SLP"

STATISTIC(NumVectorInstructions, "Number of vector instructions generated");

static cl::opt<int>
    SLPCostThreshold("slp-threshold", cl::init(0), cl::Hidden,
                     cl::desc("Only vectorize if you gain more than this number"));

static cl::opt<bool>
ShouldVectorizeHor("slp-vectorize-hor", cl::init(false), cl::Hidden,
                   cl::desc("Attempt to vectorize horizontal reductions"));

static cl::opt<bool> ShouldStartVectorizeHorAtStore(
    "slp-vectorize-hor-store", cl::init(false), cl::Hidden,
    cl::desc(
        "Attempt to vectorize horizontal reductions feeding into a store"));

namespace {

static const unsigned MinVecRegSize = 128;

static const unsigned RecursionMaxDepth = 12;

// Limit the number of alias checks. The limit is chosen so that
// it has no negative effect on the LLVM benchmarks.
static const unsigned AliasedCheckLimit = 10;

// Another limit for the alias checks: The maximum distance between load/store
// instructions where alias checks are done.
// This limit is useful for very large basic blocks.
static const unsigned MaxMemDepDistance = 160;

/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important thing to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_fp128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}
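// For example, i32, float and pointers are valid element types, while
// x86_fp80 and ppc_fp128 values are rejected up front instead of being
// costed and then inevitably scalarized.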

/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return nullptr;
  BasicBlock *BB = I0->getParent();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I)
      return nullptr;

    if (BB != I->getParent())
      return nullptr;
  }
  return BB;
}

/// \returns True if all of the values in \p VL are constants.
static bool allConstant(ArrayRef<Value *> VL) {
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    if (!isa<Constant>(VL[i]))
      return false;
  return true;
}

/// \returns True if all of the values in \p VL are identical.
static bool isSplat(ArrayRef<Value *> VL) {
  for (unsigned i = 1, e = VL.size(); i < e; ++i)
    if (VL[i] != VL[0])
      return false;
  return true;
}

/// \returns Opcode that can be clubbed with \p Op to create an alternate
/// sequence which can later be merged as a ShuffleVector instruction.
static unsigned getAltOpcode(unsigned Op) {
  switch (Op) {
  case Instruction::FAdd:
    return Instruction::FSub;
  case Instruction::FSub:
    return Instruction::FAdd;
  case Instruction::Add:
    return Instruction::Sub;
  case Instruction::Sub:
    return Instruction::Add;
  default:
    return 0;
  }
}

/// \returns bool representing if Opcode \p Op can be part
/// of an alternate sequence which can later be merged as
/// a ShuffleVector instruction.
static bool canCombineAsAltInst(unsigned Op) {
  if (Op == Instruction::FAdd || Op == Instruction::FSub ||
      Op == Instruction::Sub || Op == Instruction::Add)
    return true;
  return false;
}

/// \returns ShuffleVector instruction if instructions in \p VL have
/// an alternate fadd,fsub / fsub,fadd / add,sub / sub,add sequence
/// (i.e. the opcodes alternate, e.g. fadd,fsub,fadd,fsub,...).
static unsigned isAltInst(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  unsigned Opcode = I0->getOpcode();
  unsigned AltOpcode = getAltOpcode(Opcode);
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || I->getOpcode() != ((i & 1) ? AltOpcode : Opcode))
      return 0;
  }
  return Instruction::ShuffleVector;
}
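// For example, the bundle {a0 + b0, a1 - b1, a2 + b2, a3 - b3} carries the
// alternating opcode sequence {Add, Sub, Add, Sub}; it is treated as a single
// ShuffleVector entry that can later be emitted as one vector add, one vector
// sub and a shuffle picking the appropriate lanes from each.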

/// \returns The opcode if all of the Instructions in \p VL have the same
/// opcode, or zero.
static unsigned getSameOpcode(ArrayRef<Value *> VL) {
  Instruction *I0 = dyn_cast<Instruction>(VL[0]);
  if (!I0)
    return 0;
  unsigned Opcode = I0->getOpcode();
  for (int i = 1, e = VL.size(); i < e; i++) {
    Instruction *I = dyn_cast<Instruction>(VL[i]);
    if (!I || Opcode != I->getOpcode()) {
      if (canCombineAsAltInst(Opcode) && i == 1)
        return isAltInst(VL);
      return 0;
    }
  }
  return Opcode;
}
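// For example, {add, add, add, add} yields Instruction::Add, the alternating
// bundle {add, sub, add, sub} yields Instruction::ShuffleVector (via
// isAltInst), and a mixed bundle such as {add, mul, add, mul} yields 0.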

/// Get the intersection (logical and) of all of the potential IR flags
/// of each scalar operation (VL) that will be converted into a vector (I).
/// Flag set: NSW, NUW, exact, and all of fast-math.
static void propagateIRFlags(Value *I, ArrayRef<Value *> VL) {
  if (auto *VecOp = dyn_cast<BinaryOperator>(I)) {
    if (auto *Intersection = dyn_cast<BinaryOperator>(VL[0])) {
      // Intersection is initialized to the 0th scalar,
      // so start counting from index '1'.
      for (int i = 1, e = VL.size(); i < e; ++i) {
        if (auto *Scalar = dyn_cast<BinaryOperator>(VL[i]))
          Intersection->andIRFlags(Scalar);
      }
      VecOp->copyIRFlags(Intersection);
    }
  }
}
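// For example, a vector add built from scalar adds that all carry 'nsw' keeps
// 'nsw'; if even one scalar lacks the flag, andIRFlags() clears it in the
// intersection and the vectorized instruction is emitted without it.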

/// \returns \p I after propagating metadata from \p VL.
static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
  Instruction *I0 = cast<Instruction>(VL[0]);
  SmallVector<std::pair<unsigned, MDNode *>, 4> Metadata;
  I0->getAllMetadataOtherThanDebugLoc(Metadata);

  for (unsigned i = 0, n = Metadata.size(); i != n; ++i) {
    unsigned Kind = Metadata[i].first;
    MDNode *MD = Metadata[i].second;

    for (int i = 1, e = VL.size(); MD && i != e; i++) {
      Instruction *I = cast<Instruction>(VL[i]);
      MDNode *IMD = I->getMetadata(Kind);

      switch (Kind) {
      default:
        MD = nullptr; // Remove unknown metadata
        break;
      case LLVMContext::MD_tbaa:
        MD = MDNode::getMostGenericTBAA(MD, IMD);
        break;
      case LLVMContext::MD_alias_scope:
        MD = MDNode::getMostGenericAliasScope(MD, IMD);
        break;
      case LLVMContext::MD_noalias:
        MD = MDNode::intersect(MD, IMD);
        break;
      case LLVMContext::MD_fpmath:
        MD = MDNode::getMostGenericFPMath(MD, IMD);
        break;
      }
    }
    I->setMetadata(Kind, MD);
  }
  return I;
}
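// For example, when two scalar loads carry different !tbaa tags the vector
// load gets their most generic common TBAA tag, !fpmath keeps the larger
// permitted error, and any metadata kind not handled above is dropped.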

/// \returns The type that all of the values in \p VL have or null if there
/// are different types.
static Type *getSameType(ArrayRef<Value *> VL) {
  Type *Ty = VL[0]->getType();
  for (int i = 1, e = VL.size(); i < e; i++)
    if (VL[i]->getType() != Ty)
      return nullptr;

  return Ty;
}

/// \returns True if the ExtractElement instructions in VL can be vectorized
/// to use the original vector.
static bool CanReuseExtract(ArrayRef<Value *> VL) {
  assert(Instruction::ExtractElement == getSameOpcode(VL) && "Invalid opcode");
  // Check if all of the extracts come from the same vector and from the
  // correct offset.
  Value *VL0 = VL[0];
  ExtractElementInst *E0 = cast<ExtractElementInst>(VL0);
  Value *Vec = E0->getOperand(0);

  // We have to extract from the same vector type.
  unsigned NElts = Vec->getType()->getVectorNumElements();

  if (NElts != VL.size())
    return false;

  // Check that all of the indices extract from the correct offset.
  ConstantInt *CI = dyn_cast<ConstantInt>(E0->getOperand(1));
  if (!CI || CI->getZExtValue())
    return false;

  for (unsigned i = 1, e = VL.size(); i < e; ++i) {
    ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
    ConstantInt *CI = dyn_cast<ConstantInt>(E->getOperand(1));

    if (!CI || CI->getZExtValue() != i || E->getOperand(0) != Vec)
      return false;
  }

  return true;
}
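// For example, the bundle {extractelement %v, 0; extractelement %v, 1;
// extractelement %v, 2; extractelement %v, 3} taken from a <4 x float> %v
// needs no gather at all: the original vector %v can be reused directly.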

/// \returns True if in-tree use also needs extract. This refers to
/// possible scalar operand in vectorized instruction.
static bool InTreeUserNeedToExtract(Value *Scalar, Instruction *UserInst,
                                    TargetLibraryInfo *TLI) {

  unsigned Opcode = UserInst->getOpcode();
  switch (Opcode) {
  case Instruction::Load: {
    LoadInst *LI = cast<LoadInst>(UserInst);
    return (LI->getPointerOperand() == Scalar);
  }
  case Instruction::Store: {
    StoreInst *SI = cast<StoreInst>(UserInst);
    return (SI->getPointerOperand() == Scalar);
  }
  case Instruction::Call: {
    CallInst *CI = cast<CallInst>(UserInst);
    Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
    if (hasVectorInstrinsicScalarOpd(ID, 1)) {
      return (CI->getArgOperand(1) == Scalar);
    }
    // Otherwise fall through to the default case and return false.
  }
  default:
    return false;
  }
}

/// \returns the AA location that is being accessed by the instruction.
static AliasAnalysis::Location getLocation(Instruction *I, AliasAnalysis *AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return AA->getLocation(SI);
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return AA->getLocation(LI);
  return AliasAnalysis::Location();
}

/// \returns True if the instruction is not a volatile or atomic load/store.
static bool isSimple(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->isSimple();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isSimple();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return !MI->isVolatile();
  return true;
}

/// Bottom Up SLP Vectorizer.
class BoUpSLP {
public:
  typedef SmallVector<Value *, 8> ValueList;
  typedef SmallVector<Instruction *, 16> InstrList;
  typedef SmallPtrSet<Value *, 16> ValueSet;
  typedef SmallVector<StoreInst *, 8> StoreList;

  BoUpSLP(Function *Func, ScalarEvolution *Se, TargetTransformInfo *Tti,
          TargetLibraryInfo *TLi, AliasAnalysis *Aa, LoopInfo *Li,
          DominatorTree *Dt, AssumptionCache *AC)
      : NumLoadsWantToKeepOrder(0), NumLoadsWantToChangeOrder(0), F(Func),
        SE(Se), TTI(Tti), TLI(TLi), AA(Aa), LI(Li), DT(Dt),
        Builder(Se->getContext()) {
    CodeMetrics::collectEphemeralValues(F, AC, EphValues);
  }

  /// \brief Vectorize the tree that starts with the elements in \p VL.
  /// Returns the vectorized root.
  Value *vectorizeTree();

  /// \returns the cost incurred by unwanted spills and fills, caused by
  /// holding live values over call sites.
  int getSpillCost();

  /// \returns the vectorization cost of the subtree that starts at \p VL.
  /// A negative number means that this is profitable.
  int getTreeCost();

  /// Construct a vectorizable tree that starts at \p Roots, ignoring users for
  /// the purpose of scheduling and extraction in the \p UserIgnoreLst.
  void buildTree(ArrayRef<Value *> Roots,
                 ArrayRef<Value *> UserIgnoreLst = None);

  /// Clear the internal data structures that are created by 'buildTree'.
  void deleteTree() {
    VectorizableTree.clear();
    ScalarToTreeEntry.clear();
    MustGather.clear();
    ExternalUses.clear();
    NumLoadsWantToKeepOrder = 0;
    NumLoadsWantToChangeOrder = 0;
    for (auto &Iter : BlocksSchedules) {
      BlockScheduling *BS = Iter.second.get();
      BS->clear();
    }
  }

  /// \returns true if the memory operations A and B are consecutive.
  bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL);

  /// \brief Perform LICM and CSE on the newly generated gather sequences.
  void optimizeGatherSequence();

  /// \returns true if it is beneficial to reverse the vector order.
  bool shouldReorder() const {
    return NumLoadsWantToChangeOrder > NumLoadsWantToKeepOrder;
  }
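  // For example, for the load pair {a[1], a[0]} only the reversed order is
  // consecutive, so buildTree_rec() bumps NumLoadsWantToChangeOrder and a
  // caller can use shouldReorder() to retry with the bundle reversed.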

private:
  struct TreeEntry;

  /// \returns the cost of the vectorizable entry.
  int getEntryCost(TreeEntry *E);

  /// This is the recursive part of buildTree.
  void buildTree_rec(ArrayRef<Value *> Roots, unsigned Depth);

  /// Vectorize a single entry in the tree.
  Value *vectorizeTree(TreeEntry *E);

  /// Vectorize a single entry in the tree, starting in \p VL.
  Value *vectorizeTree(ArrayRef<Value *> VL);

  /// \returns the pointer to the vectorized value if \p VL is already
  /// vectorized, or NULL. Such reuse may happen in cycles.
  Value *alreadyVectorized(ArrayRef<Value *> VL) const;

  /// \brief Take the pointer operand from the Load/Store instruction.
  /// \returns NULL if this is not a valid Load/Store instruction.
  static Value *getPointerOperand(Value *I);

  /// \brief Take the address space operand from the Load/Store instruction.
  /// \returns -1 if this is not a valid Load/Store instruction.
  static unsigned getAddressSpaceOperand(Value *I);

  /// \returns the scalarization cost for this type. Scalarization in this
  /// context means the creation of vectors from a group of scalars.
  int getGatherCost(Type *Ty);

  /// \returns the scalarization cost for this list of values. Assuming that
  /// this subtree gets vectorized, we may need to extract the values from the
  /// roots. This method calculates the cost of extracting the values.
  int getGatherCost(ArrayRef<Value *> VL);

  /// \brief Set the Builder insert point to one after the last instruction in
  /// the bundle.
  void setInsertPointAfterBundle(ArrayRef<Value *> VL);

  /// \returns a vector from a collection of scalars in \p VL.
  Value *Gather(ArrayRef<Value *> VL, VectorType *Ty);

  /// \returns whether the VectorizableTree is fully vectorizable and will
  /// be beneficial even when the tree height is tiny.
  bool isFullyVectorizableTinyTree();

  /// \brief Reorder commutative operands in alt shuffle if they result in
  /// vectorized code.
  void reorderAltShuffleOperands(ArrayRef<Value *> VL,
                                 SmallVectorImpl<Value *> &Left,
                                 SmallVectorImpl<Value *> &Right);

  /// \brief Reorder commutative operands to get better probability of
  /// generating vectorized code.
  void reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
                                      SmallVectorImpl<Value *> &Left,
                                      SmallVectorImpl<Value *> &Right);

  struct TreeEntry {
    TreeEntry() : Scalars(), VectorizedValue(nullptr),
                  NeedToGather(0) {}

    /// \returns true if the scalars in VL are equal to this entry.
    bool isSame(ArrayRef<Value *> VL) const {
      assert(VL.size() == Scalars.size() && "Invalid size");
      return std::equal(VL.begin(), VL.end(), Scalars.begin());
    }

    /// A vector of scalars.
    ValueList Scalars;

    /// The Scalars are vectorized into this value. It is initialized to Null.
    Value *VectorizedValue;

    /// Do we need to gather this sequence?
    bool NeedToGather;
  };

  /// Create a new VectorizableTree entry.
  TreeEntry *newTreeEntry(ArrayRef<Value *> VL, bool Vectorized) {
    VectorizableTree.push_back(TreeEntry());
    int idx = VectorizableTree.size() - 1;
    TreeEntry *Last = &VectorizableTree[idx];
    Last->Scalars.insert(Last->Scalars.begin(), VL.begin(), VL.end());
    Last->NeedToGather = !Vectorized;
    if (Vectorized) {
      for (int i = 0, e = VL.size(); i != e; ++i) {
        assert(!ScalarToTreeEntry.count(VL[i]) && "Scalar already in tree!");
        ScalarToTreeEntry[VL[i]] = idx;
      }
    } else {
      MustGather.insert(VL.begin(), VL.end());
    }
    return Last;
  }

  /// -- Vectorization State --
  /// Holds all of the tree entries.
  std::vector<TreeEntry> VectorizableTree;

  /// Maps a specific scalar to its tree entry.
  SmallDenseMap<Value *, int> ScalarToTreeEntry;

  /// A list of scalars that we found that we need to keep as scalars.
  ValueSet MustGather;

  /// This POD struct describes one external user in the vectorized tree.
  struct ExternalUser {
    ExternalUser(Value *S, llvm::User *U, int L)
        : Scalar(S), User(U), Lane(L) {}
    // Which scalar in our function.
    Value *Scalar;
    // Which user uses the scalar.
    llvm::User *User;
    // Which lane does the scalar belong to.
    int Lane;
  };
  typedef SmallVector<ExternalUser, 16> UserList;

  /// Checks if two instructions may access the same memory.
  ///
  /// \p Loc1 is the location of \p Inst1. It is passed explicitly because it
  /// is invariant in the calling loop.
  bool isAliased(const AliasAnalysis::Location &Loc1, Instruction *Inst1,
                 Instruction *Inst2) {

    // First check if the result is already in the cache.
    AliasCacheKey key = std::make_pair(Inst1, Inst2);
    Optional<bool> &result = AliasCache[key];
    if (result.hasValue()) {
      return result.getValue();
    }
    AliasAnalysis::Location Loc2 = getLocation(Inst2, AA);
    bool aliased = true;
    if (Loc1.Ptr && Loc2.Ptr && isSimple(Inst1) && isSimple(Inst2)) {
      // Do the alias check.
      aliased = AA->alias(Loc1, Loc2);
    }
    // Store the result in the cache.
    result = aliased;
    return aliased;
  }

  typedef std::pair<Instruction *, Instruction *> AliasCacheKey;

  /// Cache for alias results.
  /// TODO: consider moving this to the AliasAnalysis itself.
  DenseMap<AliasCacheKey, Optional<bool>> AliasCache;

  /// Removes an instruction from its block and eventually deletes it.
  /// It's like Instruction::eraseFromParent() except that the actual deletion
  /// is delayed until BoUpSLP is destructed.
  /// This is required to ensure that there are no incorrect collisions in the
  /// AliasCache, which can happen if a new instruction is allocated at the
  /// same address as a previously deleted instruction.
  void eraseInstruction(Instruction *I) {
    I->removeFromParent();
    I->dropAllReferences();
    DeletedInstructions.push_back(std::unique_ptr<Instruction>(I));
  }

  /// Temporary store for deleted instructions. Instructions will be deleted
  /// eventually when the BoUpSLP is destructed.
  SmallVector<std::unique_ptr<Instruction>, 8> DeletedInstructions;

  /// A list of values that need to be extracted out of the tree.
  /// This list holds pairs of (Internal Scalar : External User).
  UserList ExternalUses;

  /// Values used only by @llvm.assume calls.
  SmallPtrSet<const Value *, 32> EphValues;

  /// Holds all of the instructions that we gathered.
  SetVector<Instruction *> GatherSeq;
  /// A list of blocks that we are going to CSE.
  SetVector<BasicBlock *> CSEBlocks;

  /// Contains all scheduling relevant data for an instruction.
  /// A ScheduleData either represents a single instruction or a member of an
  /// instruction bundle (= a group of instructions which is combined into a
  /// vector instruction).
  struct ScheduleData {

    // The initial value for the dependency counters. It means that the
    // dependencies are not calculated yet.
    enum { InvalidDeps = -1 };

    ScheduleData()
        : Inst(nullptr), FirstInBundle(nullptr), NextInBundle(nullptr),
          NextLoadStore(nullptr), SchedulingRegionID(0), SchedulingPriority(0),
          Dependencies(InvalidDeps), UnscheduledDeps(InvalidDeps),
          UnscheduledDepsInBundle(InvalidDeps), IsScheduled(false) {}

    void init(int BlockSchedulingRegionID) {
      FirstInBundle = this;
      NextInBundle = nullptr;
      NextLoadStore = nullptr;
      IsScheduled = false;
      SchedulingRegionID = BlockSchedulingRegionID;
      UnscheduledDepsInBundle = UnscheduledDeps;
      clearDependencies();
    }

    /// Returns true if the dependency information has been calculated.
    bool hasValidDependencies() const { return Dependencies != InvalidDeps; }

    /// Returns true for single instructions and for bundle representatives
    /// (= the head of a bundle).
    bool isSchedulingEntity() const { return FirstInBundle == this; }

    /// Returns true if it represents an instruction bundle and not only a
    /// single instruction.
    bool isPartOfBundle() const {
      return NextInBundle != nullptr || FirstInBundle != this;
    }

    /// Returns true if it is ready for scheduling, i.e. it has no more
    /// unscheduled depending instructions/bundles.
    bool isReady() const {
      assert(isSchedulingEntity() &&
             "can't consider non-scheduling entity for ready list");
      return UnscheduledDepsInBundle == 0 && !IsScheduled;
    }
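    // For example (ignoring memory dependencies), an instruction with two
    // users inside the scheduling region has Dependencies == 2 once
    // calculateDependencies() has run; after both users are scheduled its
    // UnscheduledDeps reaches 0 and the bundle it belongs to becomes ready.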

    /// Modifies the number of unscheduled dependencies, also updating it for
    /// the whole bundle.
    int incrementUnscheduledDeps(int Incr) {
      UnscheduledDeps += Incr;
      return FirstInBundle->UnscheduledDepsInBundle += Incr;
    }

    /// Sets the number of unscheduled dependencies to the number of
    /// dependencies.
    void resetUnscheduledDeps() {
      incrementUnscheduledDeps(Dependencies - UnscheduledDeps);
    }

    /// Clears all dependency information.
    void clearDependencies() {
      Dependencies = InvalidDeps;
      resetUnscheduledDeps();
      MemoryDependencies.clear();
    }

    void dump(raw_ostream &os) const {
      if (!isSchedulingEntity()) {
        os << "/ " << *Inst;
      } else if (NextInBundle) {
        os << '[' << *Inst;
        ScheduleData *SD = NextInBundle;
        while (SD) {
          os << ';' << *SD->Inst;
          SD = SD->NextInBundle;
        }
        os << ']';
      } else {
        os << *Inst;
      }
    }

    Instruction *Inst;

    /// Points to the head in an instruction bundle (and always to this for
    /// single instructions).
    ScheduleData *FirstInBundle;

    /// Single linked list of all instructions in a bundle. Null if it is a
    /// single instruction.
    ScheduleData *NextInBundle;

    /// Single linked list of all memory instructions (e.g. load, store, call)
    /// in the block - until the end of the scheduling region.
    ScheduleData *NextLoadStore;

    /// The dependent memory instructions.
    /// This list is derived on demand in calculateDependencies().
    SmallVector<ScheduleData *, 4> MemoryDependencies;

    /// This ScheduleData is in the current scheduling region if this matches
    /// the current SchedulingRegionID of BlockScheduling.
    int SchedulingRegionID;

    /// Used for getting a "good" final ordering of instructions.
    int SchedulingPriority;

    /// The number of dependencies. Consists of the number of users of the
    /// instruction plus the number of dependent memory instructions (if any).
    /// This value is calculated on demand.
    /// If InvalidDeps, the number of dependencies is not calculated yet.
    int Dependencies;

    /// The number of dependencies minus the number of dependencies of scheduled
    /// instructions. As soon as this is zero, the instruction/bundle gets ready
    /// for scheduling.
    /// Note that this is negative as long as Dependencies is not calculated.
    int UnscheduledDeps;

    /// The sum of UnscheduledDeps in a bundle. Equals to UnscheduledDeps for
    /// single instructions.
    int UnscheduledDepsInBundle;

    /// True if this instruction is scheduled (or considered as scheduled in the
    /// dry-run).
    bool IsScheduled;
  };

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &os,
                                 const BoUpSLP::ScheduleData &SD);
#endif

  /// Contains all scheduling data for a basic block.
  ///
  struct BlockScheduling {

    BlockScheduling(BasicBlock *BB)
        : BB(BB), ChunkSize(BB->size()), ChunkPos(ChunkSize),
          ScheduleStart(nullptr), ScheduleEnd(nullptr),
          FirstLoadStoreInRegion(nullptr), LastLoadStoreInRegion(nullptr),
          // Make sure that the initial SchedulingRegionID is greater than the
          // initial SchedulingRegionID in ScheduleData (which is 0).
          SchedulingRegionID(1) {}

    void clear() {
      ReadyInsts.clear();
      ScheduleStart = nullptr;
      ScheduleEnd = nullptr;
      FirstLoadStoreInRegion = nullptr;
      LastLoadStoreInRegion = nullptr;

      // Make a new scheduling region, i.e. all existing ScheduleData is not
      // in the new region yet.
      ++SchedulingRegionID;
    }

    ScheduleData *getScheduleData(Value *V) {
      ScheduleData *SD = ScheduleDataMap[V];
      if (SD && SD->SchedulingRegionID == SchedulingRegionID)
        return SD;
      return nullptr;
    }

    bool isInSchedulingRegion(ScheduleData *SD) {
      return SD->SchedulingRegionID == SchedulingRegionID;
    }

    /// Marks an instruction as scheduled and puts all dependent ready
    /// instructions into the ready-list.
    template <typename ReadyListType>
    void schedule(ScheduleData *SD, ReadyListType &ReadyList) {
      SD->IsScheduled = true;
      DEBUG(dbgs() << "SLP: schedule " << *SD << "\n");

      ScheduleData *BundleMember = SD;
      while (BundleMember) {
        // Handle the def-use chain dependencies.
        for (Use &U : BundleMember->Inst->operands()) {
          ScheduleData *OpDef = getScheduleData(U.get());
          if (OpDef && OpDef->hasValidDependencies() &&
              OpDef->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = OpDef->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (def): " << *DepBundle << "\n");
          }
        }
        // Handle the memory dependencies.
        for (ScheduleData *MemoryDepSD : BundleMember->MemoryDependencies) {
          if (MemoryDepSD->incrementUnscheduledDeps(-1) == 0) {
            // There are no more unscheduled dependencies after decrementing,
            // so we can put the dependent instruction into the ready list.
            ScheduleData *DepBundle = MemoryDepSD->FirstInBundle;
            assert(!DepBundle->IsScheduled &&
                   "already scheduled bundle gets ready");
            ReadyList.insert(DepBundle);
            DEBUG(dbgs() << "SLP: gets ready (mem): " << *DepBundle << "\n");
          }
        }
        BundleMember = BundleMember->NextInBundle;
      }
    }
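    // In other words, scheduling a bundle releases the instructions it depends
    // on: every def-use or memory dependence counter that drops to zero turns
    // the corresponding bundle into a new entry on the ready list.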

    /// Put all instructions into the ReadyList which are ready for scheduling.
    template <typename ReadyListType>
    void initialFillReadyList(ReadyListType &ReadyList) {
      for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
        ScheduleData *SD = getScheduleData(I);
        if (SD->isSchedulingEntity() && SD->isReady()) {
          ReadyList.insert(SD);
          DEBUG(dbgs() << "SLP: initially in ready list: " << *I << "\n");
        }
      }
    }

    /// Checks if a bundle of instructions can be scheduled, i.e. has no
    /// cyclic dependencies. This is only a dry-run, no instructions are
    /// actually moved at this stage.
    bool tryScheduleBundle(ArrayRef<Value *> VL, BoUpSLP *SLP);

    /// Un-bundles a group of instructions.
    void cancelScheduling(ArrayRef<Value *> VL);

    /// Extends the scheduling region so that V is inside the region.
    void extendSchedulingRegion(Value *V);

    /// Initialize the ScheduleData structures for new instructions in the
    /// scheduling region.
    void initScheduleData(Instruction *FromI, Instruction *ToI,
                          ScheduleData *PrevLoadStore,
                          ScheduleData *NextLoadStore);

    /// Updates the dependency information of a bundle and of all instructions/
    /// bundles which depend on the original bundle.
    void calculateDependencies(ScheduleData *SD, bool InsertInReadyList,
                               BoUpSLP *SLP);

    /// Sets all instructions in the scheduling region to un-scheduled.
    void resetSchedule();

    BasicBlock *BB;

    /// Simple memory allocation for ScheduleData.
    std::vector<std::unique_ptr<ScheduleData[]>> ScheduleDataChunks;

    /// The size of a ScheduleData array in ScheduleDataChunks.
    int ChunkSize;

    /// The allocator position in the current chunk, which is the last entry
    /// of ScheduleDataChunks.
    int ChunkPos;

    /// Attaches ScheduleData to Instruction.
    /// Note that the mapping survives during all vectorization iterations, i.e.
    /// ScheduleData structures are recycled.
    DenseMap<Value *, ScheduleData *> ScheduleDataMap;

    struct ReadyList : SmallVector<ScheduleData *, 8> {
      void insert(ScheduleData *SD) { push_back(SD); }
    };

    /// The ready-list for scheduling (only used for the dry-run).
    ReadyList ReadyInsts;

    /// The first instruction of the scheduling region.
    Instruction *ScheduleStart;

    /// The first instruction _after_ the scheduling region.
    Instruction *ScheduleEnd;

    /// The first memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *FirstLoadStoreInRegion;

    /// The last memory accessing instruction in the scheduling region
    /// (can be null).
    ScheduleData *LastLoadStoreInRegion;

    /// The ID of the scheduling region. For a new vectorization iteration this
    /// is incremented which "removes" all ScheduleData from the region.
    int SchedulingRegionID;
  };

  /// Attaches the BlockScheduling structures to basic blocks.
  MapVector<BasicBlock *, std::unique_ptr<BlockScheduling>> BlocksSchedules;

  /// Performs the "real" scheduling. Done before vectorization is actually
  /// performed in a basic block.
  void scheduleBlock(BlockScheduling *BS);

  /// List of users to ignore during scheduling and that don't need extracting.
  ArrayRef<Value *> UserIgnoreList;

  // Number of load-bundles, which contain consecutive loads.
  int NumLoadsWantToKeepOrder;

  // Number of load-bundles of size 2, which are consecutive loads if reversed.
  int NumLoadsWantToChangeOrder;

  // Analysis and block reference.
  Function *F;
  ScalarEvolution *SE;
  TargetTransformInfo *TTI;
  TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  LoopInfo *LI;
  DominatorTree *DT;
  /// Instruction builder to construct the vectorized tree.
  IRBuilder<> Builder;
};

#ifndef NDEBUG
raw_ostream &operator<<(raw_ostream &os, const BoUpSLP::ScheduleData &SD) {
  SD.dump(os);
  return os;
}
#endif

void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
                        ArrayRef<Value *> UserIgnoreLst) {
  deleteTree();
  UserIgnoreList = UserIgnoreLst;
  if (!getSameType(Roots))
    return;
  buildTree_rec(Roots, 0);

  // Collect the values that we need to extract from the tree.
  for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
    TreeEntry *Entry = &VectorizableTree[EIdx];

    // For each lane:
    for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
      Value *Scalar = Entry->Scalars[Lane];

      // No need to handle users of gathered values.
      if (Entry->NeedToGather)
        continue;

      for (User *U : Scalar->users()) {
        DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");

        Instruction *UserInst = dyn_cast<Instruction>(U);
        if (!UserInst)
          continue;

        // Skip in-tree scalars that become vectors.
        if (ScalarToTreeEntry.count(U)) {
          int Idx = ScalarToTreeEntry[U];
          TreeEntry *UseEntry = &VectorizableTree[Idx];
          Value *UseScalar = UseEntry->Scalars[0];
          // Some in-tree scalars will remain as scalar in vectorized
          // instructions. If that is the case, the one in Lane 0 will
          // be used.
          if (UseScalar != U ||
              !InTreeUserNeedToExtract(Scalar, UserInst, TLI)) {
            DEBUG(dbgs() << "SLP: \tInternal user will be removed:" << *U
                         << ".\n");
            assert(!VectorizableTree[Idx].NeedToGather && "Bad state");
            continue;
          }
        }

        // Ignore users in the user ignore list.
        if (std::find(UserIgnoreList.begin(), UserIgnoreList.end(), UserInst) !=
            UserIgnoreList.end())
          continue;

        DEBUG(dbgs() << "SLP: Need to extract:" << *U << " from lane " <<
              Lane << " from " << *Scalar << ".\n");
        ExternalUses.push_back(ExternalUser(Scalar, U, Lane));
      }
    }
  }
}

void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
  bool SameTy = getSameType(VL); (void)SameTy;
  bool isAltShuffle = false;
  assert(SameTy && "Invalid types!");

  if (Depth == RecursionMaxDepth) {
    DEBUG(dbgs() << "SLP: Gathering due to max recursion depth.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Don't handle vectors.
  if (VL[0]->getType()->isVectorTy()) {
    DEBUG(dbgs() << "SLP: Gathering due to vector type.\n");
    newTreeEntry(VL, false);
    return;
  }

  if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
    if (SI->getValueOperand()->getType()->isVectorTy()) {
      DEBUG(dbgs() << "SLP: Gathering due to store vector type.\n");
      newTreeEntry(VL, false);
      return;
    }
  unsigned Opcode = getSameOpcode(VL);

  // Check that this shuffle vector refers to the alternate
  // sequence of opcodes.
  if (Opcode == Instruction::ShuffleVector) {
    Instruction *I0 = dyn_cast<Instruction>(VL[0]);
    unsigned Op = I0->getOpcode();
    if (Op != Instruction::ShuffleVector)
      isAltShuffle = true;
  }

  // If all of the operands are identical or constant we have a simple solution.
  if (allConstant(VL) || isSplat(VL) || !getSameBlock(VL) || !Opcode) {
    DEBUG(dbgs() << "SLP: Gathering due to C,S,B,O. \n");
    newTreeEntry(VL, false);
    return;
  }

  // We now know that this is a vector of instructions of the same type from
  // the same block.

  // Don't vectorize ephemeral values.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (EphValues.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is ephemeral.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check if this is a duplicate of another entry.
  if (ScalarToTreeEntry.count(VL[0])) {
    int Idx = ScalarToTreeEntry[VL[0]];
    TreeEntry *E = &VectorizableTree[Idx];
    for (unsigned i = 0, e = VL.size(); i != e; ++i) {
      DEBUG(dbgs() << "SLP: \tChecking bundle: " << *VL[i] << ".\n");
      if (E->Scalars[i] != VL[i]) {
        DEBUG(dbgs() << "SLP: Gathering due to partial overlap.\n");
        newTreeEntry(VL, false);
        return;
      }
    }
    DEBUG(dbgs() << "SLP: Perfect diamond merge at " << *VL[0] << ".\n");
    return;
  }
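  // A typical "diamond" handled above is a bundle of adds that feeds both a
  // store bundle and a compare bundle: when the adds are reached a second time
  // with exactly the same scalars, the existing tree entry is reused rather
  // than duplicated.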

  // Check that none of the instructions in the bundle are already in the tree.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (ScalarToTreeEntry.count(VL[i])) {
      DEBUG(dbgs() << "SLP: The instruction (" << *VL[i] <<
            ") is already in tree.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // If any of the scalars is marked as a value that needs to stay scalar then
  // we need to gather the scalars.
  for (unsigned i = 0, e = VL.size(); i != e; ++i) {
    if (MustGather.count(VL[i])) {
      DEBUG(dbgs() << "SLP: Gathering due to gathered scalar.\n");
      newTreeEntry(VL, false);
      return;
    }
  }

  // Check that all of the users of the scalars that we want to vectorize are
  // schedulable.
  Instruction *VL0 = cast<Instruction>(VL[0]);
  BasicBlock *BB = cast<Instruction>(VL0)->getParent();

  if (!DT->isReachableFromEntry(BB)) {
    // Don't go into unreachable blocks. They may contain instructions with
    // dependency cycles which confuse the final scheduling.
    DEBUG(dbgs() << "SLP: bundle in unreachable block.\n");
    newTreeEntry(VL, false);
    return;
  }

  // Check that every instruction appears once in this bundle.
  for (unsigned i = 0, e = VL.size(); i < e; ++i)
    for (unsigned j = i+1; j < e; ++j)
      if (VL[i] == VL[j]) {
        DEBUG(dbgs() << "SLP: Scalar used twice in bundle.\n");
        newTreeEntry(VL, false);
        return;
      }

  auto &BSRef = BlocksSchedules[BB];
  if (!BSRef) {
    BSRef = llvm::make_unique<BlockScheduling>(BB);
  }
  BlockScheduling &BS = *BSRef.get();

  if (!BS.tryScheduleBundle(VL, this)) {
    DEBUG(dbgs() << "SLP: We are not able to schedule this bundle!\n");
    BS.cancelScheduling(VL);
    newTreeEntry(VL, false);
    return;
  }
  DEBUG(dbgs() << "SLP: We are able to schedule this bundle.\n");

  switch (Opcode) {
    case Instruction::PHI: {
      PHINode *PH = dyn_cast<PHINode>(VL0);

      // Check for terminator values (e.g. invoke).
      for (unsigned j = 0; j < VL.size(); ++j)
        for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
          TerminatorInst *Term = dyn_cast<TerminatorInst>(
              cast<PHINode>(VL[j])->getIncomingValueForBlock(PH->getIncomingBlock(i)));
          if (Term) {
            DEBUG(dbgs() << "SLP: Need to swizzle PHINodes (TerminatorInst use).\n");
            BS.cancelScheduling(VL);
            newTreeEntry(VL, false);
            return;
          }
        }

      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of PHINodes.\n");

      for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
        ValueList Operands;
        // Prepare the operand vector.
        for (unsigned j = 0; j < VL.size(); ++j)
          Operands.push_back(cast<PHINode>(VL[j])->getIncomingValueForBlock(
              PH->getIncomingBlock(i)));

        buildTree_rec(Operands, Depth + 1);
      }
      return;
    }
    case Instruction::ExtractElement: {
      bool Reuse = CanReuseExtract(VL);
      if (Reuse) {
        DEBUG(dbgs() << "SLP: Reusing extract sequence.\n");
      } else {
        BS.cancelScheduling(VL);
      }
      newTreeEntry(VL, Reuse);
      return;
    }
    case Instruction::Load: {
      // Check if the loads are consecutive or if we need to swizzle them.
      for (unsigned i = 0, e = VL.size() - 1; i < e; ++i) {
        LoadInst *L = cast<LoadInst>(VL[i]);
        if (!L->isSimple()) {
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering non-simple loads.\n");
          return;
        }
        const DataLayout &DL = F->getParent()->getDataLayout();
        if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
          if (VL.size() == 2 && isConsecutiveAccess(VL[1], VL[0], DL)) {
            ++NumLoadsWantToChangeOrder;
          }
          BS.cancelScheduling(VL);
          newTreeEntry(VL, false);
          DEBUG(dbgs() << "SLP: Gathering non-consecutive loads.\n");
          return;
        }
      }
      ++NumLoadsWantToKeepOrder;
      newTreeEntry(VL, true);
      DEBUG(dbgs() << "SLP: added a vector of loads.\n");
      return;
    }
|
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::BitCast: {
|
|
|
|
Type *SrcTy = VL0->getOperand(0)->getType();
|
|
|
|
for (unsigned i = 0; i < VL.size(); ++i) {
|
|
|
|
Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
|
2015-02-12 02:30:56 +00:00
|
|
|
if (Ty != SrcTy || !isValidElementType(Ty)) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a vector of casts.\n");
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
buildTree_rec(Operands, Depth+1);
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
return;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::FCmp: {
|
|
|
|
// Check that all of the compares have the same predicate.
|
|
|
|
CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
|
2013-07-15 22:52:48 +00:00
|
|
|
Type *ComparedTy = cast<Instruction>(VL[0])->getOperand(0)->getType();
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 1, e = VL.size(); i < e; ++i) {
|
|
|
|
CmpInst *Cmp = cast<CmpInst>(VL[i]);
|
2013-07-15 22:52:48 +00:00
|
|
|
if (Cmp->getPredicate() != P0 ||
|
|
|
|
Cmp->getOperand(0)->getType() != ComparedTy) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: Gathering cmp with different predicate.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a vector of compares.\n");
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
buildTree_rec(Operands, Depth+1);
|
2013-06-25 23:04:09 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
return;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Select:
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor: {
|
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a vector of bin op.\n");
|
|
|
|
|
2013-10-04 20:39:16 +00:00
|
|
|
// Sort operands of the instructions so that each side is more likely to
|
|
|
|
// have the same opcode.
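// For example (hypothetical operands): for a bundle { (x + load a[0]),
// (load a[1] + y) }, moving both loads onto the same operand list lets that
// list become a consecutive load bundle, while the other list gathers x and y.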
|
|
|
|
if (isa<BinaryOperator>(VL0) && VL0->isCommutative()) {
|
|
|
|
ValueList Left, Right;
|
|
|
|
reorderInputsAccordingToOpcode(VL, Left, Right);
|
2014-08-01 09:20:42 +00:00
|
|
|
buildTree_rec(Left, Depth + 1);
|
|
|
|
buildTree_rec(Right, Depth + 1);
|
2013-10-04 20:39:16 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
|
|
|
|
|
|
|
|
buildTree_rec(Operands, Depth+1);
|
|
|
|
}
|
|
|
|
return;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2014-08-27 15:01:18 +00:00
|
|
|
case Instruction::GetElementPtr: {
|
|
|
|
// We don't combine GEPs with complicated (nested) indexing.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j) {
|
|
|
|
if (cast<Instruction>(VL[j])->getNumOperands() != 2) {
|
|
|
|
DEBUG(dbgs() << "SLP: not-vectorizable GEP (nested indexes).\n");
|
|
|
|
BS.cancelScheduling(VL);
|
|
|
|
newTreeEntry(VL, false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We can't combine several GEPs into one vector if they operate on
|
|
|
|
// different types.
|
|
|
|
Type *Ty0 = cast<Instruction>(VL0)->getOperand(0)->getType();
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j) {
|
|
|
|
Type *CurTy = cast<Instruction>(VL[j])->getOperand(0)->getType();
|
|
|
|
if (Ty0 != CurTy) {
|
|
|
|
DEBUG(dbgs() << "SLP: not-vectorizable GEP (different types).\n");
|
|
|
|
BS.cancelScheduling(VL);
|
|
|
|
newTreeEntry(VL, false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We don't combine GEPs with non-constant indexes.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j) {
|
|
|
|
auto Op = cast<Instruction>(VL[j])->getOperand(1);
|
|
|
|
if (!isa<ConstantInt>(Op)) {
|
|
|
|
DEBUG(
|
|
|
|
dbgs() << "SLP: not-vectorizable GEP (non-constant indexes).\n");
|
|
|
|
BS.cancelScheduling(VL);
|
|
|
|
newTreeEntry(VL, false);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a vector of GEPs.\n");
|
|
|
|
for (unsigned i = 0, e = 2; i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
|
|
|
|
|
|
|
|
buildTree_rec(Operands, Depth + 1);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Store: {
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = F->getParent()->getDataLayout();
|
2013-07-07 06:57:07 +00:00
|
|
|
// Check if the stores are consecutive or if we need to swizzle them.
|
|
|
|
for (unsigned i = 0, e = VL.size() - 1; i < e; ++i)
|
2015-03-10 02:37:25 +00:00
|
|
|
if (!isConsecutiveAccess(VL[i], VL[i + 1], DL)) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, false);
|
2013-12-05 05:44:44 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Non-consecutive store.\n");
|
2013-07-07 06:57:07 +00:00
|
|
|
return;
|
|
|
|
}
|
2013-06-25 23:04:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a vector of stores.\n");
|
2013-06-25 23:04:09 +00:00
|
|
|
|
|
|
|
ValueList Operands;
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
2013-07-07 06:57:07 +00:00
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(0));
|
2013-06-25 23:04:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
buildTree_rec(Operands, Depth + 1);
|
2013-06-22 21:34:10 +00:00
|
|
|
return;
|
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
case Instruction::Call: {
|
|
|
|
// Check if the calls are all to the same vectorizable intrinsic.
|
2014-05-03 09:59:54 +00:00
|
|
|
CallInst *CI = cast<CallInst>(VL[0]);
|
|
|
|
// Check if this is an Intrinsic call or something that can be
|
|
|
|
// represented by an intrinsic call.
|
|
|
|
Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
|
2014-04-09 14:20:47 +00:00
|
|
|
if (!isTriviallyVectorizable(ID)) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2014-03-12 20:21:50 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: Non-vectorizable call.\n");
|
|
|
|
return;
|
|
|
|
}
|
2014-05-03 09:59:54 +00:00
|
|
|
Function *Int = CI->getCalledFunction();
|
2014-05-30 04:31:24 +00:00
|
|
|
Value *A1I = nullptr;
|
|
|
|
if (hasVectorInstrinsicScalarOpd(ID, 1))
|
|
|
|
A1I = CI->getArgOperand(1);
|
2014-03-12 20:21:50 +00:00
|
|
|
for (unsigned i = 1, e = VL.size(); i != e; ++i) {
|
2014-05-03 09:59:54 +00:00
|
|
|
CallInst *CI2 = dyn_cast<CallInst>(VL[i]);
|
|
|
|
if (!CI2 || CI2->getCalledFunction() != Int ||
|
|
|
|
getIntrinsicIDForCall(CI2, TLI) != ID) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2014-03-12 20:21:50 +00:00
|
|
|
newTreeEntry(VL, false);
|
2014-05-03 09:59:54 +00:00
|
|
|
DEBUG(dbgs() << "SLP: mismatched calls:" << *CI << "!=" << *VL[i]
|
2014-03-12 20:21:50 +00:00
|
|
|
<< "\n");
|
|
|
|
return;
|
|
|
|
}
|
2014-05-30 04:31:24 +00:00
|
|
|
// ctlz, cttz, and powi are special intrinsics whose second argument
|
|
|
|
// must be the same in order for them to be vectorized.
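// For instance (hypothetical calls): powi(x, 3) and powi(y, 3) can form a
// bundle because both scalar second operands are the constant 3, whereas
// powi(x, 3) and powi(y, 4) fail the A1I != A1J check below and are gathered.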
|
|
|
|
if (hasVectorInstrinsicScalarOpd(ID, 1)) {
|
|
|
|
Value *A1J = CI2->getArgOperand(1);
|
|
|
|
if (A1I != A1J) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2014-05-30 04:31:24 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: mismatched arguments in call:" << *CI
|
|
|
|
<< " argument "<< A1I<<"!=" << A1J
|
|
|
|
<< "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
newTreeEntry(VL, true);
|
2014-05-03 09:59:54 +00:00
|
|
|
for (unsigned i = 0, e = CI->getNumArgOperands(); i != e; ++i) {
|
2014-03-12 20:21:50 +00:00
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j) {
|
2014-05-03 09:59:54 +00:00
|
|
|
CallInst *CI2 = dyn_cast<CallInst>(VL[j]);
|
|
|
|
Operands.push_back(CI2->getArgOperand(i));
|
2014-03-12 20:21:50 +00:00
|
|
|
}
|
|
|
|
buildTree_rec(Operands, Depth + 1);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2014-06-20 04:32:48 +00:00
|
|
|
case Instruction::ShuffleVector: {
|
|
|
|
// If this is not an alternate sequence of opcodes like add-sub,
|
|
|
|
// then do not vectorize this instruction.
|
|
|
|
if (!isAltShuffle) {
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2014-06-20 04:32:48 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: ShuffleVector are not vectorized.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
newTreeEntry(VL, true);
|
|
|
|
DEBUG(dbgs() << "SLP: added a ShuffleVector op.\n");
|
2015-01-20 06:11:00 +00:00
|
|
|
|
|
|
|
// Reorder operands if reordering would enable vectorization.
|
|
|
|
if (isa<BinaryOperator>(VL0)) {
|
|
|
|
ValueList Left, Right;
|
|
|
|
reorderAltShuffleOperands(VL, Left, Right);
|
|
|
|
buildTree_rec(Left, Depth + 1);
|
|
|
|
buildTree_rec(Right, Depth + 1);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-06-20 04:32:48 +00:00
|
|
|
for (unsigned i = 0, e = VL0->getNumOperands(); i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < VL.size(); ++j)
|
|
|
|
Operands.push_back(cast<Instruction>(VL[j])->getOperand(i));
|
|
|
|
|
|
|
|
buildTree_rec(Operands, Depth + 1);
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
default:
|
2014-08-01 09:20:42 +00:00
|
|
|
BS.cancelScheduling(VL);
|
2013-07-07 06:57:07 +00:00
|
|
|
newTreeEntry(VL, false);
|
|
|
|
DEBUG(dbgs() << "SLP: Gathering unknown instruction.\n");
|
|
|
|
return;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
int BoUpSLP::getEntryCost(TreeEntry *E) {
|
|
|
|
ArrayRef<Value*> VL = E->Scalars;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
|
|
|
Type *ScalarTy = VL[0]->getType();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
|
|
|
|
ScalarTy = SI->getValueOperand()->getType();
|
2013-06-24 02:52:43 +00:00
|
|
|
VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
if (E->NeedToGather) {
|
|
|
|
if (allConstant(VL))
|
|
|
|
return 0;
|
|
|
|
if (isSplat(VL)) {
|
|
|
|
return TTI->getShuffleCost(TargetTransformInfo::SK_Broadcast, VecTy, 0);
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
return getGatherCost(E->Scalars);
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2014-06-20 04:32:48 +00:00
|
|
|
unsigned Opcode = getSameOpcode(VL);
|
|
|
|
assert(Opcode && getSameType(VL) && getSameBlock(VL) && "Invalid VL");
|
2013-06-22 21:34:10 +00:00
|
|
|
Instruction *VL0 = cast<Instruction>(VL[0]);
|
|
|
|
switch (Opcode) {
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::PHI: {
|
2013-06-22 21:34:10 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::ExtractElement: {
|
2014-03-28 17:21:32 +00:00
|
|
|
if (CanReuseExtract(VL)) {
|
|
|
|
int DeadCost = 0;
|
|
|
|
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
|
|
|
|
ExtractElementInst *E = cast<ExtractElementInst>(VL[i]);
|
|
|
|
if (E->hasOneUse())
|
|
|
|
// Take credit for instruction that will become dead.
|
|
|
|
DeadCost +=
|
|
|
|
TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy, i);
|
|
|
|
}
|
|
|
|
return -DeadCost;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
return getGatherCost(VecTy);
|
2013-06-24 02:52:43 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::BitCast: {
|
|
|
|
Type *SrcTy = VL0->getOperand(0)->getType();
|
|
|
|
|
|
|
|
// Calculate the cost of this instruction.
|
|
|
|
int ScalarCost = VL.size() * TTI->getCastInstrCost(VL0->getOpcode(),
|
|
|
|
VL0->getType(), SrcTy);
|
|
|
|
|
|
|
|
VectorType *SrcVecTy = VectorType::get(SrcTy, VL.size());
|
|
|
|
int VecCost = TTI->getCastInstrCost(VL0->getOpcode(), VecTy, SrcVecTy);
|
|
|
|
return VecCost - ScalarCost;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::FCmp:
|
|
|
|
case Instruction::ICmp:
|
|
|
|
case Instruction::Select:
|
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor: {
|
|
|
|
// Calculate the cost of this instruction.
|
|
|
|
int ScalarCost = 0;
|
|
|
|
int VecCost = 0;
|
|
|
|
if (Opcode == Instruction::FCmp || Opcode == Instruction::ICmp ||
|
|
|
|
Opcode == Instruction::Select) {
|
|
|
|
VectorType *MaskTy = VectorType::get(Builder.getInt1Ty(), VL.size());
|
|
|
|
ScalarCost = VecTy->getNumElements() *
|
|
|
|
TTI->getCmpSelInstrCost(Opcode, ScalarTy, Builder.getInt1Ty());
|
|
|
|
VecCost = TTI->getCmpSelInstrCost(Opcode, VecTy, MaskTy);
|
|
|
|
} else {
|
2013-10-29 01:33:53 +00:00
|
|
|
// Certain instructions can be cheaper to vectorize if they have a
|
|
|
|
// constant second vector operand.
|
|
|
|
TargetTransformInfo::OperandValueKind Op1VK =
|
|
|
|
TargetTransformInfo::OK_AnyValue;
|
|
|
|
TargetTransformInfo::OperandValueKind Op2VK =
|
|
|
|
TargetTransformInfo::OK_UniformConstantValue;
|
2014-08-25 04:56:54 +00:00
|
|
|
TargetTransformInfo::OperandValueProperties Op1VP =
|
|
|
|
TargetTransformInfo::OP_None;
|
|
|
|
TargetTransformInfo::OperandValueProperties Op2VP =
|
|
|
|
TargetTransformInfo::OP_None;
|
2013-10-29 01:33:53 +00:00
|
|
|
|
2014-02-12 23:43:47 +00:00
|
|
|
// If all operands are exactly the same ConstantInt then set the
|
|
|
|
// operand kind to OK_UniformConstantValue.
|
|
|
|
// If instead not all operands are constants, then set the operand kind
|
|
|
|
// to OK_AnyValue. If all operands are constants but not the same,
|
|
|
|
// then set the operand kind to OK_NonUniformConstantValue.
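// Illustration with hypothetical bundles: {x << 3, y << 3} yields
// OK_UniformConstantValue, {x << 3, y << 5} yields OK_NonUniformConstantValue,
// and {x << 3, y << z} yields OK_AnyValue.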
|
2014-04-25 05:29:35 +00:00
|
|
|
ConstantInt *CInt = nullptr;
|
2014-02-12 23:43:47 +00:00
|
|
|
for (unsigned i = 0; i < VL.size(); ++i) {
|
|
|
|
const Instruction *I = cast<Instruction>(VL[i]);
|
|
|
|
if (!isa<ConstantInt>(I->getOperand(1))) {
|
2013-10-29 01:33:53 +00:00
|
|
|
Op2VK = TargetTransformInfo::OK_AnyValue;
|
|
|
|
break;
|
|
|
|
}
|
2014-02-12 23:43:47 +00:00
|
|
|
if (i == 0) {
|
|
|
|
CInt = cast<ConstantInt>(I->getOperand(1));
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (Op2VK == TargetTransformInfo::OK_UniformConstantValue &&
|
|
|
|
CInt != cast<ConstantInt>(I->getOperand(1)))
|
|
|
|
Op2VK = TargetTransformInfo::OK_NonUniformConstantValue;
|
|
|
|
}
|
2014-08-25 04:56:54 +00:00
|
|
|
// FIXME: Currently the cost model modification for division by a
|
|
|
|
// power of 2 is handled only for X86. Add support for other targets.
|
|
|
|
if (Op2VK == TargetTransformInfo::OK_UniformConstantValue && CInt &&
|
|
|
|
CInt->getValue().isPowerOf2())
|
|
|
|
Op2VP = TargetTransformInfo::OP_PowerOf2;
|
2013-10-29 01:33:53 +00:00
|
|
|
|
2014-08-25 04:56:54 +00:00
|
|
|
ScalarCost = VecTy->getNumElements() *
|
|
|
|
TTI->getArithmeticInstrCost(Opcode, ScalarTy, Op1VK, Op2VK,
|
|
|
|
Op1VP, Op2VP);
|
|
|
|
VecCost = TTI->getArithmeticInstrCost(Opcode, VecTy, Op1VK, Op2VK,
|
|
|
|
Op1VP, Op2VP);
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
|
|
|
return VecCost - ScalarCost;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2014-08-27 15:01:18 +00:00
|
|
|
case Instruction::GetElementPtr: {
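// A bundle of single-index GEPs is costed below as if it were an integer
// add of a pointer and a uniform constant offset; this is a modeling
// convention of this cost function, not a statement about final codegen.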
|
|
|
|
TargetTransformInfo::OperandValueKind Op1VK =
|
|
|
|
TargetTransformInfo::OK_AnyValue;
|
|
|
|
TargetTransformInfo::OperandValueKind Op2VK =
|
|
|
|
TargetTransformInfo::OK_UniformConstantValue;
|
|
|
|
|
|
|
|
int ScalarCost =
|
|
|
|
VecTy->getNumElements() *
|
|
|
|
TTI->getArithmeticInstrCost(Instruction::Add, ScalarTy, Op1VK, Op2VK);
|
|
|
|
int VecCost =
|
|
|
|
TTI->getArithmeticInstrCost(Instruction::Add, VecTy, Op1VK, Op2VK);
|
|
|
|
|
|
|
|
return VecCost - ScalarCost;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Load: {
|
|
|
|
// Cost of wide load - cost of scalar loads.
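// For example, with assumed target costs of 1 per scalar load and 1 for the
// wide load, a 4-element bundle returns 1 - 4*1 = -3, i.e. vectorizing this
// entry is modeled as profitable.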
|
|
|
|
int ScalarLdCost = VecTy->getNumElements() *
|
|
|
|
TTI->getMemoryOpCost(Instruction::Load, ScalarTy, 1, 0);
|
2013-10-29 01:33:50 +00:00
|
|
|
int VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, 1, 0);
|
2013-07-07 06:57:07 +00:00
|
|
|
return VecLdCost - ScalarLdCost;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Store: {
|
|
|
|
// We know that we can merge the stores. Calculate the cost.
|
|
|
|
int ScalarStCost = VecTy->getNumElements() *
|
|
|
|
TTI->getMemoryOpCost(Instruction::Store, ScalarTy, 1, 0);
|
2013-10-29 01:33:50 +00:00
|
|
|
int VecStCost = TTI->getMemoryOpCost(Instruction::Store, VecTy, 1, 0);
|
2013-07-07 06:57:07 +00:00
|
|
|
return VecStCost - ScalarStCost;
|
2013-06-24 02:52:43 +00:00
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
case Instruction::Call: {
|
|
|
|
CallInst *CI = cast<CallInst>(VL0);
|
2014-05-03 09:59:54 +00:00
|
|
|
Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
|
2014-03-12 20:21:50 +00:00
|
|
|
|
|
|
|
// Calculate the cost of the scalar and vector calls.
|
|
|
|
SmallVector<Type*, 4> ScalarTys, VecTys;
|
2014-05-03 09:59:54 +00:00
|
|
|
for (unsigned op = 0, opc = CI->getNumArgOperands(); op != opc; ++op) {
|
2014-03-12 20:21:50 +00:00
|
|
|
ScalarTys.push_back(CI->getArgOperand(op)->getType());
|
|
|
|
VecTys.push_back(VectorType::get(CI->getArgOperand(op)->getType(),
|
|
|
|
VecTy->getNumElements()));
|
|
|
|
}
|
|
|
|
|
|
|
|
int ScalarCallCost = VecTy->getNumElements() *
|
|
|
|
TTI->getIntrinsicInstrCost(ID, ScalarTy, ScalarTys);
|
|
|
|
|
|
|
|
int VecCallCost = TTI->getIntrinsicInstrCost(ID, VecTy, VecTys);
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: Call cost "<< VecCallCost - ScalarCallCost
|
|
|
|
<< " (" << VecCallCost << "-" << ScalarCallCost << ")"
|
2014-05-03 09:59:54 +00:00
|
|
|
<< " for " << *CI << "\n");
|
2014-03-12 20:21:50 +00:00
|
|
|
|
|
|
|
return VecCallCost - ScalarCallCost;
|
|
|
|
}
|
2014-06-20 04:32:48 +00:00
|
|
|
case Instruction::ShuffleVector: {
|
|
|
|
TargetTransformInfo::OperandValueKind Op1VK =
|
|
|
|
TargetTransformInfo::OK_AnyValue;
|
|
|
|
TargetTransformInfo::OperandValueKind Op2VK =
|
|
|
|
TargetTransformInfo::OK_AnyValue;
|
|
|
|
int ScalarCost = 0;
|
|
|
|
int VecCost = 0;
|
|
|
|
for (unsigned i = 0; i < VL.size(); ++i) {
|
|
|
|
Instruction *I = cast<Instruction>(VL[i]);
|
|
|
|
if (!I)
|
|
|
|
break;
|
|
|
|
ScalarCost +=
|
|
|
|
TTI->getArithmeticInstrCost(I->getOpcode(), ScalarTy, Op1VK, Op2VK);
|
|
|
|
}
|
|
|
|
// VecCost is equal to the sum of the cost of creating 2 vectors
|
|
|
|
// and the cost of creating the shuffle.
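// E.g. for an add/sub alternation over 4 lanes with assumed unit costs, this
// is roughly cost(vector add) + cost(vector sub) + cost(SK_Alternate shuffle),
// compared against the four scalar adds/subs accumulated above.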
|
|
|
|
Instruction *I0 = cast<Instruction>(VL[0]);
|
|
|
|
VecCost =
|
|
|
|
TTI->getArithmeticInstrCost(I0->getOpcode(), VecTy, Op1VK, Op2VK);
|
|
|
|
Instruction *I1 = cast<Instruction>(VL[1]);
|
|
|
|
VecCost +=
|
|
|
|
TTI->getArithmeticInstrCost(I1->getOpcode(), VecTy, Op1VK, Op2VK);
|
|
|
|
VecCost +=
|
|
|
|
TTI->getShuffleCost(TargetTransformInfo::SK_Alternate, VecTy, 0);
|
|
|
|
return VecCost - ScalarCost;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("Unknown instruction");
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2013-06-24 02:52:43 +00:00
|
|
|
|
2013-10-02 20:20:39 +00:00
|
|
|
bool BoUpSLP::isFullyVectorizableTinyTree() {
|
|
|
|
DEBUG(dbgs() << "SLP: Check whether the tree with height " <<
|
|
|
|
VectorizableTree.size() << " is fully vectorizable.\n");
|
|
|
|
|
|
|
|
// We only handle trees of height 2.
|
|
|
|
if (VectorizableTree.size() != 2)
|
|
|
|
return false;
|
|
|
|
|
2014-02-24 19:52:29 +00:00
|
|
|
// Handle splat stores.
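// For instance, four stores of the same scalar value: the store entry itself
// is vectorizable and the stored-value entry is a gather of a splat, which
// the check below accepts.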
|
|
|
|
if (!VectorizableTree[0].NeedToGather && isSplat(VectorizableTree[1].Scalars))
|
|
|
|
return true;
|
|
|
|
|
2013-10-02 20:20:39 +00:00
|
|
|
// Gathering cost would be too much for tiny trees.
|
2014-02-24 19:52:29 +00:00
|
|
|
if (VectorizableTree[0].NeedToGather || VectorizableTree[1].NeedToGather)
|
|
|
|
return false;
|
2013-10-02 20:20:39 +00:00
|
|
|
|
2014-02-24 19:52:29 +00:00
|
|
|
return true;
|
2013-10-02 20:20:39 +00:00
|
|
|
}
|
|
|
|
|
2014-08-05 12:30:34 +00:00
|
|
|
int BoUpSLP::getSpillCost() {
|
|
|
|
// Walk from the bottom of the tree to the top, tracking which values are
|
|
|
|
// live. When we see a call instruction that is not part of our tree,
|
|
|
|
// query TTI to see if there is a cost to keeping values live over it
|
|
|
|
// (for example, if spills and fills are required).
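// For instance (hypothetical IR): if an unrelated 'call void @foo()' sits
// between two bundled instructions, every tree value still live across that
// call is handed to getCostOfKeepingLiveOverCall as a vector type of
// BundleWidth elements, and the returned cost is added to the tree cost.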
|
|
|
|
unsigned BundleWidth = VectorizableTree.front().Scalars.size();
|
|
|
|
int Cost = 0;
|
|
|
|
|
|
|
|
SmallPtrSet<Instruction*, 4> LiveValues;
|
|
|
|
Instruction *PrevInst = nullptr;
|
|
|
|
|
|
|
|
for (unsigned N = 0; N < VectorizableTree.size(); ++N) {
|
|
|
|
Instruction *Inst = dyn_cast<Instruction>(VectorizableTree[N].Scalars[0]);
|
|
|
|
if (!Inst)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
if (!PrevInst) {
|
|
|
|
PrevInst = Inst;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(
|
|
|
|
dbgs() << "SLP: #LV: " << LiveValues.size();
|
|
|
|
for (auto *X : LiveValues)
|
|
|
|
dbgs() << " " << X->getName();
|
|
|
|
dbgs() << ", Looking at ";
|
|
|
|
Inst->dump();
|
|
|
|
);
|
|
|
|
|
|
|
|
// Update LiveValues.
|
|
|
|
LiveValues.erase(PrevInst);
|
|
|
|
for (auto &J : PrevInst->operands()) {
|
|
|
|
if (isa<Instruction>(&*J) && ScalarToTreeEntry.count(&*J))
|
|
|
|
LiveValues.insert(cast<Instruction>(&*J));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now find the sequence of instructions between PrevInst and Inst.
|
|
|
|
BasicBlock::reverse_iterator InstIt(Inst), PrevInstIt(PrevInst);
|
|
|
|
--PrevInstIt;
|
|
|
|
while (InstIt != PrevInstIt) {
|
|
|
|
if (PrevInstIt == PrevInst->getParent()->rend()) {
|
|
|
|
PrevInstIt = Inst->getParent()->rbegin();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (isa<CallInst>(&*PrevInstIt) && &*PrevInstIt != PrevInst) {
|
|
|
|
SmallVector<Type*, 4> V;
|
|
|
|
for (auto *II : LiveValues)
|
|
|
|
V.push_back(VectorType::get(II->getType(), BundleWidth));
|
|
|
|
Cost += TTI->getCostOfKeepingLiveOverCall(V);
|
|
|
|
}
|
|
|
|
|
|
|
|
++PrevInstIt;
|
|
|
|
}
|
|
|
|
|
|
|
|
PrevInst = Inst;
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: SpillCost=" << Cost << "\n");
|
|
|
|
return Cost;
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
int BoUpSLP::getTreeCost() {
|
|
|
|
int Cost = 0;
|
|
|
|
DEBUG(dbgs() << "SLP: Calculating cost for tree of size " <<
|
|
|
|
VectorizableTree.size() << ".\n");
|
|
|
|
|
2013-10-02 20:20:39 +00:00
|
|
|
// We only vectorize tiny trees if they are fully vectorizable.
|
|
|
|
if (VectorizableTree.size() < 3 && !isFullyVectorizableTinyTree()) {
|
2015-01-15 11:41:30 +00:00
|
|
|
if (VectorizableTree.empty()) {
|
2013-07-26 23:07:55 +00:00
|
|
|
assert(!ExternalUses.size() && "We should not have any external users");
|
|
|
|
}
|
2013-09-24 17:26:43 +00:00
|
|
|
return INT_MAX;
|
2013-07-11 04:54:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
unsigned BundleWidth = VectorizableTree[0].Scalars.size();
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 0, e = VectorizableTree.size(); i != e; ++i) {
|
|
|
|
int C = getEntryCost(&VectorizableTree[i]);
|
|
|
|
DEBUG(dbgs() << "SLP: Adding cost " << C << " for bundle that starts with "
|
|
|
|
<< *VectorizableTree[i].Scalars[0] << " .\n");
|
|
|
|
Cost += C;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-11 04:54:05 +00:00
|
|
|
|
2013-11-22 15:47:17 +00:00
|
|
|
SmallSet<Value *, 16> ExtractCostCalculated;
|
2013-07-11 04:54:05 +00:00
|
|
|
int ExtractCost = 0;
|
|
|
|
for (UserList::iterator I = ExternalUses.begin(), E = ExternalUses.end();
|
|
|
|
I != E; ++I) {
|
2013-11-22 15:47:17 +00:00
|
|
|
// We only add extract cost once for the same scalar.
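// E.g. a scalar with two external users is charged a single extractelement
// here; the cost model assumes the extracted value can be shared.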
|
2014-11-19 07:49:26 +00:00
|
|
|
if (!ExtractCostCalculated.insert(I->Scalar).second)
|
2013-11-22 15:47:17 +00:00
|
|
|
continue;
|
2013-07-11 04:54:05 +00:00
|
|
|
|
2014-10-15 17:35:01 +00:00
|
|
|
// Uses by ephemeral values are free (because the ephemeral value will be
|
|
|
|
// removed prior to code generation, and so the extraction will be
|
|
|
|
// removed as well).
|
|
|
|
if (EphValues.count(I->User))
|
|
|
|
continue;
|
|
|
|
|
2013-07-11 04:54:05 +00:00
|
|
|
VectorType *VecTy = VectorType::get(I->Scalar->getType(), BundleWidth);
|
|
|
|
ExtractCost += TTI->getVectorInstrCost(Instruction::ExtractElement, VecTy,
|
|
|
|
I->Lane);
|
|
|
|
}
|
|
|
|
|
2014-08-05 12:30:34 +00:00
|
|
|
Cost += getSpillCost();
|
|
|
|
|
2013-07-11 04:54:05 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Total Cost " << Cost + ExtractCost<< ".\n");
|
|
|
|
return Cost + ExtractCost;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
int BoUpSLP::getGatherCost(Type *Ty) {
|
|
|
|
int Cost = 0;
|
|
|
|
for (unsigned i = 0, e = cast<VectorType>(Ty)->getNumElements(); i < e; ++i)
|
|
|
|
Cost += TTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
|
|
|
|
return Cost;
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
int BoUpSLP::getGatherCost(ArrayRef<Value *> VL) {
|
|
|
|
// Find the type of the operands in VL.
|
|
|
|
Type *ScalarTy = VL[0]->getType();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
|
|
|
|
ScalarTy = SI->getValueOperand()->getType();
|
|
|
|
VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
|
|
|
|
// Find the cost of inserting/extracting values from the vector.
|
|
|
|
return getGatherCost(VecTy);
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *BoUpSLP::getPointerOperand(Value *I) {
|
|
|
|
if (LoadInst *LI = dyn_cast<LoadInst>(I))
|
|
|
|
return LI->getPointerOperand();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(I))
|
|
|
|
return SI->getPointerOperand();
|
2014-04-25 05:29:35 +00:00
|
|
|
return nullptr;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2013-06-28 22:07:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
unsigned BoUpSLP::getAddressSpaceOperand(Value *I) {
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(I))
|
|
|
|
return L->getPointerAddressSpace();
|
|
|
|
if (StoreInst *S = dyn_cast<StoreInst>(I))
|
|
|
|
return S->getPointerAddressSpace();
|
|
|
|
return -1;
|
|
|
|
}
|
2013-06-28 22:07:09 +00:00
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
bool BoUpSLP::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL) {
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *PtrA = getPointerOperand(A);
|
|
|
|
Value *PtrB = getPointerOperand(B);
|
|
|
|
unsigned ASA = getAddressSpaceOperand(A);
|
|
|
|
unsigned ASB = getAddressSpaceOperand(B);
|
2013-06-28 22:07:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
// Check that the address spaces match and that the pointers are valid.
|
|
|
|
if (!PtrA || !PtrB || (ASA != ASB))
|
|
|
|
return false;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-17 22:41:16 +00:00
|
|
|
// Make sure that A and B are different pointers of the same type.
|
2013-07-17 19:52:25 +00:00
|
|
|
if (PtrA == PtrB || PtrA->getType() != PtrB->getType())
|
2013-07-07 06:57:07 +00:00
|
|
|
return false;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
unsigned PtrBitWidth = DL.getPointerSizeInBits(ASA);
|
2013-07-18 04:33:20 +00:00
|
|
|
Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();
|
2015-03-10 02:37:25 +00:00
|
|
|
APInt Size(PtrBitWidth, DL.getTypeStoreSize(Ty));
|
2013-07-18 04:33:20 +00:00
|
|
|
|
2013-08-22 12:45:17 +00:00
|
|
|
APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
|
2015-03-10 02:37:25 +00:00
|
|
|
PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
|
|
|
|
PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);
|
2013-07-18 04:33:20 +00:00
|
|
|
|
2013-08-22 12:45:17 +00:00
|
|
|
APInt OffsetDelta = OffsetB - OffsetA;
|
2013-07-18 04:33:20 +00:00
|
|
|
|
2013-08-22 12:45:17 +00:00
|
|
|
// Check if they are based on the same pointer. That makes the offsets
|
|
|
|
// sufficient.
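// Worked example (illustrative): for i32 loads of a[0] and a[1] off the same
// base pointer, the accumulated offsets are OffsetA = 0 and OffsetB = 4, and
// Size = 4, so OffsetDelta == Size and the accesses count as consecutive.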
|
|
|
|
if (PtrA == PtrB)
|
|
|
|
return OffsetDelta == Size;
|
2013-07-18 18:20:45 +00:00
|
|
|
|
2013-08-22 12:45:17 +00:00
|
|
|
// Compute the base pointer delta needed to make the final delta
|
|
|
|
// equal to the size.
|
|
|
|
APInt BaseDelta = Size - OffsetDelta;
|
2013-07-18 18:34:21 +00:00
|
|
|
|
2013-08-22 12:45:17 +00:00
|
|
|
// Otherwise compute the distance with SCEV between the base pointers.
|
2013-07-07 06:57:07 +00:00
|
|
|
const SCEV *PtrSCEVA = SE->getSCEV(PtrA);
|
|
|
|
const SCEV *PtrSCEVB = SE->getSCEV(PtrB);
|
2013-08-22 12:45:17 +00:00
|
|
|
const SCEV *C = SE->getConstant(BaseDelta);
|
2013-07-17 00:48:31 +00:00
|
|
|
const SCEV *X = SE->getAddExpr(PtrSCEVA, C);
|
|
|
|
return X == PtrSCEVB;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2015-01-20 06:11:00 +00:00
|
|
|
// Reorder commutative operations in alternate shuffle if the resulting vectors
|
|
|
|
// are consecutive loads. This would allow us to vectorize the tree.
|
|
|
|
// If we have something like:
|
|
|
|
// load a[0] - load b[0]
|
|
|
|
// load b[1] + load a[1]
|
|
|
|
// load a[2] - load b[2]
|
|
|
|
// load a[3] + load b[3]
|
|
|
|
// Reordering the second pair (load b[1], load a[1]) would allow us to vectorize this
|
|
|
|
// code.
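// After that swap, the operand lists in the example above become
// Left = {a[0], a[1], a[2], a[3]} and Right = {b[0], b[1], b[2], b[3]},
// each of which can be loaded as a single wide vector.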
|
|
|
|
void BoUpSLP::reorderAltShuffleOperands(ArrayRef<Value *> VL,
|
|
|
|
SmallVectorImpl<Value *> &Left,
|
|
|
|
SmallVectorImpl<Value *> &Right) {
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = F->getParent()->getDataLayout();
|
2015-01-20 06:11:00 +00:00
|
|
|
|
|
|
|
// Push left and right operands of binary operation into Left and Right
|
|
|
|
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
|
|
|
|
Left.push_back(cast<Instruction>(VL[i])->getOperand(0));
|
|
|
|
Right.push_back(cast<Instruction>(VL[i])->getOperand(1));
|
|
|
|
}
|
|
|
|
|
|
|
|
// Reorder if we have a commutative operation and consecutive accesses
|
|
|
|
// are on either side of the alternate instructions.
|
|
|
|
for (unsigned j = 0; j < VL.size() - 1; ++j) {
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
|
|
|
|
if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
|
|
|
|
Instruction *VL1 = cast<Instruction>(VL[j]);
|
|
|
|
Instruction *VL2 = cast<Instruction>(VL[j + 1]);
|
2015-03-10 02:37:25 +00:00
|
|
|
if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j], Right[j]);
|
|
|
|
continue;
|
2015-03-10 02:37:25 +00:00
|
|
|
} else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j + 1], Right[j + 1]);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// else unchanged
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
|
|
|
|
if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
|
|
|
|
Instruction *VL1 = cast<Instruction>(VL[j]);
|
|
|
|
Instruction *VL2 = cast<Instruction>(VL[j + 1]);
|
2015-03-10 02:37:25 +00:00
|
|
|
if (isConsecutiveAccess(L, L1, DL) && VL1->isCommutative()) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j], Right[j]);
|
|
|
|
continue;
|
2015-03-10 02:37:25 +00:00
|
|
|
} else if (isConsecutiveAccess(L, L1, DL) && VL2->isCommutative()) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j + 1], Right[j + 1]);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// else unchanged
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BoUpSLP::reorderInputsAccordingToOpcode(ArrayRef<Value *> VL,
|
|
|
|
SmallVectorImpl<Value *> &Left,
|
|
|
|
SmallVectorImpl<Value *> &Right) {
|
|
|
|
|
|
|
|
SmallVector<Value *, 16> OrigLeft, OrigRight;
|
|
|
|
|
|
|
|
bool AllSameOpcodeLeft = true;
|
|
|
|
bool AllSameOpcodeRight = true;
|
|
|
|
for (unsigned i = 0, e = VL.size(); i != e; ++i) {
|
|
|
|
Instruction *I = cast<Instruction>(VL[i]);
|
|
|
|
Value *VLeft = I->getOperand(0);
|
|
|
|
Value *VRight = I->getOperand(1);
|
|
|
|
|
|
|
|
OrigLeft.push_back(VLeft);
|
|
|
|
OrigRight.push_back(VRight);
|
|
|
|
|
|
|
|
Instruction *ILeft = dyn_cast<Instruction>(VLeft);
|
|
|
|
Instruction *IRight = dyn_cast<Instruction>(VRight);
|
|
|
|
|
|
|
|
// Check whether all operands on one side have the same opcode. In this case
|
|
|
|
// we want to preserve the original order and not make things worse by
|
|
|
|
// reordering.
|
|
|
|
if (i && AllSameOpcodeLeft && ILeft) {
|
|
|
|
if (Instruction *PLeft = dyn_cast<Instruction>(OrigLeft[i - 1])) {
|
|
|
|
if (PLeft->getOpcode() != ILeft->getOpcode())
|
|
|
|
AllSameOpcodeLeft = false;
|
|
|
|
} else
|
|
|
|
AllSameOpcodeLeft = false;
|
|
|
|
}
|
|
|
|
if (i && AllSameOpcodeRight && IRight) {
|
|
|
|
if (Instruction *PRight = dyn_cast<Instruction>(OrigRight[i - 1])) {
|
|
|
|
if (PRight->getOpcode() != IRight->getOpcode())
|
|
|
|
AllSameOpcodeRight = false;
|
|
|
|
} else
|
|
|
|
AllSameOpcodeRight = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sort two opcodes. In the code below we try to preserve the ability to use
|
|
|
|
// broadcast of values instead of individual inserts.
|
|
|
|
// vl1 = load
|
|
|
|
// vl2 = phi
|
|
|
|
// vr1 = load
|
|
|
|
// vr2 = vr1
|
|
|
|
// = vl1 x vr1
|
|
|
|
// = vl2 x vr2
|
|
|
|
// If we just sorted according to opcode we would leave the first line
|
|
|
|
// intact but we would swap vl2 with vr2 because opcode(phi) > opcode(load).
|
|
|
|
// = vl1 x vr1
|
|
|
|
// = vr2 x vl2
|
|
|
|
// Because vr2 and vr1 are from the same load we lose the opportunity of a
|
|
|
|
// broadcast for the packed right side in the backend: we have [vr1, vl2]
|
|
|
|
// instead of [vr1, vr2=vr1].
|
|
|
|
if (ILeft && IRight) {
|
|
|
|
if (!i && ILeft->getOpcode() > IRight->getOpcode()) {
|
|
|
|
Left.push_back(IRight);
|
|
|
|
Right.push_back(ILeft);
|
|
|
|
} else if (i && ILeft->getOpcode() > IRight->getOpcode() &&
|
|
|
|
Right[i - 1] != IRight) {
|
|
|
|
// Try not to destroy a broadcast for no apparent benefit.
|
|
|
|
Left.push_back(IRight);
|
|
|
|
Right.push_back(ILeft);
|
|
|
|
} else if (i && ILeft->getOpcode() == IRight->getOpcode() &&
|
|
|
|
Right[i - 1] == ILeft) {
|
|
|
|
// Try to preserve broadcasts.
|
|
|
|
Left.push_back(IRight);
|
|
|
|
Right.push_back(ILeft);
|
|
|
|
} else if (i && ILeft->getOpcode() == IRight->getOpcode() &&
|
|
|
|
Left[i - 1] == IRight) {
|
|
|
|
// Try to preserve broadcasts.
|
|
|
|
Left.push_back(IRight);
|
|
|
|
Right.push_back(ILeft);
|
|
|
|
} else {
|
|
|
|
Left.push_back(ILeft);
|
|
|
|
Right.push_back(IRight);
|
|
|
|
}
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
// At most one operand is an instruction; if it is the left one, put it on the right.
|
|
|
|
if (ILeft) {
|
|
|
|
Left.push_back(VRight);
|
|
|
|
Right.push_back(ILeft);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
Left.push_back(VLeft);
|
|
|
|
Right.push_back(VRight);
|
|
|
|
}
|
|
|
|
|
|
|
|
bool LeftBroadcast = isSplat(Left);
|
|
|
|
bool RightBroadcast = isSplat(Right);
|
|
|
|
|
|
|
|
// If operands end up being broadcast return this operand order.
|
|
|
|
if (LeftBroadcast || RightBroadcast)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Don't reorder if the operands were good to begin with.
|
|
|
|
if (AllSameOpcodeRight || AllSameOpcodeLeft) {
|
|
|
|
Left = OrigLeft;
|
|
|
|
Right = OrigRight;
|
|
|
|
}
|
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = F->getParent()->getDataLayout();
|
|
|
|
|
2015-01-20 06:11:00 +00:00
|
|
|
// Finally check if we can get a longer vectorizable chain by reordering
|
|
|
|
// without breaking the good operand order detected above.
|
|
|
|
// E.g., if we have something like:
|
|
|
|
// load a[0] load b[0]
|
|
|
|
// load b[1] load a[1]
|
|
|
|
// load a[2] load b[2]
|
|
|
|
// load a[3] load b[3]
|
|
|
|
// Reordering the second pair (load b[1], load a[1]) would allow us to vectorize
|
|
|
|
// this code and we still retain AllSameOpcode property.
|
|
|
|
// FIXME: This load reordering might break AllSameOpcode in some rare cases
|
|
|
|
// such as-
|
|
|
|
// add a[0],c[0] load b[0]
|
|
|
|
// add a[1],c[2] load b[1]
|
|
|
|
// b[2] load b[2]
|
|
|
|
// add a[3],c[3] load b[3]
|
|
|
|
for (unsigned j = 0; j < VL.size() - 1; ++j) {
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(Left[j])) {
|
|
|
|
if (LoadInst *L1 = dyn_cast<LoadInst>(Right[j + 1])) {
|
2015-03-10 02:37:25 +00:00
|
|
|
if (isConsecutiveAccess(L, L1, DL)) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j + 1], Right[j + 1]);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (LoadInst *L = dyn_cast<LoadInst>(Right[j])) {
|
|
|
|
if (LoadInst *L1 = dyn_cast<LoadInst>(Left[j + 1])) {
|
2015-03-10 02:37:25 +00:00
|
|
|
if (isConsecutiveAccess(L, L1, DL)) {
|
2015-01-20 06:11:00 +00:00
|
|
|
std::swap(Left[j + 1], Right[j + 1]);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// else unchanged
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
void BoUpSLP::setInsertPointAfterBundle(ArrayRef<Value *> VL) {
|
|
|
|
Instruction *VL0 = cast<Instruction>(VL[0]);
|
2014-08-01 09:20:42 +00:00
|
|
|
BasicBlock::iterator NextInst = VL0;
|
2013-08-26 23:08:37 +00:00
|
|
|
++NextInst;
|
|
|
|
Builder.SetInsertPoint(VL0->getParent(), NextInst);
|
|
|
|
Builder.SetCurrentDebugLocation(VL0->getDebugLoc());
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *BoUpSLP::Gather(ArrayRef<Value *> VL, VectorType *Ty) {
|
2013-06-22 21:34:10 +00:00
|
|
|
Value *Vec = UndefValue::get(Ty);
|
|
|
|
// Generate the 'InsertElement' instruction.
|
|
|
|
for (unsigned i = 0; i < Ty->getNumElements(); ++i) {
|
|
|
|
Vec = Builder.CreateInsertElement(Vec, VL[i], Builder.getInt32(i));
|
2013-07-11 04:54:05 +00:00
|
|
|
if (Instruction *Insrt = dyn_cast<Instruction>(Vec)) {
|
|
|
|
GatherSeq.insert(Insrt);
|
2013-11-26 22:24:25 +00:00
|
|
|
CSEBlocks.insert(Insrt->getParent());
|
2013-07-11 04:54:05 +00:00
|
|
|
|
|
|
|
// Add to our 'need-to-extract' list.
|
|
|
|
if (ScalarToTreeEntry.count(VL[i])) {
|
|
|
|
int Idx = ScalarToTreeEntry[VL[i]];
|
|
|
|
TreeEntry *E = &VectorizableTree[Idx];
|
|
|
|
// Find which lane we need to extract.
|
|
|
|
int FoundLane = -1;
|
|
|
|
for (unsigned Lane = 0, LE = VL.size(); Lane != LE; ++Lane) {
|
|
|
|
// Is this the lane of the scalar that we are looking for?
|
|
|
|
if (E->Scalars[Lane] == VL[i]) {
|
|
|
|
FoundLane = Lane;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
assert(FoundLane >= 0 && "Could not find the correct lane");
|
|
|
|
ExternalUses.push_back(ExternalUser(VL[i], Insrt, FoundLane));
|
|
|
|
}
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return Vec;
|
|
|
|
}
|
|
|
|
|
2013-08-26 17:56:38 +00:00
|
|
|
Value *BoUpSLP::alreadyVectorized(ArrayRef<Value *> VL) const {
|
|
|
|
SmallDenseMap<Value*, int>::const_iterator Entry
|
|
|
|
= ScalarToTreeEntry.find(VL[0]);
|
|
|
|
if (Entry != ScalarToTreeEntry.end()) {
|
|
|
|
int Idx = Entry->second;
|
|
|
|
const TreeEntry *En = &VectorizableTree[Idx];
|
2013-07-22 22:18:07 +00:00
|
|
|
if (En->isSame(VL) && En->VectorizedValue)
|
|
|
|
return En->VectorizedValue;
|
|
|
|
}
|
2014-04-25 05:29:35 +00:00
|
|
|
return nullptr;
|
2013-07-22 22:18:07 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *BoUpSLP::vectorizeTree(ArrayRef<Value *> VL) {
|
|
|
|
if (ScalarToTreeEntry.count(VL[0])) {
|
|
|
|
int Idx = ScalarToTreeEntry[VL[0]];
|
|
|
|
TreeEntry *E = &VectorizableTree[Idx];
|
|
|
|
if (E->isSame(VL))
|
|
|
|
return vectorizeTree(E);
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
|
|
|
Type *ScalarTy = VL[0]->getType();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(VL[0]))
|
|
|
|
ScalarTy = SI->getValueOperand()->getType();
|
|
|
|
VectorType *VecTy = VectorType::get(ScalarTy, VL.size());
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
return Gather(VL, VecTy);
|
|
|
|
}
|
|
|
|
|
|
|
|
Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
|
2013-09-30 15:39:48 +00:00
|
|
|
IRBuilder<>::InsertPointGuard Guard(Builder);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
if (E->VectorizedValue) {
|
|
|
|
DEBUG(dbgs() << "SLP: Diamond merged for " << *E->Scalars[0] << ".\n");
|
|
|
|
return E->VectorizedValue;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
2013-08-26 17:56:35 +00:00
|
|
|
Instruction *VL0 = cast<Instruction>(E->Scalars[0]);
|
|
|
|
Type *ScalarTy = VL0->getType();
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(VL0))
|
2013-07-07 06:57:07 +00:00
|
|
|
ScalarTy = SI->getValueOperand()->getType();
|
|
|
|
VectorType *VecTy = VectorType::get(ScalarTy, E->Scalars.size());
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
if (E->NeedToGather) {
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-07 06:57:07 +00:00
|
|
|
return Gather(E->Scalars, VecTy);
|
|
|
|
}
|
2014-08-01 09:20:42 +00:00
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = F->getParent()->getDataLayout();
|
2014-06-20 04:32:48 +00:00
|
|
|
unsigned Opcode = getSameOpcode(E->Scalars);
|
2013-06-25 23:04:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
switch (Opcode) {
|
|
|
|
case Instruction::PHI: {
|
|
|
|
PHINode *PH = dyn_cast<PHINode>(VL0);
|
2013-09-27 15:30:25 +00:00
|
|
|
Builder.SetInsertPoint(PH->getParent()->getFirstNonPHI());
|
2013-07-29 18:18:46 +00:00
|
|
|
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
|
2013-07-07 06:57:07 +00:00
|
|
|
PHINode *NewPhi = Builder.CreatePHI(VecTy, PH->getNumIncomingValues());
|
|
|
|
E->VectorizedValue = NewPhi;
|
|
|
|
|
2013-08-12 17:46:44 +00:00
|
|
|
// PHINodes may have multiple entries from the same block. We want to
|
|
|
|
// visit every block once.
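// E.g. a switch whose two cases both branch to this PHI's block contributes
// two incoming entries from the same predecessor; the second one below simply
// reuses the vector value already created for that block.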
|
|
|
|
SmallSet<BasicBlock*, 4> VisitedBBs;
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned i = 0, e = PH->getNumIncomingValues(); i < e; ++i) {
|
|
|
|
ValueList Operands;
|
|
|
|
BasicBlock *IBB = PH->getIncomingBlock(i);
|
|
|
|
|
2014-11-19 07:49:26 +00:00
|
|
|
if (!VisitedBBs.insert(IBB).second) {
|
2013-08-12 17:46:44 +00:00
|
|
|
NewPhi->addIncoming(NewPhi->getIncomingValueForBlock(IBB), IBB);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
// Prepare the operand vector.
|
|
|
|
for (unsigned j = 0; j < E->Scalars.size(); ++j)
|
|
|
|
Operands.push_back(cast<PHINode>(E->Scalars[j])->
|
|
|
|
getIncomingValueForBlock(IBB));
|
|
|
|
|
|
|
|
Builder.SetInsertPoint(IBB->getTerminator());
|
2013-07-29 18:18:46 +00:00
|
|
|
Builder.SetCurrentDebugLocation(PH->getDebugLoc());
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *Vec = vectorizeTree(Operands);
|
|
|
|
NewPhi->addIncoming(Vec, IBB);
|
|
|
|
}
|
2013-06-25 23:04:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
assert(NewPhi->getNumIncomingValues() == PH->getNumIncomingValues() &&
|
|
|
|
"Invalid number of incoming values");
|
|
|
|
return NewPhi;
|
2013-06-25 23:04:09 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::ExtractElement: {
|
|
|
|
if (CanReuseExtract(E->Scalars)) {
|
|
|
|
Value *V = VL0->getOperand(0);
|
|
|
|
E->VectorizedValue = V;
|
|
|
|
return V;
|
|
|
|
}
|
|
|
|
return Gather(E->Scalars, VecTy);
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::ZExt:
|
|
|
|
case Instruction::SExt:
|
|
|
|
case Instruction::FPToUI:
|
|
|
|
case Instruction::FPToSI:
|
|
|
|
case Instruction::FPExt:
|
|
|
|
case Instruction::PtrToInt:
|
|
|
|
case Instruction::IntToPtr:
|
|
|
|
case Instruction::SIToFP:
|
|
|
|
case Instruction::UIToFP:
|
|
|
|
case Instruction::Trunc:
|
|
|
|
case Instruction::FPTrunc:
|
|
|
|
case Instruction::BitCast: {
|
|
|
|
ValueList INVL;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i)
|
|
|
|
INVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
|
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *InVec = vectorizeTree(INVL);
|
2013-07-22 22:18:07 +00:00
|
|
|
|
|
|
|
if (Value *V = alreadyVectorized(E->Scalars))
|
|
|
|
return V;
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
CastInst *CI = dyn_cast<CastInst>(VL0);
|
|
|
|
Value *V = Builder.CreateCast(CI->getOpcode(), InVec, VecTy);
|
|
|
|
E->VectorizedValue = V;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-07-07 06:57:07 +00:00
|
|
|
return V;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::FCmp:
|
|
|
|
case Instruction::ICmp: {
|
|
|
|
ValueList LHSV, RHSV;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
|
|
|
|
LHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
|
|
|
|
RHSV.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *L = vectorizeTree(LHSV);
|
|
|
|
Value *R = vectorizeTree(RHSV);
|
2013-07-22 22:18:07 +00:00
|
|
|
|
|
|
|
if (Value *V = alreadyVectorized(E->Scalars))
|
|
|
|
return V;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
CmpInst::Predicate P0 = dyn_cast<CmpInst>(VL0)->getPredicate();
|
2013-07-22 22:18:07 +00:00
|
|
|
Value *V;
|
2013-07-07 06:57:07 +00:00
|
|
|
if (Opcode == Instruction::FCmp)
|
|
|
|
V = Builder.CreateFCmp(P0, L, R);
|
|
|
|
else
|
|
|
|
V = Builder.CreateICmp(P0, L, R);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
E->VectorizedValue = V;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-07-07 06:57:07 +00:00
|
|
|
return V;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Select: {
|
|
|
|
ValueList TrueVec, FalseVec, CondVec;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
|
|
|
|
CondVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
|
|
|
|
TrueVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
|
|
|
|
FalseVec.push_back(cast<Instruction>(E->Scalars[i])->getOperand(2));
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *Cond = vectorizeTree(CondVec);
|
|
|
|
Value *True = vectorizeTree(TrueVec);
|
|
|
|
Value *False = vectorizeTree(FalseVec);
|
2013-07-22 22:18:07 +00:00
|
|
|
|
|
|
|
if (Value *V = alreadyVectorized(E->Scalars))
|
|
|
|
return V;
|
2013-08-26 18:38:29 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *V = Builder.CreateSelect(Cond, True, False);
|
|
|
|
E->VectorizedValue = V;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-07-07 06:57:07 +00:00
|
|
|
return V;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
case Instruction::Add:
|
|
|
|
case Instruction::FAdd:
|
|
|
|
case Instruction::Sub:
|
|
|
|
case Instruction::FSub:
|
|
|
|
case Instruction::Mul:
|
|
|
|
case Instruction::FMul:
|
|
|
|
case Instruction::UDiv:
|
|
|
|
case Instruction::SDiv:
|
|
|
|
case Instruction::FDiv:
|
|
|
|
case Instruction::URem:
|
|
|
|
case Instruction::SRem:
|
|
|
|
case Instruction::FRem:
|
|
|
|
case Instruction::Shl:
|
|
|
|
case Instruction::LShr:
|
|
|
|
case Instruction::AShr:
|
|
|
|
case Instruction::And:
|
|
|
|
case Instruction::Or:
|
|
|
|
case Instruction::Xor: {
|
|
|
|
ValueList LHSVL, RHSVL;
|
2014-12-17 10:34:27 +00:00
|
|
|
if (isa<BinaryOperator>(VL0) && VL0->isCommutative())
|
2013-10-04 20:39:16 +00:00
|
|
|
reorderInputsAccordingToOpcode(E->Scalars, LHSVL, RHSVL);
|
2014-12-17 10:34:27 +00:00
|
|
|
else
|
2013-10-04 20:39:16 +00:00
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
|
|
|
|
LHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(0));
|
|
|
|
RHSVL.push_back(cast<Instruction>(E->Scalars[i])->getOperand(1));
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *LHS = vectorizeTree(LHSVL);
|
|
|
|
Value *RHS = vectorizeTree(RHSVL);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
if (LHS == RHS && isa<Instruction>(LHS)) {
|
|
|
|
assert((VL0->getOperand(0) == VL0->getOperand(1)) && "Invalid order");
|
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-22 22:18:07 +00:00
|
|
|
if (Value *V = alreadyVectorized(E->Scalars))
|
|
|
|
return V;
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
BinaryOperator *BinOp = cast<BinaryOperator>(VL0);
|
|
|
|
Value *V = Builder.CreateBinOp(BinOp->getOpcode(), LHS, RHS);
|
|
|
|
E->VectorizedValue = V;
|
2014-09-03 17:40:30 +00:00
|
|
|
propagateIRFlags(E->VectorizedValue, E->Scalars);
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-11-23 00:48:34 +00:00
|
|
|
|
|
|
|
if (Instruction *I = dyn_cast<Instruction>(V))
|
|
|
|
return propagateMetadata(I, E->Scalars);
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
return V;
|
|
|
|
}
|
|
|
|
case Instruction::Load: {
|
|
|
|
// Loads are inserted at the head of the tree because we don't want to
|
|
|
|
// sink them all the way down past store instructions.
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
LoadInst *LI = cast<LoadInst>(VL0);
|
2014-08-07 22:47:27 +00:00
|
|
|
Type *ScalarLoadTy = LI->getType();
|
2013-09-27 21:24:57 +00:00
|
|
|
unsigned AS = LI->getPointerAddressSpace();
|
|
|
|
|
|
|
|
Value *VecPtr = Builder.CreateBitCast(LI->getPointerOperand(),
|
|
|
|
VecTy->getPointerTo(AS));
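// Schematically (illustration only; exact textual IR syntax depends on the
// LLVM version), four consecutive i32 loads from %p become:
//   %vp = bitcast i32* %p to <4 x i32>*
//   %vl = load <4 x i32>, <4 x i32>* %vp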
|
2014-09-02 21:00:39 +00:00
|
|
|
|
|
|
|
// The pointer operand uses an in-tree scalar so we add the new BitCast to
|
|
|
|
// the ExternalUses list to make sure that an extract will be generated in the
|
|
|
|
// future.
|
|
|
|
if (ScalarToTreeEntry.count(LI->getPointerOperand()))
|
|
|
|
ExternalUses.push_back(
|
|
|
|
ExternalUser(LI->getPointerOperand(), cast<User>(VecPtr), 0));
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
unsigned Alignment = LI->getAlignment();
|
|
|
|
LI = Builder.CreateLoad(VecPtr);
|
2015-03-10 02:37:25 +00:00
|
|
|
if (!Alignment) {
|
|
|
|
Alignment = DL.getABITypeAlignment(ScalarLoadTy);
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
LI->setAlignment(Alignment);
|
|
|
|
E->VectorizedValue = LI;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-11-23 00:48:34 +00:00
|
|
|
return propagateMetadata(LI, E->Scalars);
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
|
|
|
case Instruction::Store: {
|
|
|
|
StoreInst *SI = cast<StoreInst>(VL0);
|
|
|
|
unsigned Alignment = SI->getAlignment();
|
2013-09-27 21:24:57 +00:00
|
|
|
unsigned AS = SI->getPointerAddressSpace();
|
2013-07-07 06:57:07 +00:00
|
|
|
|
|
|
|
ValueList ValueOp;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i)
|
|
|
|
ValueOp.push_back(cast<StoreInst>(E->Scalars[i])->getValueOperand());
|
|
|
|
|
2013-08-26 23:08:37 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2013-07-29 18:18:46 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *VecValue = vectorizeTree(ValueOp);
|
2013-09-27 21:24:57 +00:00
|
|
|
Value *VecPtr = Builder.CreateBitCast(SI->getPointerOperand(),
|
|
|
|
VecTy->getPointerTo(AS));
|
2013-07-07 06:57:07 +00:00
|
|
|
StoreInst *S = Builder.CreateStore(VecValue, VecPtr);
|
2014-09-02 21:00:39 +00:00
|
|
|
|
|
|
|
// The pointer operand uses an in-tree scalar so we add the new BitCast to
|
|
|
|
// the ExternalUses list to make sure that an extract will be generated in the
|
|
|
|
// future.
|
|
|
|
if (ScalarToTreeEntry.count(SI->getPointerOperand()))
|
|
|
|
ExternalUses.push_back(
|
|
|
|
ExternalUser(SI->getPointerOperand(), cast<User>(VecPtr), 0));
|
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
if (!Alignment) {
|
|
|
|
Alignment = DL.getABITypeAlignment(SI->getValueOperand()->getType());
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
S->setAlignment(Alignment);
|
|
|
|
E->VectorizedValue = S;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2013-11-23 00:48:34 +00:00
|
|
|
return propagateMetadata(S, E->Scalars);
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2014-08-27 15:01:18 +00:00
|
|
|
case Instruction::GetElementPtr: {
|
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
|
|
|
|
|
|
|
ValueList Op0VL;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i)
|
|
|
|
Op0VL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(0));
|
|
|
|
|
|
|
|
Value *Op0 = vectorizeTree(Op0VL);
|
|
|
|
|
|
|
|
std::vector<Value *> OpVecs;
|
|
|
|
for (int j = 1, e = cast<GetElementPtrInst>(VL0)->getNumOperands(); j < e;
|
|
|
|
++j) {
|
|
|
|
ValueList OpVL;
|
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i)
|
|
|
|
OpVL.push_back(cast<GetElementPtrInst>(E->Scalars[i])->getOperand(j));
|
|
|
|
|
|
|
|
Value *OpVec = vectorizeTree(OpVL);
|
|
|
|
OpVecs.push_back(OpVec);
|
|
|
|
}
|
|
|
|
|
|
|
|
Value *V = Builder.CreateGEP(Op0, OpVecs);
|
|
|
|
E->VectorizedValue = V;
|
|
|
|
++NumVectorInstructions;
|
|
|
|
|
|
|
|
if (Instruction *I = dyn_cast<Instruction>(V))
|
|
|
|
return propagateMetadata(I, E->Scalars);
|
|
|
|
|
|
|
|
return V;
|
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
case Instruction::Call: {
|
|
|
|
CallInst *CI = cast<CallInst>(VL0);
|
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
2014-05-30 04:31:24 +00:00
|
|
|
Function *FI;
|
|
|
|
Intrinsic::ID IID = Intrinsic::not_intrinsic;
|
2014-09-02 21:00:39 +00:00
|
|
|
Value *ScalarArg = nullptr;
|
2014-05-30 04:31:24 +00:00
|
|
|
if (CI && (FI = CI->getCalledFunction())) {
|
|
|
|
IID = (Intrinsic::ID) FI->getIntrinsicID();
|
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
std::vector<Value *> OpVecs;
|
|
|
|
for (int j = 0, e = CI->getNumArgOperands(); j < e; ++j) {
|
|
|
|
ValueList OpVL;
|
2014-05-30 04:31:24 +00:00
|
|
|
// ctlz, cttz and powi are special intrinsics whose second argument is
|
|
|
|
// a scalar. This argument should not be vectorized.
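// For example (illustration, not pass output): in @llvm.powi.v4f32 the
// exponent operand is an i32 that stays scalar, so it is forwarded as-is
// while the other arguments are vectorized.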
|
|
|
|
if (hasVectorInstrinsicScalarOpd(IID, 1) && j == 1) {
|
|
|
|
CallInst *CEI = cast<CallInst>(E->Scalars[0]);
|
2014-09-02 21:00:39 +00:00
|
|
|
ScalarArg = CEI->getArgOperand(j);
|
2014-05-30 04:31:24 +00:00
|
|
|
OpVecs.push_back(CEI->getArgOperand(j));
|
|
|
|
continue;
|
|
|
|
}
|
2014-03-12 20:21:50 +00:00
|
|
|
for (int i = 0, e = E->Scalars.size(); i < e; ++i) {
|
|
|
|
CallInst *CEI = cast<CallInst>(E->Scalars[i]);
|
|
|
|
OpVL.push_back(CEI->getArgOperand(j));
|
|
|
|
}
|
|
|
|
|
|
|
|
Value *OpVec = vectorizeTree(OpVL);
|
|
|
|
DEBUG(dbgs() << "SLP: OpVec[" << j << "]: " << *OpVec << "\n");
|
|
|
|
OpVecs.push_back(OpVec);
|
|
|
|
}
|
|
|
|
|
|
|
|
Module *M = F->getParent();
|
2014-05-03 09:59:54 +00:00
|
|
|
Intrinsic::ID ID = getIntrinsicIDForCall(CI, TLI);
|
2014-03-12 20:21:50 +00:00
|
|
|
Type *Tys[] = { VectorType::get(CI->getType(), E->Scalars.size()) };
|
|
|
|
Function *CF = Intrinsic::getDeclaration(M, ID, Tys);
|
|
|
|
Value *V = Builder.CreateCall(CF, OpVecs);
|
2014-09-02 21:00:39 +00:00
|
|
|
|
|
|
|
// The scalar argument uses an in-tree scalar so we add the new vectorized
|
|
|
|
// call to the ExternalUses list to make sure that an extract will be
|
|
|
|
// generated in the future.
|
|
|
|
if (ScalarArg && ScalarToTreeEntry.count(ScalarArg))
|
|
|
|
ExternalUses.push_back(ExternalUser(ScalarArg, cast<User>(V), 0));
|
|
|
|
|
2014-03-12 20:21:50 +00:00
|
|
|
E->VectorizedValue = V;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2014-03-12 20:21:50 +00:00
|
|
|
return V;
|
|
|
|
}
|
2014-06-20 04:32:48 +00:00
|
|
|
case Instruction::ShuffleVector: {
|
|
|
|
ValueList LHSVL, RHSVL;
|
2015-01-20 06:11:00 +00:00
|
|
|
assert(isa<BinaryOperator>(VL0) && "Invalid Shuffle Vector Operand");
|
|
|
|
reorderAltShuffleOperands(E->Scalars, LHSVL, RHSVL);
|
2014-06-20 04:32:48 +00:00
|
|
|
setInsertPointAfterBundle(E->Scalars);
|
|
|
|
|
|
|
|
Value *LHS = vectorizeTree(LHSVL);
|
|
|
|
Value *RHS = vectorizeTree(RHSVL);
|
|
|
|
|
|
|
|
if (Value *V = alreadyVectorized(E->Scalars))
|
|
|
|
return V;
|
|
|
|
|
|
|
|
// Create a vector of LHS op1 RHS
|
|
|
|
BinaryOperator *BinOp0 = cast<BinaryOperator>(VL0);
|
|
|
|
Value *V0 = Builder.CreateBinOp(BinOp0->getOpcode(), LHS, RHS);
|
|
|
|
|
|
|
|
// Create a vector of LHS op2 RHS
|
|
|
|
Instruction *VL1 = cast<Instruction>(E->Scalars[1]);
|
|
|
|
BinaryOperator *BinOp1 = cast<BinaryOperator>(VL1);
|
|
|
|
Value *V1 = Builder.CreateBinOp(BinOp1->getOpcode(), LHS, RHS);
|
|
|
|
|
2014-09-03 17:40:30 +00:00
|
|
|
// Create a shuffle to take alternate operations from the vector.
|
|
|
|
// Also, gather up odd and even scalar ops to propagate IR flags to
|
|
|
|
// each vector operation.
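// For example, with e == 4 the mask built below is <0, 5, 2, 7>: even lanes
// are taken from V0 (the op1 results) and odd lanes from V1 (the op2 results).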
|
|
|
|
ValueList OddScalars, EvenScalars;
|
2014-06-20 04:32:48 +00:00
|
|
|
unsigned e = E->Scalars.size();
|
2014-09-03 17:40:30 +00:00
|
|
|
SmallVector<Constant *, 8> Mask(e);
|
2014-06-20 04:32:48 +00:00
|
|
|
for (unsigned i = 0; i < e; ++i) {
|
2014-09-03 17:40:30 +00:00
|
|
|
if (i & 1) {
|
2014-06-20 04:32:48 +00:00
|
|
|
Mask[i] = Builder.getInt32(e + i);
|
2014-09-03 17:40:30 +00:00
|
|
|
OddScalars.push_back(E->Scalars[i]);
|
|
|
|
} else {
|
2014-06-20 04:32:48 +00:00
|
|
|
Mask[i] = Builder.getInt32(i);
|
2014-09-03 17:40:30 +00:00
|
|
|
EvenScalars.push_back(E->Scalars[i]);
|
|
|
|
}
|
2014-06-20 04:32:48 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
Value *ShuffleMask = ConstantVector::get(Mask);
|
2014-09-03 17:40:30 +00:00
|
|
|
propagateIRFlags(V0, EvenScalars);
|
|
|
|
propagateIRFlags(V1, OddScalars);
|
2014-06-20 04:32:48 +00:00
|
|
|
|
|
|
|
Value *V = Builder.CreateShuffleVector(V0, V1, ShuffleMask);
|
|
|
|
E->VectorizedValue = V;
|
2014-08-01 08:14:28 +00:00
|
|
|
++NumVectorInstructions;
|
2014-06-20 04:32:48 +00:00
|
|
|
if (Instruction *I = dyn_cast<Instruction>(V))
|
|
|
|
return propagateMetadata(I, E->Scalars);
|
|
|
|
|
|
|
|
return V;
|
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
default:
|
|
|
|
llvm_unreachable("unknown inst");
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2014-04-25 05:29:35 +00:00
|
|
|
return nullptr;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-09-21 01:06:00 +00:00
|
|
|
Value *BoUpSLP::vectorizeTree() {
|
2014-08-02 19:39:42 +00:00
|
|
|
|
|
|
|
// All blocks must be scheduled before any instructions are inserted.
|
|
|
|
for (auto &BSIter : BlocksSchedules) {
|
|
|
|
scheduleBlock(BSIter.second.get());
|
|
|
|
}
|
|
|
|
|
2013-07-09 17:55:36 +00:00
|
|
|
Builder.SetInsertPoint(F->getEntryBlock().begin());
|
2013-07-07 06:57:07 +00:00
|
|
|
vectorizeTree(&VectorizableTree[0]);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-11 04:54:05 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Extracting " << ExternalUses.size() << " values .\n");
|
|
|
|
|
|
|
|
// Extract all of the elements with the external uses.
|
|
|
|
for (UserList::iterator it = ExternalUses.begin(), e = ExternalUses.end();
|
|
|
|
it != e; ++it) {
|
|
|
|
Value *Scalar = it->Scalar;
|
|
|
|
llvm::User *User = it->User;
|
2013-07-12 06:09:24 +00:00
|
|
|
|
|
|
|
// Skip users that we have already RAUWed. This happens when one instruction
|
|
|
|
// has multiple uses of the same value.
|
2014-03-09 03:16:01 +00:00
|
|
|
if (std::find(Scalar->user_begin(), Scalar->user_end(), User) ==
|
|
|
|
Scalar->user_end())
|
2013-07-11 04:54:05 +00:00
|
|
|
continue;
|
|
|
|
assert(ScalarToTreeEntry.count(Scalar) && "Invalid scalar");
|
|
|
|
|
|
|
|
int Idx = ScalarToTreeEntry[Scalar];
|
|
|
|
TreeEntry *E = &VectorizableTree[Idx];
|
|
|
|
assert(!E->NeedToGather && "Extracting from a gather list");
|
|
|
|
|
|
|
|
Value *Vec = E->VectorizedValue;
|
|
|
|
assert(Vec && "Can't find vectorizable value");
|
|
|
|
|
2013-08-02 18:40:24 +00:00
|
|
|
Value *Lane = Builder.getInt32(it->Lane);
|
2013-07-11 04:54:05 +00:00
|
|
|
// Generate extracts for out-of-tree users.
|
|
|
|
// Find the insertion point for the extractelement lane.
|
2014-03-25 02:18:47 +00:00
|
|
|
if (isa<Instruction>(Vec)){
|
2013-07-12 06:09:24 +00:00
|
|
|
if (PHINode *PH = dyn_cast<PHINode>(User)) {
|
|
|
|
for (int i = 0, e = PH->getNumIncomingValues(); i != e; ++i) {
|
|
|
|
if (PH->getIncomingValue(i) == Scalar) {
|
2013-08-02 18:40:24 +00:00
|
|
|
Builder.SetInsertPoint(PH->getIncomingBlock(i)->getTerminator());
|
|
|
|
Value *Ex = Builder.CreateExtractElement(Vec, Lane);
|
2013-11-26 22:24:25 +00:00
|
|
|
CSEBlocks.insert(PH->getIncomingBlock(i));
|
2013-08-02 18:40:24 +00:00
|
|
|
PH->setOperand(i, Ex);
|
2013-07-12 06:09:24 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
2013-08-02 18:40:24 +00:00
|
|
|
Builder.SetInsertPoint(cast<Instruction>(User));
|
|
|
|
Value *Ex = Builder.CreateExtractElement(Vec, Lane);
|
2013-11-26 22:24:25 +00:00
|
|
|
CSEBlocks.insert(cast<Instruction>(User)->getParent());
|
2013-08-02 18:40:24 +00:00
|
|
|
User->replaceUsesOfWith(Scalar, Ex);
|
2013-07-12 06:09:24 +00:00
|
|
|
}
|
2013-07-11 04:54:05 +00:00
|
|
|
} else {
|
2013-08-02 18:40:24 +00:00
|
|
|
Builder.SetInsertPoint(F->getEntryBlock().begin());
|
|
|
|
Value *Ex = Builder.CreateExtractElement(Vec, Lane);
|
2013-11-26 22:24:25 +00:00
|
|
|
CSEBlocks.insert(&F->getEntryBlock());
|
2013-08-02 18:40:24 +00:00
|
|
|
User->replaceUsesOfWith(Scalar, Ex);
|
2013-07-11 04:54:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: Replaced:" << *User << ".\n");
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
// For each vectorized value:
|
|
|
|
for (int EIdx = 0, EE = VectorizableTree.size(); EIdx < EE; ++EIdx) {
|
|
|
|
TreeEntry *Entry = &VectorizableTree[EIdx];
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
// For each lane:
|
|
|
|
for (int Lane = 0, LE = Entry->Scalars.size(); Lane != LE; ++Lane) {
|
|
|
|
Value *Scalar = Entry->Scalars[Lane];
|
|
|
|
// No need to handle users of gathered values.
|
|
|
|
if (Entry->NeedToGather)
|
|
|
|
continue;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-07-11 05:39:02 +00:00
|
|
|
assert(Entry->VectorizedValue && "Can't find vectorizable value");
|
2013-06-28 22:07:09 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
Type *Ty = Scalar->getType();
|
|
|
|
if (!Ty->isVoidTy()) {
|
2014-03-09 03:50:36 +00:00
|
|
|
#ifndef NDEBUG
|
2014-03-09 03:16:01 +00:00
|
|
|
for (User *U : Scalar->users()) {
|
|
|
|
DEBUG(dbgs() << "SLP: \tvalidating user:" << *U << ".\n");
|
2013-09-21 01:06:00 +00:00
|
|
|
|
2014-03-09 03:16:01 +00:00
|
|
|
assert((ScalarToTreeEntry.count(U) ||
|
2014-05-04 17:10:15 +00:00
|
|
|
// It is legal to replace users in the ignorelist by undef.
|
|
|
|
(std::find(UserIgnoreList.begin(), UserIgnoreList.end(), U) !=
|
|
|
|
UserIgnoreList.end())) &&
|
2013-07-07 06:57:07 +00:00
|
|
|
"Replacing out-of-tree value with undef");
|
|
|
|
}
|
2014-03-09 03:50:36 +00:00
|
|
|
#endif
|
2013-07-07 06:57:07 +00:00
|
|
|
Value *Undef = UndefValue::get(Ty);
|
|
|
|
Scalar->replaceAllUsesWith(Undef);
|
|
|
|
}
|
|
|
|
DEBUG(dbgs() << "SLP: \tErasing scalar:" << *Scalar << ".\n");
|
2015-01-14 11:24:47 +00:00
|
|
|
eraseInstruction(cast<Instruction>(Scalar));
|
2013-06-28 22:07:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-07-07 14:57:18 +00:00
|
|
|
Builder.ClearInsertionPoint();
|
2013-09-21 01:06:00 +00:00
|
|
|
|
|
|
|
return VectorizableTree[0].VectorizedValue;
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
void BoUpSLP::optimizeGatherSequence() {
|
|
|
|
DEBUG(dbgs() << "SLP: Optimizing " << GatherSeq.size()
|
|
|
|
<< " gather sequences instructions.\n");
|
2013-06-23 06:15:46 +00:00
|
|
|
// LICM InsertElementInst sequences.
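// If an insertelement produced for a gather ended up inside a loop but both
// of its operands are defined outside the loop, hoist it into the loop
// preheader so it is executed only once.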
|
2013-06-22 21:34:10 +00:00
|
|
|
for (SetVector<Instruction *>::iterator it = GatherSeq.begin(),
|
2013-06-23 06:15:46 +00:00
|
|
|
e = GatherSeq.end(); it != e; ++it) {
|
|
|
|
InsertElementInst *Insert = dyn_cast<InsertElementInst>(*it);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
|
|
|
if (!Insert)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// Check if this block is inside a loop.
|
2013-06-23 06:15:46 +00:00
|
|
|
Loop *L = LI->getLoopFor(Insert->getParent());
|
2013-06-22 21:34:10 +00:00
|
|
|
if (!L)
|
2013-06-23 06:15:46 +00:00
|
|
|
continue;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
|
|
|
// Check if it has a preheader.
|
|
|
|
BasicBlock *PreHeader = L->getLoopPreheader();
|
|
|
|
if (!PreHeader)
|
2013-06-26 16:54:53 +00:00
|
|
|
continue;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
|
|
|
// If the vector or the element that we insert into it are
|
|
|
|
// instructions that are defined in this basic block then we can't
|
|
|
|
// hoist this instruction.
|
|
|
|
Instruction *CurrVec = dyn_cast<Instruction>(Insert->getOperand(0));
|
|
|
|
Instruction *NewElem = dyn_cast<Instruction>(Insert->getOperand(1));
|
|
|
|
if (CurrVec && L->contains(CurrVec))
|
|
|
|
continue;
|
|
|
|
if (NewElem && L->contains(NewElem))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// We can hoist this instruction. Move it to the pre-header.
|
2013-06-23 06:15:46 +00:00
|
|
|
Insert->moveBefore(PreHeader->getTerminator());
|
|
|
|
}
|
|
|
|
|
2014-05-11 10:28:58 +00:00
|
|
|
// Make a list of all reachable blocks in our CSE queue.
|
|
|
|
SmallVector<const DomTreeNode *, 8> CSEWorkList;
|
|
|
|
CSEWorkList.reserve(CSEBlocks.size());
|
|
|
|
for (BasicBlock *BB : CSEBlocks)
|
|
|
|
if (DomTreeNode *N = DT->getNode(BB)) {
|
|
|
|
assert(DT->isReachableFromEntry(N));
|
|
|
|
CSEWorkList.push_back(N);
|
|
|
|
}
|
|
|
|
|
2013-11-03 12:27:52 +00:00
|
|
|
// Sort blocks by domination. This ensures we visit a block after all blocks
|
|
|
|
// dominating it are visited.
|
2014-03-01 11:47:00 +00:00
|
|
|
std::stable_sort(CSEWorkList.begin(), CSEWorkList.end(),
|
2014-05-11 10:28:58 +00:00
|
|
|
[this](const DomTreeNode *A, const DomTreeNode *B) {
|
2014-03-01 11:47:00 +00:00
|
|
|
return DT->properlyDominates(A, B);
|
|
|
|
});
|
2013-11-03 12:27:52 +00:00
|
|
|
|
2013-06-23 06:15:46 +00:00
|
|
|
// Perform O(N^2) search over the gather sequences and merge identical
|
|
|
|
// instructions. TODO: We can further optimize this scan if we split the
|
|
|
|
// instructions into different buckets based on the insert lane.
|
2013-11-03 12:27:52 +00:00
|
|
|
SmallVector<Instruction *, 16> Visited;
|
2014-05-11 10:28:58 +00:00
|
|
|
for (auto I = CSEWorkList.begin(), E = CSEWorkList.end(); I != E; ++I) {
|
|
|
|
assert((I == CSEWorkList.begin() || !DT->dominates(*I, *std::prev(I))) &&
|
2013-11-03 12:27:52 +00:00
|
|
|
"Worklist not sorted properly!");
|
2014-05-11 10:28:58 +00:00
|
|
|
BasicBlock *BB = (*I)->getBlock();
|
2013-11-03 12:27:52 +00:00
|
|
|
// For all instructions in blocks containing gather sequences:
|
|
|
|
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e;) {
|
|
|
|
Instruction *In = it++;
|
2013-11-26 22:24:25 +00:00
|
|
|
if (!isa<InsertElementInst>(In) && !isa<ExtractElementInst>(In))
|
2013-06-23 06:15:46 +00:00
|
|
|
continue;
|
|
|
|
|
2013-06-26 16:54:53 +00:00
|
|
|
// Check if we can replace this instruction with any of the
|
|
|
|
// visited instructions.
|
2013-11-03 12:27:52 +00:00
|
|
|
for (SmallVectorImpl<Instruction *>::iterator v = Visited.begin(),
|
|
|
|
ve = Visited.end();
|
|
|
|
v != ve; ++v) {
|
2013-07-12 06:09:24 +00:00
|
|
|
if (In->isIdenticalTo(*v) &&
|
|
|
|
DT->dominates((*v)->getParent(), In->getParent())) {
|
|
|
|
In->replaceAllUsesWith(*v);
|
2015-01-14 11:24:47 +00:00
|
|
|
eraseInstruction(In);
|
2014-04-25 05:29:35 +00:00
|
|
|
In = nullptr;
|
2013-06-23 06:15:46 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2013-11-03 12:27:52 +00:00
|
|
|
if (In) {
|
|
|
|
assert(std::find(Visited.begin(), Visited.end(), In) == Visited.end());
|
|
|
|
Visited.push_back(In);
|
|
|
|
}
|
2013-06-23 06:15:46 +00:00
|
|
|
}
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
2013-11-26 22:24:25 +00:00
|
|
|
CSEBlocks.clear();
|
|
|
|
GatherSeq.clear();
|
2013-06-22 21:34:10 +00:00
|
|
|
}
|
|
|
|
|
2014-08-01 09:20:42 +00:00
|
|
|
// Groups the instructions into a bundle (which is then a single scheduling entity)
|
|
|
|
// and schedules instructions until the bundle gets ready.
|
|
|
|
bool BoUpSLP::BlockScheduling::tryScheduleBundle(ArrayRef<Value *> VL,
|
2015-01-14 11:24:47 +00:00
|
|
|
BoUpSLP *SLP) {
|
2014-08-01 09:20:42 +00:00
|
|
|
if (isa<PHINode>(VL[0]))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// Initialize the instruction bundle.
|
|
|
|
Instruction *OldScheduleEnd = ScheduleEnd;
|
|
|
|
ScheduleData *PrevInBundle = nullptr;
|
|
|
|
ScheduleData *Bundle = nullptr;
|
|
|
|
bool ReSchedule = false;
|
|
|
|
DEBUG(dbgs() << "SLP: bundle: " << *VL[0] << "\n");
|
|
|
|
for (Value *V : VL) {
|
|
|
|
extendSchedulingRegion(V);
|
|
|
|
ScheduleData *BundleMember = getScheduleData(V);
|
|
|
|
assert(BundleMember &&
|
|
|
|
"no ScheduleData for bundle member (maybe not in same basic block)");
|
|
|
|
if (BundleMember->IsScheduled) {
|
|
|
|
// A bundle member was scheduled as a single instruction before and now
|
|
|
|
// needs to be scheduled as part of the bundle. We just get rid of the
|
|
|
|
// existing schedule.
|
|
|
|
DEBUG(dbgs() << "SLP: reset schedule because " << *BundleMember
|
|
|
|
<< " was already scheduled\n");
|
|
|
|
ReSchedule = true;
|
|
|
|
}
|
|
|
|
assert(BundleMember->isSchedulingEntity() &&
|
|
|
|
"bundle member already part of other bundle");
|
|
|
|
if (PrevInBundle) {
|
|
|
|
PrevInBundle->NextInBundle = BundleMember;
|
|
|
|
} else {
|
|
|
|
Bundle = BundleMember;
|
|
|
|
}
|
|
|
|
BundleMember->UnscheduledDepsInBundle = 0;
|
|
|
|
Bundle->UnscheduledDepsInBundle += BundleMember->UnscheduledDeps;
|
|
|
|
|
|
|
|
// Group the instructions into a bundle.
|
|
|
|
BundleMember->FirstInBundle = Bundle;
|
|
|
|
PrevInBundle = BundleMember;
|
|
|
|
}
|
|
|
|
if (ScheduleEnd != OldScheduleEnd) {
|
|
|
|
// The scheduling region got new instructions at the lower end (or it is a
|
|
|
|
// new region for the first bundle). This makes it necessary to
|
|
|
|
// recalculate all dependencies.
|
|
|
|
// It is seldom that this needs to be done a second time after adding the
|
|
|
|
// initial bundle to the region.
|
|
|
|
for (auto *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
|
|
|
|
ScheduleData *SD = getScheduleData(I);
|
|
|
|
SD->clearDependencies();
|
|
|
|
}
|
|
|
|
ReSchedule = true;
|
|
|
|
}
|
|
|
|
if (ReSchedule) {
|
|
|
|
resetSchedule();
|
|
|
|
initialFillReadyList(ReadyInsts);
|
|
|
|
}
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: try schedule bundle " << *Bundle << " in block "
|
|
|
|
<< BB->getName() << "\n");
|
|
|
|
|
2015-01-14 11:24:47 +00:00
|
|
|
calculateDependencies(Bundle, true, SLP);
|
2014-08-01 09:20:42 +00:00
|
|
|
|
|
|
|
// Now try to schedule the new bundle. As soon as the bundle is "ready" it
|
|
|
|
// means that there are no cyclic dependencies and we can schedule it.
|
|
|
|
// Note that's important that we don't "schedule" the bundle yet (see
|
|
|
|
// cancelScheduling).
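// If the bundle never becomes ready, the caller uses cancelScheduling() to
// split it back into single instructions; that is only possible while the
// bundle members are still unscheduled.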
|
|
|
|
while (!Bundle->isReady() && !ReadyInsts.empty()) {
|
|
|
|
|
|
|
|
ScheduleData *pickedSD = ReadyInsts.back();
|
|
|
|
ReadyInsts.pop_back();
|
|
|
|
|
|
|
|
if (pickedSD->isSchedulingEntity() && pickedSD->isReady()) {
|
|
|
|
schedule(pickedSD, ReadyInsts);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return Bundle->isReady();
|
|
|
|
}
|
|
|
|
|
|
|
|
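// Undo tryScheduleBundle(): break the bundle built for VL back into
// independent scheduling entities and put the ones that became ready onto
// the ready list.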
void BoUpSLP::BlockScheduling::cancelScheduling(ArrayRef<Value *> VL) {
|
|
|
|
if (isa<PHINode>(VL[0]))
|
|
|
|
return;
|
|
|
|
|
|
|
|
ScheduleData *Bundle = getScheduleData(VL[0]);
|
|
|
|
DEBUG(dbgs() << "SLP: cancel scheduling of " << *Bundle << "\n");
|
|
|
|
assert(!Bundle->IsScheduled &&
|
|
|
|
"Can't cancel bundle which is already scheduled");
|
|
|
|
assert(Bundle->isSchedulingEntity() && Bundle->isPartOfBundle() &&
|
|
|
|
"tried to unbundle something which is not a bundle");
|
|
|
|
|
|
|
|
// Un-bundle: make single instructions out of the bundle.
|
|
|
|
ScheduleData *BundleMember = Bundle;
|
|
|
|
while (BundleMember) {
|
|
|
|
assert(BundleMember->FirstInBundle == Bundle && "corrupt bundle links");
|
|
|
|
BundleMember->FirstInBundle = BundleMember;
|
|
|
|
ScheduleData *Next = BundleMember->NextInBundle;
|
|
|
|
BundleMember->NextInBundle = nullptr;
|
|
|
|
BundleMember->UnscheduledDepsInBundle = BundleMember->UnscheduledDeps;
|
|
|
|
if (BundleMember->UnscheduledDepsInBundle == 0) {
|
|
|
|
ReadyInsts.insert(BundleMember);
|
|
|
|
}
|
|
|
|
BundleMember = Next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BoUpSLP::BlockScheduling::extendSchedulingRegion(Value *V) {
|
|
|
|
if (getScheduleData(V))
|
|
|
|
return;
|
|
|
|
Instruction *I = dyn_cast<Instruction>(V);
|
|
|
|
assert(I && "bundle member must be an instruction");
|
|
|
|
assert(!isa<PHINode>(I) && "phi nodes don't need to be scheduled");
|
|
|
|
if (!ScheduleStart) {
|
|
|
|
// It's the first instruction in the new region.
|
|
|
|
initScheduleData(I, I->getNextNode(), nullptr, nullptr);
|
|
|
|
ScheduleStart = I;
|
|
|
|
ScheduleEnd = I->getNextNode();
|
|
|
|
assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
|
|
|
|
DEBUG(dbgs() << "SLP: initialize schedule region to " << *I << "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Search up and down at the same time, because we don't know if the new
|
|
|
|
// instruction is above or below the existing scheduling region.
|
|
|
|
BasicBlock::reverse_iterator UpIter(ScheduleStart);
|
|
|
|
BasicBlock::reverse_iterator UpperEnd = BB->rend();
|
|
|
|
BasicBlock::iterator DownIter(ScheduleEnd);
|
|
|
|
BasicBlock::iterator LowerEnd = BB->end();
|
|
|
|
for (;;) {
|
|
|
|
if (UpIter != UpperEnd) {
|
|
|
|
if (&*UpIter == I) {
|
|
|
|
initScheduleData(I, ScheduleStart, nullptr, FirstLoadStoreInRegion);
|
|
|
|
ScheduleStart = I;
|
|
|
|
DEBUG(dbgs() << "SLP: extend schedule region start to " << *I << "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
UpIter++;
|
|
|
|
}
|
|
|
|
if (DownIter != LowerEnd) {
|
|
|
|
if (&*DownIter == I) {
|
|
|
|
initScheduleData(ScheduleEnd, I->getNextNode(), LastLoadStoreInRegion,
|
|
|
|
nullptr);
|
|
|
|
ScheduleEnd = I->getNextNode();
|
|
|
|
assert(ScheduleEnd && "tried to vectorize a TerminatorInst?");
|
|
|
|
DEBUG(dbgs() << "SLP: extend schedule region end to " << *I << "\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
DownIter++;
|
|
|
|
}
|
|
|
|
assert((UpIter != UpperEnd || DownIter != LowerEnd) &&
|
|
|
|
"instruction not found in block");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
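// Create (or re-initialize) ScheduleData for every instruction in the range
// [FromI, ToI) and splice the region's linked list of memory-accessing
// instructions between PrevLoadStore and NextLoadStore.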
void BoUpSLP::BlockScheduling::initScheduleData(Instruction *FromI,
|
|
|
|
Instruction *ToI,
|
|
|
|
ScheduleData *PrevLoadStore,
|
|
|
|
ScheduleData *NextLoadStore) {
|
|
|
|
ScheduleData *CurrentLoadStore = PrevLoadStore;
|
|
|
|
for (Instruction *I = FromI; I != ToI; I = I->getNextNode()) {
|
|
|
|
ScheduleData *SD = ScheduleDataMap[I];
|
|
|
|
if (!SD) {
|
|
|
|
// Allocate a new ScheduleData for the instruction.
|
|
|
|
if (ChunkPos >= ChunkSize) {
|
|
|
|
ScheduleDataChunks.push_back(
|
|
|
|
llvm::make_unique<ScheduleData[]>(ChunkSize));
|
|
|
|
ChunkPos = 0;
|
|
|
|
}
|
|
|
|
SD = &(ScheduleDataChunks.back()[ChunkPos++]);
|
|
|
|
ScheduleDataMap[I] = SD;
|
|
|
|
SD->Inst = I;
|
|
|
|
}
|
|
|
|
assert(!isInSchedulingRegion(SD) &&
|
|
|
|
"new ScheduleData already in scheduling region");
|
|
|
|
SD->init(SchedulingRegionID);
|
|
|
|
|
|
|
|
if (I->mayReadOrWriteMemory()) {
|
|
|
|
// Update the linked list of memory accessing instructions.
|
|
|
|
if (CurrentLoadStore) {
|
|
|
|
CurrentLoadStore->NextLoadStore = SD;
|
|
|
|
} else {
|
|
|
|
FirstLoadStoreInRegion = SD;
|
|
|
|
}
|
|
|
|
CurrentLoadStore = SD;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (NextLoadStore) {
|
|
|
|
if (CurrentLoadStore)
|
|
|
|
CurrentLoadStore->NextLoadStore = NextLoadStore;
|
|
|
|
} else {
|
|
|
|
LastLoadStoreInRegion = CurrentLoadStore;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
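// Compute def-use and memory dependencies for the bundle SD and, transitively,
// for every bundle found to depend on it. If InsertInReadyList is set, bundles
// that turn out to be ready during the walk are pushed onto the ready list.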
void BoUpSLP::BlockScheduling::calculateDependencies(ScheduleData *SD,
|
|
|
|
bool InsertInReadyList,
|
2015-01-14 11:24:47 +00:00
|
|
|
BoUpSLP *SLP) {
|
2014-08-01 09:20:42 +00:00
|
|
|
assert(SD->isSchedulingEntity());
|
|
|
|
|
|
|
|
SmallVector<ScheduleData *, 10> WorkList;
|
|
|
|
WorkList.push_back(SD);
|
|
|
|
|
|
|
|
while (!WorkList.empty()) {
|
|
|
|
ScheduleData *SD = WorkList.back();
|
|
|
|
WorkList.pop_back();
|
|
|
|
|
|
|
|
ScheduleData *BundleMember = SD;
|
|
|
|
while (BundleMember) {
|
|
|
|
assert(isInSchedulingRegion(BundleMember));
|
|
|
|
if (!BundleMember->hasValidDependencies()) {
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: update deps of " << *BundleMember << "\n");
|
|
|
|
BundleMember->Dependencies = 0;
|
|
|
|
BundleMember->resetUnscheduledDeps();
|
|
|
|
|
|
|
|
// Handle def-use chain dependencies.
|
|
|
|
for (User *U : BundleMember->Inst->users()) {
|
|
|
|
if (isa<Instruction>(U)) {
|
|
|
|
ScheduleData *UseSD = getScheduleData(U);
|
|
|
|
if (UseSD && isInSchedulingRegion(UseSD->FirstInBundle)) {
|
|
|
|
BundleMember->Dependencies++;
|
|
|
|
ScheduleData *DestBundle = UseSD->FirstInBundle;
|
|
|
|
if (!DestBundle->IsScheduled) {
|
|
|
|
BundleMember->incrementUnscheduledDeps(1);
|
|
|
|
}
|
|
|
|
if (!DestBundle->hasValidDependencies()) {
|
|
|
|
WorkList.push_back(DestBundle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// I'm not sure if this can ever happen. But we need to be safe.
|
|
|
|
// This keeps the instruction/bundle from ever being scheduled and eventually
|
|
|
|
// disables vectorization.
|
|
|
|
BundleMember->Dependencies++;
|
|
|
|
BundleMember->incrementUnscheduledDeps(1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Handle the memory dependencies.
|
|
|
|
ScheduleData *DepDest = BundleMember->NextLoadStore;
|
|
|
|
if (DepDest) {
|
2015-01-14 11:24:47 +00:00
|
|
|
Instruction *SrcInst = BundleMember->Inst;
|
|
|
|
AliasAnalysis::Location SrcLoc = getLocation(SrcInst, SLP->AA);
|
2014-08-01 09:20:42 +00:00
|
|
|
bool SrcMayWrite = BundleMember->Inst->mayWriteToMemory();
|
2015-01-19 09:33:38 +00:00
|
|
|
unsigned numAliased = 0;
|
2015-01-22 08:20:51 +00:00
|
|
|
unsigned DistToSrc = 1;
|
2014-08-01 09:20:42 +00:00
|
|
|
|
|
|
|
while (DepDest) {
|
|
|
|
assert(isInSchedulingRegion(DepDest));
|
2015-01-22 08:20:51 +00:00
|
|
|
|
|
|
|
// We have two limits to reduce the complexity:
|
|
|
|
// 1) AliasedCheckLimit: It's a small limit to reduce calls to
|
|
|
|
// SLP->isAliased (which is the expensive part in this loop).
|
|
|
|
// 2) MaxMemDepDistance: It's for very large blocks and it aborts
|
|
|
|
// the whole loop (even if the loop is fast, it's quadratic).
|
|
|
|
// It's important for the loop break condition (see below) to
|
|
|
|
// check this limit even between two read-only instructions.
|
|
|
|
if (DistToSrc >= MaxMemDepDistance ||
|
|
|
|
((SrcMayWrite || DepDest->Inst->mayWriteToMemory()) &&
|
|
|
|
(numAliased >= AliasedCheckLimit ||
|
|
|
|
SLP->isAliased(SrcLoc, SrcInst, DepDest->Inst)))) {
|
|
|
|
|
|
|
|
// We increment the counter only if the locations are aliased
|
|
|
|
// (instead of counting all alias checks). This gives a better
|
|
|
|
// balance between reduced runtime and accurate dependencies.
|
|
|
|
numAliased++;
|
|
|
|
|
|
|
|
DepDest->MemoryDependencies.push_back(BundleMember);
|
|
|
|
BundleMember->Dependencies++;
|
|
|
|
ScheduleData *DestBundle = DepDest->FirstInBundle;
|
|
|
|
if (!DestBundle->IsScheduled) {
|
|
|
|
BundleMember->incrementUnscheduledDeps(1);
|
|
|
|
}
|
|
|
|
if (!DestBundle->hasValidDependencies()) {
|
|
|
|
WorkList.push_back(DestBundle);
|
2014-08-01 09:20:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
DepDest = DepDest->NextLoadStore;
|
2015-01-22 08:20:51 +00:00
|
|
|
|
|
|
|
// Example, explaining the loop break condition: Let's assume our
|
|
|
|
// starting instruction is i0 and MaxMemDepDistance = 3.
|
|
|
|
//
|
|
|
|
// +--------v--v--v
|
|
|
|
// i0,i1,i2,i3,i4,i5,i6,i7,i8
|
|
|
|
// +--------^--^--^
|
|
|
|
//
|
|
|
|
// MaxMemDepDistance let us stop alias-checking at i3 and we add
|
|
|
|
// dependencies from i0 to i3,i4,.. (even if they are not aliased).
|
|
|
|
// Previously we already added dependencies from i3 to i6,i7,i8
|
|
|
|
// (because of MaxMemDepDistance). As we added a dependency from
|
|
|
|
// i0 to i3, we have transitive dependencies from i0 to i6,i7,i8
|
|
|
|
// and we can abort this loop at i6.
|
|
|
|
if (DistToSrc >= 2 * MaxMemDepDistance)
|
|
|
|
break;
|
|
|
|
DistToSrc++;
|
2014-08-01 09:20:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
BundleMember = BundleMember->NextInBundle;
|
|
|
|
}
|
|
|
|
if (InsertInReadyList && SD->isReady()) {
|
|
|
|
ReadyInsts.push_back(SD);
|
|
|
|
DEBUG(dbgs() << "SLP: gets ready on update: " << *SD->Inst << "\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void BoUpSLP::BlockScheduling::resetSchedule() {
|
|
|
|
assert(ScheduleStart &&
|
|
|
|
"tried to reset schedule on block which has not been scheduled");
|
|
|
|
for (Instruction *I = ScheduleStart; I != ScheduleEnd; I = I->getNextNode()) {
|
|
|
|
ScheduleData *SD = getScheduleData(I);
|
|
|
|
assert(isInSchedulingRegion(SD));
|
|
|
|
SD->IsScheduled = false;
|
|
|
|
SD->resetUnscheduledDeps();
|
|
|
|
}
|
|
|
|
ReadyInsts.clear();
|
|
|
|
}
|
|
|
|
|
2014-08-02 19:39:42 +00:00
|
|
|
void BoUpSLP::scheduleBlock(BlockScheduling *BS) {
|
|
|
|
|
|
|
|
if (!BS->ScheduleStart)
|
2014-08-01 09:20:42 +00:00
|
|
|
return;
|
2014-08-02 19:39:42 +00:00
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: schedule block " << BS->BB->getName() << "\n");
|
2014-08-01 09:20:42 +00:00
|
|
|
|
|
|
|
BS->resetSchedule();
|
|
|
|
|
|
|
|
// For the real scheduling we use a more sophisticated ready-list: it is
|
|
|
|
// sorted by the original instruction location. This lets the final schedule
|
|
|
|
// be as close as possible to the original instruction order.
|
|
|
|
struct ScheduleDataCompare {
|
|
|
|
bool operator()(ScheduleData *SD1, ScheduleData *SD2) {
|
|
|
|
return SD2->SchedulingPriority < SD1->SchedulingPriority;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
std::set<ScheduleData *, ScheduleDataCompare> ReadyInsts;
|
|
|
|
|
|
|
|
// Ensure that all dependency data is updated and fill the ready-list with
|
|
|
|
// initial instructions.
|
|
|
|
int Idx = 0;
|
|
|
|
int NumToSchedule = 0;
|
|
|
|
for (auto *I = BS->ScheduleStart; I != BS->ScheduleEnd;
|
|
|
|
I = I->getNextNode()) {
|
|
|
|
ScheduleData *SD = BS->getScheduleData(I);
|
|
|
|
assert(
|
|
|
|
SD->isPartOfBundle() == (ScalarToTreeEntry.count(SD->Inst) != 0) &&
|
|
|
|
"scheduler and vectorizer have different opinion on what is a bundle");
|
|
|
|
SD->FirstInBundle->SchedulingPriority = Idx++;
|
|
|
|
if (SD->isSchedulingEntity()) {
|
2015-01-14 11:24:47 +00:00
|
|
|
BS->calculateDependencies(SD, false, this);
|
2014-08-01 09:20:42 +00:00
|
|
|
NumToSchedule++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
BS->initialFillReadyList(ReadyInsts);
|
|
|
|
|
|
|
|
Instruction *LastScheduledInst = BS->ScheduleEnd;
|
|
|
|
|
|
|
|
// Do the "real" scheduling.
|
|
|
|
while (!ReadyInsts.empty()) {
|
|
|
|
ScheduleData *picked = *ReadyInsts.begin();
|
|
|
|
ReadyInsts.erase(ReadyInsts.begin());
|
|
|
|
|
|
|
|
// Move the scheduled instruction(s) to their dedicated places, if not
|
|
|
|
// there yet.
|
|
|
|
ScheduleData *BundleMember = picked;
|
|
|
|
while (BundleMember) {
|
|
|
|
Instruction *pickedInst = BundleMember->Inst;
|
|
|
|
if (LastScheduledInst->getNextNode() != pickedInst) {
|
2014-08-02 19:39:42 +00:00
|
|
|
BS->BB->getInstList().remove(pickedInst);
|
|
|
|
BS->BB->getInstList().insert(LastScheduledInst, pickedInst);
|
2014-08-01 09:20:42 +00:00
|
|
|
}
|
|
|
|
LastScheduledInst = pickedInst;
|
|
|
|
BundleMember = BundleMember->NextInBundle;
|
|
|
|
}
|
|
|
|
|
|
|
|
BS->schedule(picked, ReadyInsts);
|
|
|
|
NumToSchedule--;
|
|
|
|
}
|
|
|
|
assert(NumToSchedule == 0 && "could not schedule all instructions");
|
|
|
|
|
|
|
|
// Avoid duplicate scheduling of the block.
|
|
|
|
BS->ScheduleStart = nullptr;
|
|
|
|
}
|
|
|
|
|
2013-04-09 19:44:35 +00:00
|
|
|
/// The SLPVectorizer Pass.
|
2013-04-15 22:00:26 +00:00
|
|
|
struct SLPVectorizer : public FunctionPass {
|
2013-06-22 21:34:10 +00:00
|
|
|
typedef SmallVector<StoreInst *, 8> StoreList;
|
|
|
|
typedef MapVector<Value *, StoreList> StoreListMap;
|
2013-04-09 19:44:35 +00:00
|
|
|
|
|
|
|
/// Pass identification, replacement for typeid
|
|
|
|
static char ID;
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
explicit SLPVectorizer() : FunctionPass(ID) {
|
2013-04-09 19:44:35 +00:00
|
|
|
initializeSLPVectorizerPass(*PassRegistry::getPassRegistry());
|
|
|
|
}
|
|
|
|
|
|
|
|
ScalarEvolution *SE;
|
|
|
|
TargetTransformInfo *TTI;
|
2014-05-03 09:59:54 +00:00
|
|
|
TargetLibraryInfo *TLI;
|
2013-04-09 19:44:35 +00:00
|
|
|
AliasAnalysis *AA;
|
2013-04-15 22:00:26 +00:00
|
|
|
LoopInfo *LI;
|
2013-06-23 21:57:27 +00:00
|
|
|
DominatorTree *DT;
|
2015-01-04 12:03:27 +00:00
|
|
|
AssumptionCache *AC;
|
2013-04-15 22:00:26 +00:00
|
|
|
|
2014-03-05 09:10:37 +00:00
|
|
|
bool runOnFunction(Function &F) override {
|
2014-02-06 00:07:05 +00:00
|
|
|
if (skipOptnoneFunction(F))
|
|
|
|
return false;
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
SE = &getAnalysis<ScalarEvolution>();
|
2015-02-01 12:01:35 +00:00
|
|
|
TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
|
2015-01-15 10:41:28 +00:00
|
|
|
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
|
|
|
|
TLI = TLIP ? &TLIP->getTLI() : nullptr;
|
2013-04-15 22:00:26 +00:00
|
|
|
AA = &getAnalysis<AliasAnalysis>();
|
2015-01-17 14:16:18 +00:00
|
|
|
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
|
2014-01-13 13:07:17 +00:00
|
|
|
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
|
2015-01-04 12:03:27 +00:00
|
|
|
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
|
2013-04-15 22:00:26 +00:00
|
|
|
|
|
|
|
StoreRefs.clear();
|
|
|
|
bool Changed = false;
|
|
|
|
|
2013-09-18 12:43:35 +00:00
|
|
|
// If the target claims to have no vector registers, don't attempt
|
|
|
|
// vectorization.
|
|
|
|
if (!TTI->getNumberOfRegisters(true))
|
|
|
|
return false;
|
|
|
|
|
2013-07-29 05:13:00 +00:00
|
|
|
// Don't vectorize when the attribute NoImplicitFloat is used.
|
2013-08-21 18:54:50 +00:00
|
|
|
if (F.hasFnAttribute(Attribute::NoImplicitFloat))
|
2013-07-29 05:13:00 +00:00
|
|
|
return false;
|
|
|
|
|
2013-06-20 17:54:36 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Analyzing blocks in " << F.getName() << ".\n");
|
2013-05-10 22:56:18 +00:00
|
|
|
|
2014-02-16 10:43:25 +00:00
|
|
|
// Use the bottom-up SLP vectorizer to construct chains that start with
|
2014-05-20 17:11:11 +00:00
|
|
|
// store instructions.
|
2015-03-10 02:37:25 +00:00
|
|
|
BoUpSLP R(&F, SE, TTI, TLI, AA, LI, DT, AC);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2015-01-14 11:24:47 +00:00
|
|
|
// A general note: the vectorizer must use BoUpSLP::eraseInstruction() to
|
|
|
|
// delete instructions.
|
|
|
|
|
2013-06-26 23:44:45 +00:00
|
|
|
// Scan the blocks in the function in post order.
|
|
|
|
for (po_iterator<BasicBlock*> it = po_begin(&F.getEntryBlock()),
|
|
|
|
e = po_end(&F.getEntryBlock()); it != e; ++it) {
|
|
|
|
BasicBlock *BB = *it;
|
2013-04-15 22:00:26 +00:00
|
|
|
// Vectorize trees that end at stores.
|
2013-04-20 05:23:11 +00:00
|
|
|
if (unsigned count = collectStores(BB, R)) {
|
2013-04-20 06:40:28 +00:00
|
|
|
(void)count;
|
2013-06-20 17:54:36 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Found " << count << " stores to vectorize.\n");
|
2013-06-22 21:34:10 +00:00
|
|
|
Changed |= vectorizeStoreChains(R);
|
2013-04-15 22:00:26 +00:00
|
|
|
}
|
2013-07-14 06:15:46 +00:00
|
|
|
|
|
|
|
// Vectorize trees that end at reductions.
|
|
|
|
Changed |= vectorizeChainsInBlock(BB, R);
|
2013-04-15 22:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (Changed) {
|
2013-06-23 06:15:46 +00:00
|
|
|
R.optimizeGatherSequence();
|
2013-06-20 17:54:36 +00:00
|
|
|
DEBUG(dbgs() << "SLP: vectorized \"" << F.getName() << "\"\n");
|
2013-04-15 22:00:26 +00:00
|
|
|
DEBUG(verifyFunction(F));
|
|
|
|
}
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
2014-03-05 09:10:37 +00:00
|
|
|
void getAnalysisUsage(AnalysisUsage &AU) const override {
|
2013-04-15 22:00:26 +00:00
|
|
|
FunctionPass::getAnalysisUsage(AU);
|
2015-01-04 12:03:27 +00:00
|
|
|
AU.addRequired<AssumptionCacheTracker>();
|
2013-04-15 22:00:26 +00:00
|
|
|
AU.addRequired<ScalarEvolution>();
|
|
|
|
AU.addRequired<AliasAnalysis>();
|
2015-01-31 03:43:40 +00:00
|
|
|
AU.addRequired<TargetTransformInfoWrapperPass>();
|
2015-01-17 14:16:18 +00:00
|
|
|
AU.addRequired<LoopInfoWrapperPass>();
|
2014-01-13 13:07:17 +00:00
|
|
|
AU.addRequired<DominatorTreeWrapperPass>();
|
2015-01-17 14:16:18 +00:00
|
|
|
AU.addPreserved<LoopInfoWrapperPass>();
|
2014-01-13 13:07:17 +00:00
|
|
|
AU.addPreserved<DominatorTreeWrapperPass>();
|
2013-06-29 05:38:15 +00:00
|
|
|
AU.setPreservesCFG();
|
2013-04-15 22:00:26 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2013-04-09 19:44:35 +00:00
|
|
|
|
|
|
|
/// \brief Collect memory references and sort them according to their base
|
|
|
|
/// object. We sort the stores to their base objects to reduce the cost of the
|
|
|
|
/// quadratic search on the stores. TODO: We can further reduce this cost
|
|
|
|
/// if we flush the chain creation every time we run into a memory barrier.
|
2013-07-07 06:57:07 +00:00
|
|
|
unsigned collectStores(BasicBlock *BB, BoUpSLP &R);
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
/// \brief Try to vectorize a chain that starts at two arithmetic instrs.
|
2014-08-01 08:05:55 +00:00
|
|
|
bool tryToVectorizePair(Value *A, Value *B, BoUpSLP &R);
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-07-12 00:04:18 +00:00
|
|
|
/// \brief Try to vectorize a list of operands.
|
2014-05-04 17:10:15 +00:00
|
|
|
/// \param BuildVector A list of users to ignore for the purpose of
|
|
|
|
/// scheduling and that don't need extracting.
|
2013-06-20 17:41:45 +00:00
|
|
|
/// \returns true if a value was vectorized.
|
2014-05-04 17:10:15 +00:00
|
|
|
bool tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
|
2014-08-01 08:05:55 +00:00
|
|
|
ArrayRef<Value *> BuildVector = None,
|
|
|
|
bool allowReorder = false);
|
2013-04-20 07:22:58 +00:00
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
/// \brief Try to vectorize a chain that may start at the operands of \p V.
|
2013-07-07 06:57:07 +00:00
|
|
|
bool tryToVectorize(BinaryOperator *V, BoUpSLP &R);
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
/// \brief Vectorize the stores that were collected in StoreRefs.
|
2013-07-07 06:57:07 +00:00
|
|
|
bool vectorizeStoreChains(BoUpSLP &R);
|
2013-06-20 17:41:45 +00:00
|
|
|
|
2013-06-18 15:58:05 +00:00
|
|
|
/// \brief Scan the basic block and look for patterns that are likely to start
|
|
|
|
/// a vectorization chain.
|
2013-07-07 06:57:07 +00:00
|
|
|
bool vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R);
|
|
|
|
|
|
|
|
bool vectorizeStoreChain(ArrayRef<Value *> Chain, int CostThreshold,
|
|
|
|
BoUpSLP &R);
|
2013-04-15 22:00:26 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool vectorizeStores(ArrayRef<StoreInst *> Stores, int costThreshold,
|
|
|
|
BoUpSLP &R);
|
2013-04-15 22:00:26 +00:00
|
|
|
private:
|
|
|
|
StoreListMap StoreRefs;
|
|
|
|
};
|
|
|
|
|
2014-01-24 17:20:08 +00:00
|
|
|
/// \brief Check that the Values in the slice of the VL array still exist in
|
2013-11-19 22:20:20 +00:00
|
|
|
/// the WeakVH array.
|
|
|
|
/// Vectorization of part of the VL array may cause later values in the VL array
|
|
|
|
/// to become invalid. We track when this has happened in the WeakVH array.
|
2015-03-02 15:24:36 +00:00
|
|
|
static bool hasValueBeenRAUWed(ArrayRef<Value *> VL, ArrayRef<WeakVH> VH,
|
|
|
|
unsigned SliceBegin, unsigned SliceSize) {
|
|
|
|
VL = VL.slice(SliceBegin, SliceSize);
|
|
|
|
VH = VH.slice(SliceBegin, SliceSize);
|
|
|
|
return !std::equal(VL.begin(), VL.end(), VH.begin());
|
2013-11-19 22:20:20 +00:00
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool SLPVectorizer::vectorizeStoreChain(ArrayRef<Value *> Chain,
|
|
|
|
int CostThreshold, BoUpSLP &R) {
|
|
|
|
unsigned ChainLen = Chain.size();
|
|
|
|
DEBUG(dbgs() << "SLP: Analyzing a store chain of length " << ChainLen
|
|
|
|
<< "\n");
|
|
|
|
Type *StoreTy = cast<StoreInst>(Chain[0])->getValueOperand()->getType();
|
2015-03-10 02:37:25 +00:00
|
|
|
auto &DL = cast<StoreInst>(Chain[0])->getModule()->getDataLayout();
|
|
|
|
unsigned Sz = DL.getTypeSizeInBits(StoreTy);
|
2013-07-07 06:57:07 +00:00
|
|
|
unsigned VF = MinVecRegSize / Sz;
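// For example, with the default 128-bit MinVecRegSize and 32-bit store
// elements this gives VF == 4 stores per vectorized bundle.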
|
|
|
|
|
|
|
|
if (!isPowerOf2_32(Sz) || VF < 2)
|
|
|
|
return false;
|
|
|
|
|
2014-03-28 17:21:27 +00:00
|
|
|
// Keep track of values that were deleted by vectorizing in the loop below.
|
2013-11-19 22:20:20 +00:00
|
|
|
SmallVector<WeakVH, 8> TrackValues(Chain.begin(), Chain.end());
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool Changed = false;
|
|
|
|
// Look for profitable vectorizable trees at all offsets, starting at zero.
|
|
|
|
for (unsigned i = 0, e = ChainLen; i < e; ++i) {
|
|
|
|
if (i + VF > e)
|
|
|
|
break;
|
2013-11-19 22:20:20 +00:00
|
|
|
|
|
|
|
// Check that a previous iteration of this loop did not delete the Value.
|
|
|
|
if (hasValueBeenRAUWed(Chain, TrackValues, i, VF))
|
|
|
|
continue;
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Analyzing " << VF << " stores at offset " << i
|
|
|
|
<< "\n");
|
|
|
|
ArrayRef<Value *> Operands = Chain.slice(i, VF);
|
|
|
|
|
|
|
|
R.buildTree(Operands);
|
|
|
|
|
|
|
|
int Cost = R.getTreeCost();
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: Found cost=" << Cost << " for VF=" << VF << "\n");
|
|
|
|
if (Cost < CostThreshold) {
|
|
|
|
DEBUG(dbgs() << "SLP: Decided to vectorize cost=" << Cost << "\n");
|
|
|
|
R.vectorizeTree();
|
|
|
|
|
|
|
|
// Move to the next bundle.
|
|
|
|
i += VF - 1;
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-11-19 22:20:18 +00:00
|
|
|
return Changed;
|
2013-07-07 06:57:07 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
bool SLPVectorizer::vectorizeStores(ArrayRef<StoreInst *> Stores,
|
2013-07-16 15:25:17 +00:00
|
|
|
int costThreshold, BoUpSLP &R) {
|
2015-03-10 02:37:25 +00:00
|
|
|
SetVector<StoreInst *> Heads, Tails;
|
|
|
|
SmallDenseMap<StoreInst *, StoreInst *> ConsecutiveChain;
|
2013-07-07 06:57:07 +00:00
|
|
|
|
|
|
|
// We may run into multiple chains that merge into a single chain. We mark the
|
|
|
|
// stores that we vectorized so that we don't visit the same store twice.
|
|
|
|
BoUpSLP::ValueSet VectorizedStores;
|
|
|
|
bool Changed = false;
|
|
|
|
|
|
|
|
// Do a quadratic search on all of the given stores and find
|
2013-07-14 06:15:46 +00:00
|
|
|
// all of the pairs of stores that follow each other.
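// For example, consecutive stores to A[0], A[1] and A[2] put the A[0] and
// A[1] stores into Heads, the A[1] and A[2] stores into Tails, and link
// ConsecutiveChain as A[0] -> A[1] -> A[2]; the loop further below then
// walks the chain starting at the A[0] store.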
|
2013-07-16 15:25:17 +00:00
|
|
|
for (unsigned i = 0, e = Stores.size(); i < e; ++i) {
|
2013-07-07 06:57:07 +00:00
|
|
|
for (unsigned j = 0; j < e; ++j) {
|
2013-07-21 06:12:57 +00:00
|
|
|
if (i == j)
|
2013-07-07 06:57:07 +00:00
|
|
|
continue;
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = Stores[i]->getModule()->getDataLayout();
|
|
|
|
if (R.isConsecutiveAccess(Stores[i], Stores[j], DL)) {
|
2013-07-07 06:57:07 +00:00
|
|
|
Tails.insert(Stores[j]);
|
|
|
|
Heads.insert(Stores[i]);
|
|
|
|
ConsecutiveChain[Stores[i]] = Stores[j];
|
|
|
|
}
|
|
|
|
}
|
2013-07-16 15:25:17 +00:00
|
|
|
}
|
2013-07-07 06:57:07 +00:00
|
|
|
|
|
|
|
// For stores that start but don't end a link in the chain:
|
2015-03-10 02:37:25 +00:00
|
|
|
for (SetVector<StoreInst *>::iterator it = Heads.begin(), e = Heads.end();
|
2013-07-07 06:57:07 +00:00
|
|
|
it != e; ++it) {
|
|
|
|
if (Tails.count(*it))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
// We found a store instr that starts a chain. Now follow the chain and try
|
|
|
|
// to vectorize it.
|
|
|
|
BoUpSLP::ValueList Operands;
|
2015-03-10 02:37:25 +00:00
|
|
|
StoreInst *I = *it;
|
2013-07-07 06:57:07 +00:00
|
|
|
// Collect the chain into a list.
|
|
|
|
while (Tails.count(I) || Heads.count(I)) {
|
|
|
|
if (VectorizedStores.count(I))
|
|
|
|
break;
|
|
|
|
Operands.push_back(I);
|
|
|
|
// Move to the next value in the chain.
|
|
|
|
I = ConsecutiveChain[I];
|
|
|
|
}
|
|
|
|
|
|
|
|
bool Vectorized = vectorizeStoreChain(Operands, costThreshold, R);
|
|
|
|
|
|
|
|
// Mark the vectorized stores so that we don't vectorize them again.
|
|
|
|
if (Vectorized)
|
|
|
|
VectorizedStores.insert(Operands.begin(), Operands.end());
|
|
|
|
Changed |= Vectorized;
|
|
|
|
}
|
|
|
|
|
|
|
|
return Changed;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
|
2013-04-20 05:23:11 +00:00
|
|
|
unsigned count = 0;
|
2013-04-15 22:00:26 +00:00
|
|
|
StoreRefs.clear();
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = BB->getModule()->getDataLayout();
|
2013-04-15 22:00:26 +00:00
|
|
|
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; ++it) {
|
|
|
|
StoreInst *SI = dyn_cast<StoreInst>(it);
|
|
|
|
if (!SI)
|
|
|
|
continue;
|
|
|
|
|
2013-10-16 17:52:40 +00:00
|
|
|
// Don't touch volatile or atomic stores.
|
|
|
|
if (!SI->isSimple())
|
|
|
|
continue;
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
// Check that the pointer points to scalars.
|
2013-04-20 22:29:43 +00:00
|
|
|
Type *Ty = SI->getValueOperand()->getType();
|
2015-02-12 02:30:56 +00:00
|
|
|
if (!isValidElementType(Ty))
|
2014-04-29 19:37:20 +00:00
|
|
|
continue;
|
2013-04-15 22:00:26 +00:00
|
|
|
|
2013-10-02 19:06:06 +00:00
|
|
|
// Find the base pointer.
|
|
|
|
Value *Ptr = GetUnderlyingObject(SI->getPointerOperand(), DL);
|
2013-04-15 22:00:26 +00:00
|
|
|
|
|
|
|
// Save the store locations.
|
|
|
|
StoreRefs[Ptr].push_back(SI);
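// For example, stores to A[0] and A[4] both resolve to the same underlying
// object A and therefore share one StoreRefs bucket, while a store through an
// unrelated base pointer gets its own bucket; vectorizeStoreChains() later
// processes each bucket independently.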
|
2013-04-20 05:23:11 +00:00
|
|
|
count++;
|
2013-04-09 19:44:35 +00:00
|
|
|
}
|
2013-04-20 05:23:11 +00:00
|
|
|
return count;
|
2013-04-15 22:00:26 +00:00
|
|
|
}
|
|
|
|
|
2014-08-01 08:05:55 +00:00
|
|
|
bool SLPVectorizer::tryToVectorizePair(Value *A, Value *B, BoUpSLP &R) {
|
2013-06-20 17:54:36 +00:00
|
|
|
if (!A || !B)
|
|
|
|
return false;
|
2013-04-20 09:49:10 +00:00
|
|
|
Value *VL[] = { A, B };
|
2014-08-01 08:05:55 +00:00
|
|
|
return tryToVectorizeList(VL, R, None, true);
|
2013-04-20 07:22:58 +00:00
|
|
|
}
|
|
|
|
|
2014-05-04 17:10:15 +00:00
|
|
|
bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
|
2014-08-01 08:05:55 +00:00
|
|
|
ArrayRef<Value *> BuildVector,
|
|
|
|
bool allowReorder) {
|
2013-06-18 15:58:05 +00:00
|
|
|
if (VL.size() < 2)
|
|
|
|
return false;
|
|
|
|
|
2013-06-20 17:54:36 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Vectorizing a list of length = " << VL.size() << ".\n");
|
2013-04-20 22:29:43 +00:00
|
|
|
|
2013-06-18 15:58:05 +00:00
|
|
|
// Check that all of the parts are scalar instructions of the same type.
|
|
|
|
Instruction *I0 = dyn_cast<Instruction>(VL[0]);
|
2013-06-20 17:54:36 +00:00
|
|
|
if (!I0)
|
2013-09-03 17:26:04 +00:00
|
|
|
return false;
|
2013-06-18 15:58:05 +00:00
|
|
|
|
|
|
|
unsigned Opcode0 = I0->getOpcode();
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = I0->getModule()->getDataLayout();
|
2013-11-19 22:20:18 +00:00
|
|
|
|
2013-09-03 17:26:04 +00:00
|
|
|
Type *Ty0 = I0->getType();
|
2015-03-10 02:37:25 +00:00
|
|
|
unsigned Sz = DL.getTypeSizeInBits(Ty0);
|
2013-09-03 17:26:04 +00:00
|
|
|
unsigned VF = MinVecRegSize / Sz;
|
2013-06-18 15:58:05 +00:00
|
|
|
|
2013-04-20 22:29:43 +00:00
|
|
|
for (int i = 0, e = VL.size(); i < e; ++i) {
|
|
|
|
Type *Ty = VL[i]->getType();
|
2015-02-12 02:30:56 +00:00
|
|
|
if (!isValidElementType(Ty))
|
2013-09-03 17:26:04 +00:00
|
|
|
return false;
|
2013-06-18 15:58:05 +00:00
|
|
|
Instruction *Inst = dyn_cast<Instruction>(VL[i]);
|
|
|
|
if (!Inst || Inst->getOpcode() != Opcode0)
|
2013-09-03 17:26:04 +00:00
|
|
|
return false;
|
2013-04-20 22:29:43 +00:00
|
|
|
}
|
|
|
|
|
2013-09-03 17:26:04 +00:00
|
|
|
bool Changed = false;
|
2013-11-19 22:20:18 +00:00
|
|
|
|
2014-04-05 20:30:31 +00:00
|
|
|
// Keep track of values that were deleted by vectorizing in the loop below.
|
2013-11-19 22:20:20 +00:00
|
|
|
SmallVector<WeakVH, 8> TrackValues(VL.begin(), VL.end());
|
|
|
|
|
2013-09-03 17:26:04 +00:00
|
|
|
for (unsigned i = 0, e = VL.size(); i < e; ++i) {
|
|
|
|
unsigned OpsWidth = 0;
|
2013-11-19 22:20:18 +00:00
|
|
|
|
|
|
|
if (i + VF > e)
|
2013-09-03 17:26:04 +00:00
|
|
|
OpsWidth = e - i;
|
|
|
|
else
|
|
|
|
OpsWidth = VF;
|
|
|
|
|
|
|
|
if (!isPowerOf2_32(OpsWidth) || OpsWidth < 2)
|
|
|
|
break;
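// For example, with VF = 4 and six scalar values the first bundle is [0, 4);
// if it is vectorized, the loop jumps ahead and the trailing bundle of two
// values is tried next, whereas a trailing bundle of three values would be
// rejected here because three is not a power of two.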
|
2013-07-11 20:56:13 +00:00
|
|
|
|
2013-11-19 22:20:20 +00:00
|
|
|
// Check that a previous iteration of this loop did not delete the Value.
|
|
|
|
if (hasValueBeenRAUWed(VL, TrackValues, i, OpsWidth))
|
|
|
|
continue;
|
|
|
|
|
2013-11-19 22:20:18 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Analyzing " << OpsWidth << " operations "
|
|
|
|
<< "\n");
|
2013-09-03 17:26:04 +00:00
|
|
|
ArrayRef<Value *> Ops = VL.slice(i, OpsWidth);
|
2013-11-19 22:20:18 +00:00
|
|
|
|
2014-05-04 17:10:15 +00:00
|
|
|
ArrayRef<Value *> BuildVectorSlice;
|
|
|
|
if (!BuildVector.empty())
|
|
|
|
BuildVectorSlice = BuildVector.slice(i, OpsWidth);
|
|
|
|
|
|
|
|
R.buildTree(Ops, BuildVectorSlice);
|
2014-08-01 08:05:55 +00:00
|
|
|
// TODO: check if we can allow reordering also for other cases than
|
|
|
|
// tryToVectorizePair()
|
|
|
|
if (allowReorder && R.shouldReorder()) {
|
|
|
|
assert(Ops.size() == 2);
|
|
|
|
assert(BuildVectorSlice.empty());
|
|
|
|
Value *ReorderedOps[] = { Ops[1], Ops[0] };
|
|
|
|
R.buildTree(ReorderedOps, None);
|
|
|
|
}
|
2013-09-03 17:26:04 +00:00
|
|
|
int Cost = R.getTreeCost();
|
2013-11-19 22:20:18 +00:00
|
|
|
|
2013-09-03 17:26:04 +00:00
|
|
|
if (Cost < -SLPCostThreshold) {
|
2014-03-28 17:21:27 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Vectorizing list at cost:" << Cost << ".\n");
|
2014-05-04 17:10:15 +00:00
|
|
|
Value *VectorizedRoot = R.vectorizeTree();
|
|
|
|
|
|
|
|
// Reconstruct the build vector by extracting the vectorized root. This
|
|
|
|
// way we handle the case where some elements of the vector are undefined.
|
|
|
|
// (return (insertelt <4 x i32> (insertelt undef (opd0) 0) (opd1) 2))
|
|
|
|
if (!BuildVectorSlice.empty()) {
|
|
|
|
// The insert point is the last build vector instruction. The vectorized
|
|
|
|
// root will precede it. This guarantees that we get an instruction. The
|
|
|
|
// vectorized tree could have been constant folded.
|
|
|
|
Instruction *InsertAfter = cast<Instruction>(BuildVectorSlice.back());
|
|
|
|
unsigned VecIdx = 0;
|
|
|
|
for (auto &V : BuildVectorSlice) {
|
|
|
|
IRBuilder<true, NoFolder> Builder(
|
|
|
|
++BasicBlock::iterator(InsertAfter));
|
|
|
|
InsertElementInst *IE = cast<InsertElementInst>(V);
|
|
|
|
Instruction *Extract = cast<Instruction>(Builder.CreateExtractElement(
|
|
|
|
VectorizedRoot, Builder.getInt32(VecIdx++)));
|
|
|
|
IE->setOperand(1, Extract);
|
|
|
|
IE->removeFromParent();
|
|
|
|
IE->insertAfter(Extract);
|
|
|
|
InsertAfter = IE;
|
|
|
|
}
|
|
|
|
}
|
2013-09-03 17:26:04 +00:00
|
|
|
// Move to the next bundle.
|
|
|
|
i += VF - 1;
|
|
|
|
Changed = true;
|
|
|
|
}
|
|
|
|
}
|
2013-11-19 22:20:18 +00:00
|
|
|
|
|
|
|
return Changed;
|
2013-04-15 22:00:26 +00:00
|
|
|
}
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool SLPVectorizer::tryToVectorize(BinaryOperator *V, BoUpSLP &R) {
|
2013-06-20 17:54:36 +00:00
|
|
|
if (!V)
|
|
|
|
return false;
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
// Try to vectorize V.
|
2014-08-01 08:05:55 +00:00
|
|
|
if (tryToVectorizePair(V->getOperand(0), V->getOperand(1), R))
|
2013-04-14 03:22:20 +00:00
|
|
|
return true;
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
BinaryOperator *A = dyn_cast<BinaryOperator>(V->getOperand(0));
|
|
|
|
BinaryOperator *B = dyn_cast<BinaryOperator>(V->getOperand(1));
|
|
|
|
// Try to skip B.
|
|
|
|
if (B && B->hasOneUse()) {
|
|
|
|
BinaryOperator *B0 = dyn_cast<BinaryOperator>(B->getOperand(0));
|
|
|
|
BinaryOperator *B1 = dyn_cast<BinaryOperator>(B->getOperand(1));
|
|
|
|
if (tryToVectorizePair(A, B0, R)) {
|
2013-04-14 05:15:53 +00:00
|
|
|
return true;
|
2013-04-14 03:22:20 +00:00
|
|
|
}
|
2013-04-15 22:00:26 +00:00
|
|
|
if (tryToVectorizePair(A, B1, R)) {
|
|
|
|
return true;
|
2013-04-14 03:22:20 +00:00
|
|
|
}
|
|
|
|
}
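// For example, for V = A + (B0 + B1) where the inner add has a single use,
// the code above tries to pair A with B0 and then A with B1 after skipping
// the inner add; the block below does the symmetric thing for A.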
|
|
|
|
|
2013-04-30 21:04:51 +00:00
|
|
|
// Try to skip A.
|
2013-04-15 22:00:26 +00:00
|
|
|
if (A && A->hasOneUse()) {
|
|
|
|
BinaryOperator *A0 = dyn_cast<BinaryOperator>(A->getOperand(0));
|
|
|
|
BinaryOperator *A1 = dyn_cast<BinaryOperator>(A->getOperand(1));
|
|
|
|
if (tryToVectorizePair(A0, B, R)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (tryToVectorizePair(A1, B, R)) {
|
|
|
|
return true;
|
2013-04-14 03:22:20 +00:00
|
|
|
}
|
|
|
|
}
|
2013-04-15 22:00:26 +00:00
|
|
|
return false;
|
|
|
|
}
|
2013-04-14 03:22:20 +00:00
|
|
|
|
2013-09-21 01:06:00 +00:00
|
|
|
/// \brief Generate a shuffle mask to be used in a reduction tree.
|
|
|
|
///
|
|
|
|
/// \param VecLen The length of the vector to be reduced.
|
|
|
|
/// \param NumEltsToRdx The number of elements that should be reduced in the
|
|
|
|
/// vector.
|
|
|
|
/// \param IsPairwise Whether the reduction is a pairwise or splitting
|
|
|
|
/// reduction. A pairwise reduction will generate a mask of
|
|
|
|
/// <0,2,...> or <1,3,...> while a splitting reduction will generate
|
|
|
|
/// <2,3, undef,undef> for a vector of 4 and NumElts = 2.
|
|
|
|
/// \param IsLeft True will generate a mask of even elements, odd otherwise.
|
|
|
|
static Value *createRdxShuffleMask(unsigned VecLen, unsigned NumEltsToRdx,
|
|
|
|
bool IsPairwise, bool IsLeft,
|
|
|
|
IRBuilder<> &Builder) {
|
|
|
|
assert((IsPairwise || !IsLeft) && "Don't support a <0,1,undef,...> mask");
|
|
|
|
|
|
|
|
SmallVector<Constant *, 32> ShuffleMask(
|
|
|
|
VecLen, UndefValue::get(Builder.getInt32Ty()));
|
|
|
|
|
|
|
|
if (IsPairwise)
|
|
|
|
// Build a mask of 0, 2, ... (left) or 1, 3, ... (right).
|
|
|
|
for (unsigned i = 0; i != NumEltsToRdx; ++i)
|
|
|
|
ShuffleMask[i] = Builder.getInt32(2 * i + !IsLeft);
|
|
|
|
else
|
|
|
|
// Move the upper half of the vector to the lower half.
|
|
|
|
for (unsigned i = 0; i != NumEltsToRdx; ++i)
|
|
|
|
ShuffleMask[i] = Builder.getInt32(NumEltsToRdx + i);
|
|
|
|
|
|
|
|
return ConstantVector::get(ShuffleMask);
|
|
|
|
}
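// For example, with VecLen = 4 and NumEltsToRdx = 2 this produces
// <0, 2, undef, undef> (pairwise, left), <1, 3, undef, undef> (pairwise,
// right) and <2, 3, undef, undef> (splitting).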
|
|
|
|
|
|
|
|
|
|
|
|
/// Model horizontal reductions.
|
|
|
|
///
|
|
|
|
/// A horizontal reduction is a tree of reduction operations (currently add and
|
|
|
|
/// fadd) that has operations that can be put into a vector as its leaf.
|
|
|
|
/// For example, this tree:
|
|
|
|
///
|
|
|
|
/// mul mul mul mul
|
|
|
|
/// \ / \ /
|
|
|
|
/// + +
|
|
|
|
/// \ /
|
|
|
|
/// +
|
|
|
|
/// This tree has "mul" as its reduced values and "+" as its reduction
|
|
|
|
/// operations. A reduction might be feeding into a store or a binary operation
|
|
|
|
/// feeding a phi.
|
|
|
|
/// ...
|
|
|
|
/// \ /
|
|
|
|
/// +
|
2013-09-21 05:37:30 +00:00
|
|
|
/// |
|
2013-09-21 01:06:00 +00:00
|
|
|
/// phi +=
|
|
|
|
///
|
|
|
|
/// Or:
|
|
|
|
/// ...
|
|
|
|
/// \ /
|
|
|
|
/// +
|
2013-09-21 05:37:30 +00:00
|
|
|
/// |
|
2013-09-21 01:06:00 +00:00
|
|
|
/// *p =
|
|
|
|
///
|
|
|
|
class HorizontalReduction {
|
2014-05-04 17:10:15 +00:00
|
|
|
SmallVector<Value *, 16> ReductionOps;
|
2013-09-21 01:06:00 +00:00
|
|
|
SmallVector<Value *, 32> ReducedVals;
|
|
|
|
|
|
|
|
BinaryOperator *ReductionRoot;
|
|
|
|
PHINode *ReductionPHI;
|
|
|
|
|
|
|
|
/// The opcode of the reduction.
|
|
|
|
unsigned ReductionOpcode;
|
|
|
|
/// The opcode of the values we perform a reduction on.
|
|
|
|
unsigned ReducedValueOpcode;
|
|
|
|
/// The width of one full horizontal reduction operation.
|
|
|
|
unsigned ReduxWidth;
|
|
|
|
/// Should we model this reduction as a pairwise reduction tree or a tree that
|
|
|
|
/// splits the vector in halves and adds those halves.
|
|
|
|
bool IsPairwiseReduction;
|
|
|
|
|
|
|
|
public:
|
|
|
|
HorizontalReduction()
|
2014-04-25 05:29:35 +00:00
|
|
|
: ReductionRoot(nullptr), ReductionPHI(nullptr), ReductionOpcode(0),
|
2013-09-21 01:06:00 +00:00
|
|
|
ReducedValueOpcode(0), ReduxWidth(0), IsPairwiseReduction(false) {}
|
|
|
|
|
|
|
|
/// \brief Try to find a reduction tree.
|
2015-03-10 02:37:25 +00:00
|
|
|
bool matchAssociativeReduction(PHINode *Phi, BinaryOperator *B) {
|
2013-09-21 01:06:00 +00:00
|
|
|
assert((!Phi ||
|
|
|
|
std::find(Phi->op_begin(), Phi->op_end(), B) != Phi->op_end()) &&
|
|
|
|
"Thi phi needs to use the binary operator");
|
|
|
|
|
|
|
|
// We could have an initial reduction that is not an add.
|
|
|
|
// r *= v1 + v2 + v3 + v4
|
|
|
|
// In such a case start looking for a tree rooted in the first '+'.
|
|
|
|
if (Phi) {
|
|
|
|
if (B->getOperand(0) == Phi) {
|
2014-04-25 05:29:35 +00:00
|
|
|
Phi = nullptr;
|
2013-09-21 01:06:00 +00:00
|
|
|
B = dyn_cast<BinaryOperator>(B->getOperand(1));
|
|
|
|
} else if (B->getOperand(1) == Phi) {
|
2014-04-25 05:29:35 +00:00
|
|
|
Phi = nullptr;
|
2013-09-21 01:06:00 +00:00
|
|
|
B = dyn_cast<BinaryOperator>(B->getOperand(0));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!B)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
Type *Ty = B->getType();
|
2015-02-12 02:30:56 +00:00
|
|
|
if (!isValidElementType(Ty))
|
2013-09-21 01:06:00 +00:00
|
|
|
return false;
|
|
|
|
|
2015-03-10 02:37:25 +00:00
|
|
|
const DataLayout &DL = B->getModule()->getDataLayout();
|
2013-09-21 01:06:00 +00:00
|
|
|
ReductionOpcode = B->getOpcode();
|
|
|
|
ReducedValueOpcode = 0;
|
2015-03-10 02:37:25 +00:00
|
|
|
ReduxWidth = MinVecRegSize / DL.getTypeSizeInBits(Ty);
|
2013-09-21 01:06:00 +00:00
|
|
|
ReductionRoot = B;
|
|
|
|
ReductionPHI = Phi;
|
|
|
|
|
|
|
|
if (ReduxWidth < 4)
|
|
|
|
return false;
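// For example, assuming the default 128-bit minimum vector register size, an
// i32 or float reduction gives ReduxWidth = 4 and is accepted, while a 64-bit
// element type gives ReduxWidth = 2 and is rejected here.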
|
|
|
|
|
|
|
|
// We currently only support adds.
|
|
|
|
if (ReductionOpcode != Instruction::Add &&
|
|
|
|
ReductionOpcode != Instruction::FAdd)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Post order traverse the reduction tree starting at B. We only handle true
|
|
|
|
// trees containing only binary operators.
|
|
|
|
SmallVector<std::pair<BinaryOperator *, unsigned>, 32> Stack;
|
|
|
|
Stack.push_back(std::make_pair(B, 0));
|
|
|
|
while (!Stack.empty()) {
|
|
|
|
BinaryOperator *TreeN = Stack.back().first;
|
|
|
|
unsigned EdgeToVisit = Stack.back().second++;
|
|
|
|
bool IsReducedValue = TreeN->getOpcode() != ReductionOpcode;
|
|
|
|
|
|
|
|
// Only handle trees in the current basic block.
|
|
|
|
if (TreeN->getParent() != B->getParent())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Each tree node needs to have one user except for the ultimate
|
|
|
|
// reduction.
|
|
|
|
if (!TreeN->hasOneUse() && TreeN != B)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
// Postorder visit.
|
|
|
|
if (EdgeToVisit == 2 || IsReducedValue) {
|
|
|
|
if (IsReducedValue) {
|
|
|
|
// Make sure that the opcodes of the operations that we are going to
|
|
|
|
// reduce match.
|
|
|
|
if (!ReducedValueOpcode)
|
|
|
|
ReducedValueOpcode = TreeN->getOpcode();
|
|
|
|
else if (ReducedValueOpcode != TreeN->getOpcode())
|
|
|
|
return false;
|
|
|
|
ReducedVals.push_back(TreeN);
|
|
|
|
} else {
|
|
|
|
// We need to be able to reassociate the adds.
|
|
|
|
if (!TreeN->isAssociative())
|
|
|
|
return false;
|
2014-05-04 17:10:15 +00:00
|
|
|
ReductionOps.push_back(TreeN);
|
2013-09-21 01:06:00 +00:00
|
|
|
}
|
|
|
|
// Retract.
|
|
|
|
Stack.pop_back();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Visit left or right.
|
|
|
|
Value *NextV = TreeN->getOperand(EdgeToVisit);
|
|
|
|
BinaryOperator *Next = dyn_cast<BinaryOperator>(NextV);
|
|
|
|
if (Next)
|
|
|
|
Stack.push_back(std::make_pair(Next, 0));
|
|
|
|
else if (NextV != Phi)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Attempt to vectorize the tree found by
|
|
|
|
/// matchAssociativeReduction.
|
|
|
|
bool tryToReduce(BoUpSLP &V, TargetTransformInfo *TTI) {
|
|
|
|
if (ReducedVals.empty())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
unsigned NumReducedVals = ReducedVals.size();
|
|
|
|
if (NumReducedVals < ReduxWidth)
|
|
|
|
return false;
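// For example, with ReduxWidth = 4 and seven reduced values the loop below
// vectorizes values [0, 4) when that is profitable, and the remaining three
// values are then folded in with scalar operations by the clean-up loop
// further down.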
|
|
|
|
|
2014-04-25 05:29:35 +00:00
|
|
|
Value *VectorizedTree = nullptr;
|
2013-09-21 01:06:00 +00:00
|
|
|
IRBuilder<> Builder(ReductionRoot);
|
|
|
|
FastMathFlags Unsafe;
|
|
|
|
Unsafe.setUnsafeAlgebra();
|
|
|
|
Builder.SetFastMathFlags(Unsafe);
|
|
|
|
unsigned i = 0;
|
|
|
|
|
|
|
|
for (; i < NumReducedVals - ReduxWidth + 1; i += ReduxWidth) {
|
2014-08-27 05:25:25 +00:00
|
|
|
V.buildTree(makeArrayRef(&ReducedVals[i], ReduxWidth), ReductionOps);
|
2013-09-21 01:06:00 +00:00
|
|
|
|
|
|
|
// Estimate cost.
|
|
|
|
int Cost = V.getTreeCost() + getReductionCost(TTI, ReducedVals[i]);
|
|
|
|
if (Cost >= -SLPCostThreshold)
|
|
|
|
break;
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: Vectorizing horizontal reduction at cost:" << Cost
|
|
|
|
<< ". (HorRdx)\n");
|
|
|
|
|
|
|
|
// Vectorize a tree.
|
|
|
|
DebugLoc Loc = cast<Instruction>(ReducedVals[i])->getDebugLoc();
|
|
|
|
Value *VectorizedRoot = V.vectorizeTree();
|
|
|
|
|
|
|
|
// Emit a reduction.
|
|
|
|
Value *ReducedSubTree = emitReduction(VectorizedRoot, Builder);
|
|
|
|
if (VectorizedTree) {
|
|
|
|
Builder.SetCurrentDebugLocation(Loc);
|
|
|
|
VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
|
|
|
|
ReducedSubTree, "bin.rdx");
|
|
|
|
} else
|
|
|
|
VectorizedTree = ReducedSubTree;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (VectorizedTree) {
|
|
|
|
// Finish the reduction.
|
|
|
|
for (; i < NumReducedVals; ++i) {
|
|
|
|
Builder.SetCurrentDebugLocation(
|
|
|
|
cast<Instruction>(ReducedVals[i])->getDebugLoc());
|
|
|
|
VectorizedTree = createBinOp(Builder, ReductionOpcode, VectorizedTree,
|
|
|
|
ReducedVals[i]);
|
|
|
|
}
|
|
|
|
// Update users.
|
|
|
|
if (ReductionPHI) {
|
2014-04-28 04:05:08 +00:00
|
|
|
assert(ReductionRoot && "Need a reduction operation");
|
2013-09-21 01:06:00 +00:00
|
|
|
ReductionRoot->setOperand(0, VectorizedTree);
|
|
|
|
ReductionRoot->setOperand(1, ReductionPHI);
|
|
|
|
} else
|
|
|
|
ReductionRoot->replaceAllUsesWith(VectorizedTree);
|
|
|
|
}
|
2014-04-25 05:29:35 +00:00
|
|
|
return VectorizedTree != nullptr;
|
2013-09-21 01:06:00 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
|
|
|
|
/// \brief Calculate the cost of a reduction.
|
|
|
|
int getReductionCost(TargetTransformInfo *TTI, Value *FirstReducedVal) {
|
|
|
|
Type *ScalarTy = FirstReducedVal->getType();
|
|
|
|
Type *VecTy = VectorType::get(ScalarTy, ReduxWidth);
|
|
|
|
|
|
|
|
int PairwiseRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, true);
|
|
|
|
int SplittingRdxCost = TTI->getReductionCost(ReductionOpcode, VecTy, false);
|
|
|
|
|
|
|
|
IsPairwiseReduction = PairwiseRdxCost < SplittingRdxCost;
|
|
|
|
int VecReduxCost = IsPairwiseReduction ? PairwiseRdxCost : SplittingRdxCost;
|
|
|
|
|
|
|
|
int ScalarReduxCost =
|
|
|
|
ReduxWidth * TTI->getArithmeticInstrCost(ReductionOpcode, VecTy);
|
|
|
|
|
|
|
|
DEBUG(dbgs() << "SLP: Adding cost " << VecReduxCost - ScalarReduxCost
|
|
|
|
<< " for reduction that starts with " << *FirstReducedVal
|
|
|
|
<< " (It is a "
|
|
|
|
<< (IsPairwiseReduction ? "pairwise" : "splitting")
|
|
|
|
<< " reduction)\n");
|
|
|
|
|
|
|
|
return VecReduxCost - ScalarReduxCost;
|
|
|
|
}
|
|
|
|
|
|
|
|
static Value *createBinOp(IRBuilder<> &Builder, unsigned Opcode, Value *L,
|
|
|
|
Value *R, const Twine &Name = "") {
|
|
|
|
if (Opcode == Instruction::FAdd)
|
|
|
|
return Builder.CreateFAdd(L, R, Name);
|
|
|
|
return Builder.CreateBinOp((Instruction::BinaryOps)Opcode, L, R, Name);
|
|
|
|
}
|
|
|
|
|
|
|
|
/// \brief Emit a horizontal reduction of the vectorized value.
|
|
|
|
Value *emitReduction(Value *VectorizedValue, IRBuilder<> &Builder) {
|
|
|
|
assert(VectorizedValue && "Need to have a vectorized tree node");
|
|
|
|
assert(isPowerOf2_32(ReduxWidth) &&
|
|
|
|
"We only handle power-of-two reductions for now");
|
|
|
|
|
2015-01-09 10:23:48 +00:00
|
|
|
Value *TmpVec = VectorizedValue;
|
2013-09-21 01:06:00 +00:00
|
|
|
for (unsigned i = ReduxWidth / 2; i != 0; i >>= 1) {
|
|
|
|
if (IsPairwiseReduction) {
|
|
|
|
Value *LeftMask =
|
|
|
|
createRdxShuffleMask(ReduxWidth, i, true, true, Builder);
|
|
|
|
Value *RightMask =
|
|
|
|
createRdxShuffleMask(ReduxWidth, i, true, false, Builder);
|
|
|
|
|
|
|
|
Value *LeftShuf = Builder.CreateShuffleVector(
|
|
|
|
TmpVec, UndefValue::get(TmpVec->getType()), LeftMask, "rdx.shuf.l");
|
|
|
|
Value *RightShuf = Builder.CreateShuffleVector(
|
|
|
|
TmpVec, UndefValue::get(TmpVec->getType()), (RightMask),
|
|
|
|
"rdx.shuf.r");
|
|
|
|
TmpVec = createBinOp(Builder, ReductionOpcode, LeftShuf, RightShuf,
|
|
|
|
"bin.rdx");
|
|
|
|
} else {
|
|
|
|
Value *UpperHalf =
|
|
|
|
createRdxShuffleMask(ReduxWidth, i, false, false, Builder);
|
|
|
|
Value *Shuf = Builder.CreateShuffleVector(
|
|
|
|
TmpVec, UndefValue::get(TmpVec->getType()), UpperHalf, "rdx.shuf");
|
|
|
|
TmpVec = createBinOp(Builder, ReductionOpcode, TmpVec, Shuf, "bin.rdx");
|
|
|
|
}
|
|
|
|
}
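// For example, with ReduxWidth = 4 and a splitting reduction the loop runs
// for i = 2 and i = 1: it first adds the vector to a shuffle holding its
// upper two lanes, then adds that result to a shuffle holding lane 1, so
// lane 0 ends up holding the full sum.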
|
|
|
|
|
|
|
|
// The result is in the first element of the vector.
|
|
|
|
return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0));
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-08-26 17:56:35 +00:00
|
|
|
/// \brief Recognize construction of vectors like
|
|
|
|
/// %ra = insertelement <4 x float> undef, float %s0, i32 0
|
|
|
|
/// %rb = insertelement <4 x float> %ra, float %s1, i32 1
|
|
|
|
/// %rc = insertelement <4 x float> %rb, float %s2, i32 2
|
|
|
|
/// %rd = insertelement <4 x float> %rc, float %s3, i32 3
|
|
|
|
///
|
|
|
|
/// Returns true if it matches
|
|
|
|
///
|
2014-05-04 17:10:15 +00:00
|
|
|
static bool findBuildVector(InsertElementInst *FirstInsertElem,
|
|
|
|
SmallVectorImpl<Value *> &BuildVector,
|
|
|
|
SmallVectorImpl<Value *> &BuildVectorOpds) {
|
|
|
|
if (!isa<UndefValue>(FirstInsertElem->getOperand(0)))
|
2013-08-26 17:56:35 +00:00
|
|
|
return false;
|
|
|
|
|
2014-05-04 17:10:15 +00:00
|
|
|
InsertElementInst *IE = FirstInsertElem;
|
2013-08-26 17:56:35 +00:00
|
|
|
while (true) {
|
2014-05-04 17:10:15 +00:00
|
|
|
BuildVector.push_back(IE);
|
|
|
|
BuildVectorOpds.push_back(IE->getOperand(1));
|
2013-08-26 17:56:35 +00:00
|
|
|
|
|
|
|
if (IE->use_empty())
|
|
|
|
return false;
|
|
|
|
|
2014-03-09 03:16:01 +00:00
|
|
|
InsertElementInst *NextUse = dyn_cast<InsertElementInst>(IE->user_back());
|
2013-08-26 17:56:35 +00:00
|
|
|
if (!NextUse)
|
|
|
|
return true;
|
|
|
|
|
|
|
|
// If this isn't the final use, make sure the next insertelement is the only
|
|
|
|
// use. It's OK if the final constructed vector is used multiple times.
|
|
|
|
if (!IE->hasOneUse())
|
|
|
|
return false;
|
|
|
|
|
|
|
|
IE = NextUse;
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-10-12 18:56:27 +00:00
|
|
|
static bool PhiTypeSorterFunc(Value *V, Value *V2) {
|
|
|
|
return V->getType() < V2->getType();
|
|
|
|
}
|
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool SLPVectorizer::vectorizeChainsInBlock(BasicBlock *BB, BoUpSLP &R) {
|
2013-04-15 22:00:26 +00:00
|
|
|
bool Changed = false;
|
2013-07-12 00:04:18 +00:00
|
|
|
SmallVector<Value *, 4> Incoming;
|
2013-10-12 18:56:27 +00:00
|
|
|
SmallSet<Value *, 16> VisitedInstrs;
|
|
|
|
|
|
|
|
bool HaveVectorizedPhiNodes = true;
|
|
|
|
while (HaveVectorizedPhiNodes) {
|
|
|
|
HaveVectorizedPhiNodes = false;
|
|
|
|
|
|
|
|
// Collect the incoming values from the PHIs.
|
|
|
|
Incoming.clear();
|
|
|
|
for (BasicBlock::iterator instr = BB->begin(), ie = BB->end(); instr != ie;
|
|
|
|
++instr) {
|
|
|
|
PHINode *P = dyn_cast<PHINode>(instr);
|
|
|
|
if (!P)
|
|
|
|
break;
|
2013-08-20 21:21:45 +00:00
|
|
|
|
2013-10-12 18:56:27 +00:00
|
|
|
if (!VisitedInstrs.count(P))
|
|
|
|
Incoming.push_back(P);
|
|
|
|
}
|
2013-07-12 00:04:18 +00:00
|
|
|
|
2013-10-12 18:56:27 +00:00
|
|
|
// Sort by type.
|
|
|
|
std::stable_sort(Incoming.begin(), Incoming.end(), PhiTypeSorterFunc);
|
2013-07-12 00:04:18 +00:00
|
|
|
|
2013-10-12 18:56:27 +00:00
|
|
|
// Try to vectorize elements based on their type.
|
|
|
|
for (SmallVector<Value *, 4>::iterator IncIt = Incoming.begin(),
|
|
|
|
E = Incoming.end();
|
|
|
|
IncIt != E;) {
|
|
|
|
|
|
|
|
// Look for the next elements with the same type.
|
|
|
|
SmallVector<Value *, 4>::iterator SameTypeIt = IncIt;
|
|
|
|
while (SameTypeIt != E &&
|
|
|
|
(*SameTypeIt)->getType() == (*IncIt)->getType()) {
|
|
|
|
VisitedInstrs.insert(*SameTypeIt);
|
|
|
|
++SameTypeIt;
|
|
|
|
}
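// For example, incoming PHIs of types {i32, i32, float, float} are grouped
// by the sort above into an i32 run and a float run, and each run is then
// handed to tryToVectorizeList() separately.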
|
2013-08-20 21:21:45 +00:00
|
|
|
|
2013-10-12 18:56:27 +00:00
|
|
|
// Try to vectorize them.
|
|
|
|
unsigned NumElts = (SameTypeIt - IncIt);
|
|
|
|
DEBUG(errs() << "SLP: Trying to vectorize starting at PHIs (" << NumElts << ")\n");
|
2014-08-27 05:25:25 +00:00
|
|
|
if (NumElts > 1 && tryToVectorizeList(makeArrayRef(IncIt, NumElts), R)) {
|
2013-10-12 18:56:27 +00:00
|
|
|
// Success. Start over because instructions might have been changed.
|
|
|
|
HaveVectorizedPhiNodes = true;
|
2013-08-20 21:21:45 +00:00
|
|
|
Changed = true;
|
2013-10-12 18:56:27 +00:00
|
|
|
break;
|
2013-08-20 21:21:45 +00:00
|
|
|
}
|
|
|
|
|
2014-01-24 17:20:08 +00:00
|
|
|
// Start over at the next instruction of a different type (or the end).
|
2013-10-12 18:56:27 +00:00
|
|
|
IncIt = SameTypeIt;
|
2013-07-12 00:04:18 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-20 21:21:45 +00:00
|
|
|
VisitedInstrs.clear();
|
|
|
|
|
|
|
|
for (BasicBlock::iterator it = BB->begin(), e = BB->end(); it != e; it++) {
|
|
|
|
// We may go through BB multiple times so skip the ones we have already checked.
|
2014-11-19 07:49:26 +00:00
|
|
|
if (!VisitedInstrs.insert(it).second)
|
2013-08-20 21:21:45 +00:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (isa<DbgInfoIntrinsic>(it))
|
2013-06-20 17:54:36 +00:00
|
|
|
continue;
|
2013-04-15 22:00:26 +00:00
|
|
|
|
|
|
|
// Try to vectorize reductions that use PHINodes.
|
2013-08-20 21:21:45 +00:00
|
|
|
if (PHINode *P = dyn_cast<PHINode>(it)) {
|
2013-04-15 22:00:26 +00:00
|
|
|
// Check that the PHI is a reduction PHI.
|
2013-06-20 17:54:36 +00:00
|
|
|
if (P->getNumIncomingValues() != 2)
|
|
|
|
return Changed;
|
|
|
|
Value *Rdx =
|
|
|
|
(P->getIncomingBlock(0) == BB
|
|
|
|
? (P->getIncomingValue(0))
|
2014-04-25 05:29:35 +00:00
|
|
|
: (P->getIncomingBlock(1) == BB ? P->getIncomingValue(1)
|
|
|
|
: nullptr));
|
2013-04-15 22:00:26 +00:00
|
|
|
// Check if this is a Binary Operator.
|
|
|
|
BinaryOperator *BI = dyn_cast_or_null<BinaryOperator>(Rdx);
|
|
|
|
if (!BI)
|
2013-04-09 19:44:35 +00:00
|
|
|
continue;
|
2013-04-12 21:11:14 +00:00
|
|
|
|
2013-09-21 01:06:00 +00:00
|
|
|
// Try to match and vectorize a horizontal reduction.
|
|
|
|
HorizontalReduction HorRdx;
|
2015-03-10 02:37:25 +00:00
|
|
|
if (ShouldVectorizeHor && HorRdx.matchAssociativeReduction(P, BI) &&
|
2013-09-21 01:06:00 +00:00
|
|
|
HorRdx.tryToReduce(R, TTI)) {
|
|
|
|
Changed = true;
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
Value *Inst = BI->getOperand(0);
|
2013-06-20 17:54:36 +00:00
|
|
|
if (Inst == P)
|
|
|
|
Inst = BI->getOperand(1);
|
2013-06-22 21:34:10 +00:00
|
|
|
|
2013-08-20 21:21:45 +00:00
|
|
|
if (tryToVectorize(dyn_cast<BinaryOperator>(Inst), R)) {
|
|
|
|
// We would like to start over since some instructions are deleted
|
|
|
|
// and the iterator may become invalid.
|
|
|
|
Changed = true;
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
2013-09-21 01:06:00 +00:00
|
|
|
continue;
|
2013-08-20 21:21:45 +00:00
|
|
|
}
|
2013-09-21 01:06:00 +00:00
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
continue;
|
|
|
|
}
|
2013-04-12 21:11:14 +00:00
|
|
|
|
2013-09-21 01:06:00 +00:00
|
|
|
// Try to vectorize horizontal reductions feeding into a store.
|
2013-09-25 14:02:32 +00:00
|
|
|
if (ShouldStartVectorizeHorAtStore)
|
|
|
|
if (StoreInst *SI = dyn_cast<StoreInst>(it))
|
|
|
|
if (BinaryOperator *BinOp =
|
|
|
|
dyn_cast<BinaryOperator>(SI->getValueOperand())) {
|
|
|
|
HorizontalReduction HorRdx;
|
2015-03-10 02:37:25 +00:00
|
|
|
if (((HorRdx.matchAssociativeReduction(nullptr, BinOp) &&
|
2013-09-25 14:02:32 +00:00
|
|
|
HorRdx.tryToReduce(R, TTI)) ||
|
|
|
|
tryToVectorize(BinOp, R))) {
|
|
|
|
Changed = true;
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
|
|
|
continue;
|
|
|
|
}
|
2013-09-21 01:06:00 +00:00
|
|
|
}
|
|
|
|
|
2014-11-19 16:07:38 +00:00
|
|
|
// Try to vectorize horizontal reductions feeding into a return.
|
|
|
|
if (ReturnInst *RI = dyn_cast<ReturnInst>(it))
|
|
|
|
if (RI->getNumOperands() != 0)
|
|
|
|
if (BinaryOperator *BinOp =
|
|
|
|
dyn_cast<BinaryOperator>(RI->getOperand(0))) {
|
|
|
|
DEBUG(dbgs() << "SLP: Found a return to vectorize.\n");
|
|
|
|
if (tryToVectorizePair(BinOp->getOperand(0),
|
|
|
|
BinOp->getOperand(1), R)) {
|
|
|
|
Changed = true;
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
// Try to vectorize trees that start at compare instructions.
|
2013-08-20 21:21:45 +00:00
|
|
|
if (CmpInst *CI = dyn_cast<CmpInst>(it)) {
|
2013-04-15 22:00:26 +00:00
|
|
|
if (tryToVectorizePair(CI->getOperand(0), CI->getOperand(1), R)) {
|
2013-08-20 21:21:45 +00:00
|
|
|
Changed = true;
|
|
|
|
// We would like to start over since some instructions are deleted
|
|
|
|
// and the iterator may become invalid.
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
2013-04-15 22:00:26 +00:00
|
|
|
continue;
|
|
|
|
}
|
2013-08-20 21:21:45 +00:00
|
|
|
|
|
|
|
for (int i = 0; i < 2; ++i) {
|
2014-07-30 21:07:56 +00:00
|
|
|
if (BinaryOperator *BI = dyn_cast<BinaryOperator>(CI->getOperand(i))) {
|
2014-08-01 08:05:55 +00:00
|
|
|
if (tryToVectorizePair(BI->getOperand(0), BI->getOperand(1), R)) {
|
2014-07-30 21:07:56 +00:00
|
|
|
Changed = true;
|
|
|
|
// We would like to start over since some instructions are deleted
|
|
|
|
// and the iterator may become invalid.
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
2015-02-02 12:45:34 +00:00
|
|
|
break;
|
2014-07-30 21:07:56 +00:00
|
|
|
}
|
|
|
|
}
|
2013-08-20 21:21:45 +00:00
|
|
|
}
|
2013-04-15 22:00:26 +00:00
|
|
|
continue;
|
2013-04-09 19:44:35 +00:00
|
|
|
}
|
2013-08-26 17:56:35 +00:00
|
|
|
|
|
|
|
// Try to vectorize trees that start at insertelement instructions.
|
2014-05-04 17:10:15 +00:00
|
|
|
if (InsertElementInst *FirstInsertElem = dyn_cast<InsertElementInst>(it)) {
|
|
|
|
SmallVector<Value *, 16> BuildVector;
|
|
|
|
SmallVector<Value *, 16> BuildVectorOpds;
|
|
|
|
if (!findBuildVector(FirstInsertElem, BuildVector, BuildVectorOpds))
|
2013-08-26 17:56:35 +00:00
|
|
|
continue;
|
|
|
|
|
2014-05-04 17:10:15 +00:00
|
|
|
// Vectorize starting with the build vector operands ignoring the
|
|
|
|
// BuildVector instructions for the purpose of scheduling and user
|
|
|
|
// extraction.
|
|
|
|
if (tryToVectorizeList(BuildVectorOpds, R, BuildVector)) {
|
2013-08-26 17:56:35 +00:00
|
|
|
Changed = true;
|
|
|
|
it = BB->begin();
|
|
|
|
e = BB->end();
|
|
|
|
}
|
|
|
|
|
|
|
|
continue;
|
|
|
|
}
|
2013-04-09 19:44:35 +00:00
|
|
|
}
|
|
|
|
|
2013-04-15 22:00:26 +00:00
|
|
|
return Changed;
|
|
|
|
}
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-07-07 06:57:07 +00:00
|
|
|
bool SLPVectorizer::vectorizeStoreChains(BoUpSLP &R) {
|
2013-04-15 22:00:26 +00:00
|
|
|
bool Changed = false;
|
|
|
|
// Attempt to sort and vectorize each of the store-groups.
|
|
|
|
for (StoreListMap::iterator it = StoreRefs.begin(), e = StoreRefs.end();
|
|
|
|
it != e; ++it) {
|
|
|
|
if (it->second.size() < 2)
|
|
|
|
continue;
|
2013-04-14 03:22:20 +00:00
|
|
|
|
2013-06-20 17:54:36 +00:00
|
|
|
DEBUG(dbgs() << "SLP: Analyzing a store chain of length "
|
2013-07-16 15:25:17 +00:00
|
|
|
<< it->second.size() << ".\n");
|
2013-04-09 19:44:35 +00:00
|
|
|
|
2013-07-16 15:25:17 +00:00
|
|
|
// Process the stores in chunks of 16.
|
|
|
|
for (unsigned CI = 0, CE = it->second.size(); CI < CE; CI+=16) {
|
|
|
|
unsigned Len = std::min<unsigned>(CE - CI, 16);
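// For example, a group of 40 related stores is handed to vectorizeStores()
// as chunks of 16, 16 and 8 stores.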
|
2014-08-27 05:25:25 +00:00
|
|
|
Changed |= vectorizeStores(makeArrayRef(&it->second[CI], Len),
|
|
|
|
-SLPCostThreshold, R);
|
2013-07-16 15:25:17 +00:00
|
|
|
}
|
2013-04-09 19:44:35 +00:00
|
|
|
}
|
2013-04-15 22:00:26 +00:00
|
|
|
return Changed;
|
|
|
|
}
|
2013-04-09 19:44:35 +00:00
|
|
|
|
|
|
|
} // end anonymous namespace
|
|
|
|
|
|
|
|
char SLPVectorizer::ID = 0;
|
|
|
|
static const char lv_name[] = "SLP Vectorizer";
|
|
|
|
INITIALIZE_PASS_BEGIN(SLPVectorizer, SV_NAME, lv_name, false, false)
|
|
|
|
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
|
2015-01-31 03:43:40 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
|
2015-01-04 12:03:27 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
|
2013-04-09 19:44:35 +00:00
|
|
|
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
|
|
|
|
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
|
|
|
|
INITIALIZE_PASS_END(SLPVectorizer, SV_NAME, lv_name, false, false)
|
|
|
|
|
|
|
|
namespace llvm {
|
2013-06-20 17:54:36 +00:00
|
|
|
Pass *createSLPVectorizerPass() { return new SLPVectorizer(); }
|
2013-04-09 19:44:35 +00:00
|
|
|
}
|