From 167a57ca452efbb014ebce7ecfa99501b5039611 Mon Sep 17 00:00:00 2001 From: Eli Bendersky Date: Thu, 1 May 2014 18:38:36 +0000 Subject: [PATCH] Add an optimization that does CSE in a group of similar GEPs. This optimization merges the common part of a group of GEPs, so we can compute each pointer address by adding a simple offset to the common part. The optimization is currently only enabled for the NVPTX backend, where it has a large payoff on some benchmarks. Review: http://reviews.llvm.org/D3462 Patch by Jingyue Wu. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@207783 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/InitializePasses.h | 1 + include/llvm/LinkAllPasses.h | 1 + include/llvm/Transforms/Scalar.h | 6 + lib/Target/NVPTX/NVPTXTargetMachine.cpp | 21 +- lib/Transforms/Scalar/Scalar.cpp | 1 + .../Scalar/SeparateConstOffsetFromGEP.cpp | 583 ++++++++++++++++++ .../NVPTX/lit.local.cfg | 4 + .../NVPTX/split-gep-and-gvn.ll | 60 ++ .../NVPTX/split-gep.ll | 101 +++ 9 files changed, 774 insertions(+), 4 deletions(-) create mode 100644 lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp create mode 100644 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg create mode 100644 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll create mode 100644 test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h index 232e422de1c..8e536159db1 100644 --- a/include/llvm/InitializePasses.h +++ b/include/llvm/InitializePasses.h @@ -238,6 +238,7 @@ void initializeSimpleInlinerPass(PassRegistry&); void initializeRegisterCoalescerPass(PassRegistry&); void initializeSingleLoopExtractorPass(PassRegistry&); void initializeSinkingPass(PassRegistry&); +void initializeSeparateConstOffsetFromGEPPass(PassRegistry &); void initializeSlotIndexesPass(PassRegistry&); void initializeSpillPlacementPass(PassRegistry&); void initializeStackProtectorPass(PassRegistry&); diff --git a/include/llvm/LinkAllPasses.h b/include/llvm/LinkAllPasses.h index 9cb1c5c8a83..2616ebd1fab 100644 --- a/include/llvm/LinkAllPasses.h +++ b/include/llvm/LinkAllPasses.h @@ -156,6 +156,7 @@ namespace { (void) llvm::createBBVectorizePass(); (void) llvm::createPartiallyInlineLibCallsPass(); (void) llvm::createScalarizerPass(); + (void) llvm::createSeparateConstOffsetFromGEPPass(); (void)new llvm::IntervalPartition(); (void)new llvm::FindUsedTypes(); diff --git a/include/llvm/Transforms/Scalar.h b/include/llvm/Transforms/Scalar.h index 453de03972d..6aea643c42a 100644 --- a/include/llvm/Transforms/Scalar.h +++ b/include/llvm/Transforms/Scalar.h @@ -377,6 +377,12 @@ FunctionPass *createScalarizerPass(); // AddDiscriminators - Add DWARF path discriminators to the IR. 
 FunctionPass *createAddDiscriminatorsPass();
 
+//===----------------------------------------------------------------------===//
+//
+// SeparateConstOffsetFromGEP - Split GEPs for better CSE
+//
+FunctionPass *createSeparateConstOffsetFromGEPPass();
+
 } // End llvm namespace
 
 #endif
diff --git a/lib/Target/NVPTX/NVPTXTargetMachine.cpp b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
index 0cc5c516292..26a4f840520 100644
--- a/lib/Target/NVPTX/NVPTXTargetMachine.cpp
+++ b/lib/Target/NVPTX/NVPTXTargetMachine.cpp
@@ -147,10 +147,23 @@ void NVPTXPassConfig::addIRPasses() {
   addPass(createNVPTXAssignValidGlobalNamesPass());
   addPass(createGenericToNVVMPass());
   addPass(createNVPTXFavorNonGenericAddrSpacesPass());
-  // The FavorNonGenericAddrSpaces pass may remove instructions and leave some
-  // values unused. Therefore, we run a DCE pass right afterwards. We could
-  // remove unused values in an ad-hoc manner, but it requires manual work and
-  // might be error-prone.
+  addPass(createSeparateConstOffsetFromGEPPass());
+  // The SeparateConstOffsetFromGEP pass creates variadic bases that can be
+  // used by multiple GEPs. Run GVN or EarlyCSE so that they are actually
+  // reused. GVN generates significantly better code than EarlyCSE for some
+  // of our benchmarks.
+  if (getOptLevel() == CodeGenOpt::Aggressive)
+    addPass(createGVNPass());
+  else
+    addPass(createEarlyCSEPass());
+  // Both FavorNonGenericAddrSpaces and SeparateConstOffsetFromGEP may leave
+  // some dead code. We could remove dead code in an ad-hoc manner, but that
+  // requires manual work and might be error-prone.
+  //
+  // The FavorNonGenericAddrSpaces pass shortcuts unnecessary addrspacecasts,
+  // leaving them unused.
+  //
+  // SeparateConstOffsetFromGEP rebuilds a new index from the old index, and
+  // the old index and some of its intermediate results may become unused.
   addPass(createDeadCodeEliminationPass());
 }
diff --git a/lib/Transforms/Scalar/Scalar.cpp b/lib/Transforms/Scalar/Scalar.cpp
index 09167b9e82a..f8f828c8405 100644
--- a/lib/Transforms/Scalar/Scalar.cpp
+++ b/lib/Transforms/Scalar/Scalar.cpp
@@ -64,6 +64,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
   initializeStructurizeCFGPass(Registry);
   initializeSinkingPass(Registry);
   initializeTailCallElimPass(Registry);
+  initializeSeparateConstOffsetFromGEPPass(Registry);
 }
 
 void LLVMInitializeScalarOpts(LLVMPassRegistryRef R) {
diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
new file mode 100644
index 00000000000..0465f237ece
--- /dev/null
+++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -0,0 +1,583 @@
+//===-- SeparateConstOffsetFromGEP.cpp - ------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Loop unrolling may create many similar GEPs for array accesses.
+// e.g., a 2-level loop
+//
+// float a[32][32]; // global variable
+//
+// for (int i = 0; i < 2; ++i) {
+//   for (int j = 0; j < 2; ++j) {
+//     ...
+//     ... = a[x + i][y + j];
+//     ...
+//   }
+// }
+//
+// will probably be unrolled to:
+//
+// gep %a, 0, %x, %y; load
+// gep %a, 0, %x, %y + 1; load
+// gep %a, 0, %x + 1, %y; load
+// gep %a, 0, %x + 1, %y + 1; load
+//
+// LLVM's GVN does not use partial redundancy elimination yet, and is thus
+// unable to reuse (gep %a, 0, %x, %y).
+// As a result, this misoptimization incurs a significant slowdown on targets
+// with limited addressing modes. For instance, because the PTX target does
+// not support the reg+reg addressing mode, the NVPTX backend emits PTX code
+// that literally computes the pointer address of each GEP, wasting tons of
+// registers. It emits the following PTX for the first load and similar PTX
+// for other loads.
+//
+// mov.u32         %r1, %x;
+// mov.u32         %r2, %y;
+// mul.wide.u32    %rl2, %r1, 128;
+// mov.u64         %rl3, a;
+// add.s64         %rl4, %rl3, %rl2;
+// mul.wide.u32    %rl5, %r2, 4;
+// add.s64         %rl6, %rl4, %rl5;
+// ld.global.f32   %f1, [%rl6];
+//
+// To reduce register pressure, the optimization implemented in this file
+// merges the common part of a group of GEPs, so we can compute each pointer
+// address by adding a simple offset to the common part, saving many registers.
+//
+// It works by splitting each GEP into a variadic base and a constant offset.
+// The variadic base can be computed once and reused by multiple GEPs, and the
+// constant offsets can be nicely folded into the reg+immediate addressing mode
+// (supported by most targets) without using any extra register.
+//
+// For instance, we transform the four GEPs and four loads in the above example
+// into:
+//
+// base = gep a, 0, x, y
+// load base
+// load base + 1 * sizeof(float)
+// load base + 32 * sizeof(float)
+// load base + 33 * sizeof(float)
+//
+// Given the transformed IR, a backend that supports the reg+immediate
+// addressing mode can easily fold the pointer arithmetic into the loads. For
+// example, the NVPTX backend can easily fold the pointer arithmetic into the
+// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
+//
+// mov.u32         %r1, %x;
+// mov.u32         %r2, %y;
+// mul.wide.u32    %rl2, %r1, 128;
+// mov.u64         %rl3, a;
+// add.s64         %rl4, %rl3, %rl2;
+// mul.wide.u32    %rl5, %r2, 4;
+// add.s64         %rl6, %rl4, %rl5;
+// ld.global.f32   %f1, [%rl6]; // so far the same as unoptimized PTX
+// ld.global.f32   %f2, [%rl6+4]; // much better
+// ld.global.f32   %f3, [%rl6+128]; // much better
+// ld.global.f32   %f4, [%rl6+132]; // much better
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
+    "disable-separate-const-offset-from-gep", cl::init(false),
+    cl::desc("Do not separate the constant offset from a GEP instruction"),
+    cl::Hidden);
+
+namespace {
+
+/// \brief A helper class for separating a constant offset from a GEP index.
+///
+/// In real programs, a GEP index may be more complicated than a simple
+/// addition of a value and a constant integer that could be trivially split.
+/// For example, to split ((a << 3) | 5) + b, we need to search deeper for the
+/// constant offset, so that we can separate the index into (a << 3) + b and 5.
+///
+/// Therefore, this class looks into the expression that computes a given GEP
+/// index, and tries to find a constant integer that can be hoisted to the
+/// outermost level of the expression as an addition. Not every constant in an
+/// expression can jump out.
+/// e.g., we cannot transform (b * (a + 5)) to (b * a + 5); nor can we
+/// transform (3 * (a + 5)) to (3 * a + 5), although in this case -instcombine
+/// has probably already optimized (3 * (a + 5)) to (3 * a + 15).
+class ConstantOffsetExtractor {
+ public:
+  /// Extracts a constant offset from the given GEP index. It outputs the
+  /// numeric value of the extracted constant offset (0 if it fails), and a
+  /// new index representing the remainder (equal to the original index minus
+  /// the constant offset).
+  /// \p Idx    The given GEP index
+  /// \p NewIdx The new index to replace \p Idx
+  /// \p DL     The datalayout of the module
+  /// \p IP     Calculating the new index requires new instructions. IP
+  ///           indicates where to insert them (typically right before the GEP).
+  static int64_t Extract(Value *Idx, Value *&NewIdx, const DataLayout *DL,
+                         Instruction *IP);
+  /// Looks for a constant offset without extracting it. The meaning of the
+  /// arguments and the return value are the same as Extract.
+  static int64_t Find(Value *Idx, const DataLayout *DL);
+
+ private:
+  ConstantOffsetExtractor(const DataLayout *Layout, Instruction *InsertionPt)
+      : DL(Layout), IP(InsertionPt) {}
+  /// Searches the expression that computes V for a constant offset. If the
+  /// search is successful, updates UserChain as a path from V to the constant
+  /// offset.
+  int64_t find(Value *V);
+  /// A helper function to look into both operands of a binary operator U.
+  /// \p IsSub Whether U is a sub operator. If so, we need to negate the
+  /// constant offset at some point.
+  int64_t findInEitherOperand(User *U, bool IsSub);
+  /// After finding the constant offset and how it is reached from the GEP
+  /// index, we build a new index which is a clone of the old one except that
+  /// the constant offset is removed. For example, given (a + (b + 5)) and
+  /// knowing the constant offset is 5, this function returns (a + b).
+  ///
+  /// We cannot simply change the constant to zero because the expression that
+  /// computes the index or its intermediate result may be used by others.
+  Value *rebuildWithoutConstantOffset();
+  // A helper function for rebuildWithoutConstantOffset that rebuilds the
+  // direct user (U) of the constant offset (C).
+  Value *rebuildLeafWithoutConstantOffset(User *U, Value *C);
+  /// Returns a clone of U, except that the first occurrence of From is
+  /// replaced with To.
+  Value *cloneAndReplace(User *U, Value *From, Value *To);
+
+  /// Returns true if LHS and RHS have no bits in common, i.e., LHS & RHS == 0.
+  bool NoCommonBits(Value *LHS, Value *RHS) const;
+  /// Computes which bits are known to be one or zero.
+  /// \p KnownOne  Mask of all bits that are known to be one.
+  /// \p KnownZero Mask of all bits that are known to be zero.
+  void ComputeKnownBits(Value *V, APInt &KnownOne, APInt &KnownZero) const;
+  /// Finds the first use of Used in U. Returns -1 if not found.
+  static unsigned FindFirstUse(User *U, Value *Used);
+
+  /// The path from the constant offset to the old GEP index. e.g., if the GEP
+  /// index is "a * b + (c + 5)", then after running find(), UserChain[0] will
+  /// be the constant 5, UserChain[1] the subexpression "c + 5", and
+  /// UserChain[2] the entire expression "a * b + (c + 5)".
+  ///
+  /// This path helps rebuildWithoutConstantOffset rebuild the new GEP index.
+  SmallVector<User *, 8> UserChain;
+  /// The data layout of the module. Used in ComputeKnownBits.
+  const DataLayout *DL;
+  Instruction *IP;  /// Insertion position of cloned instructions.
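+
+  // Worked example (editorial illustration): for the index "a * b + (c + 5)",
+  // find() returns 5 and leaves UserChain == {5, c + 5, a * b + (c + 5)}.
+  // rebuildWithoutConstantOffset() then rebuilds the leaf "c + 5" as just
+  // "c", and clones the outer add before IP, yielding "a * b + c" as the new
+  // constant-free index.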
+};
+
+/// \brief A pass that tries to split every GEP in the function into a variadic
+/// base and a constant offset. It is a FunctionPass because searching for the
+/// constant offset may inspect other basic blocks.
+class SeparateConstOffsetFromGEP : public FunctionPass {
+ public:
+  static char ID;
+  SeparateConstOffsetFromGEP() : FunctionPass(ID) {
+    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
+  }
+
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.addRequired<DataLayoutPass>();
+    AU.addRequired<TargetTransformInfo>();
+  }
+  bool runOnFunction(Function &F) override;
+
+ private:
+  /// Tries to split the given GEP into a variadic base and a constant offset,
+  /// and returns true if the splitting succeeds.
+  bool splitGEP(GetElementPtrInst *GEP);
+  /// Finds the constant offset within each index, and accumulates them. This
+  /// function only inspects the GEP without changing it. The output
+  /// NeedsExtraction indicates whether we can extract a non-zero constant
+  /// offset from any index.
+  int64_t accumulateByteOffset(GetElementPtrInst *GEP, const DataLayout *DL,
+                               bool &NeedsExtraction);
+};
+} // anonymous namespace
+
+char SeparateConstOffsetFromGEP::ID = 0;
+INITIALIZE_PASS_BEGIN(
+    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
+    "Split GEPs to a variadic base and a constant offset for better CSE", false,
+    false)
+INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
+INITIALIZE_PASS_DEPENDENCY(DataLayoutPass)
+INITIALIZE_PASS_END(
+    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
+    "Split GEPs to a variadic base and a constant offset for better CSE", false,
+    false)
+
+FunctionPass *llvm::createSeparateConstOffsetFromGEPPass() {
+  return new SeparateConstOffsetFromGEP();
+}
+
+int64_t ConstantOffsetExtractor::findInEitherOperand(User *U, bool IsSub) {
+  assert(U->getNumOperands() == 2);
+  int64_t ConstantOffset = find(U->getOperand(0));
+  // If we found a constant offset in the left operand, stop and return that.
+  // This shortcut might cause us to miss opportunities to combine the
+  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
+  // However, such cases are probably already handled by -instcombine,
+  // given this pass runs after the standard optimizations.
+  if (ConstantOffset != 0) return ConstantOffset;
+  ConstantOffset = find(U->getOperand(1));
+  // If U is a sub operator, negate the constant offset found in the right
+  // operand.
+  return IsSub ? -ConstantOffset : ConstantOffset;
+}
+
+int64_t ConstantOffsetExtractor::find(Value *V) {
+  // TODO(jingyue): We can even trace into integer/pointer casts, such as
+  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
+  // integers because it gives good enough results for our benchmarks.
+  assert(V->getType()->isIntegerTy());
+
+  User *U = dyn_cast<User>(V);
+  // We cannot do much with Values that are not a User, such as BasicBlock and
+  // MDNode.
+  if (U == nullptr) return 0;
+
+  int64_t ConstantOffset = 0;
+  if (ConstantInt *CI = dyn_cast<ConstantInt>(U)) {
+    // Hooray, we found it!
+    ConstantOffset = CI->getSExtValue();
+  } else if (Operator *O = dyn_cast<Operator>(U)) {
+    // The GEP index may be more complicated than a simple addition of a
+    // variable and a constant. Therefore, we trace into subexpressions for
+    // more hoisting opportunities.
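+    // For instance (editorial illustration), given the index "(a << 3) | 5"
+    // from the class-level comment: the Or case below fires because 5 and
+    // "a << 3" share no bits (the shift guarantees the low three bits are
+    // zero), so the index is treated as "(a << 3) + 5" and the 5 is hoisted.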
+    switch (O->getOpcode()) {
+    case Instruction::Add: {
+      ConstantOffset = findInEitherOperand(U, false);
+      break;
+    }
+    case Instruction::Sub: {
+      ConstantOffset = findInEitherOperand(U, true);
+      break;
+    }
+    case Instruction::Or: {
+      // If LHS and RHS don't have common bits, (LHS | RHS) is equivalent to
+      // (LHS + RHS).
+      if (NoCommonBits(U->getOperand(0), U->getOperand(1)))
+        ConstantOffset = findInEitherOperand(U, false);
+      break;
+    }
+    case Instruction::SExt: {
+      // For safety, we trace into sext only when its operand is marked
+      // "nsw", because the nsw flag guarantees no signed wrap. e.g., we can
+      // safely transform "sext (add nsw a, 5)" into "add nsw (sext a), 5".
+      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
+        if (BO->hasNoSignedWrap())
+          ConstantOffset = find(U->getOperand(0));
+      }
+      break;
+    }
+    case Instruction::ZExt: {
+      // Similarly, we trace into zext only when its operand is marked with
+      // "nuw", because zext (add nuw a, b) == add nuw (zext a), (zext b).
+      if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0))) {
+        if (BO->hasNoUnsignedWrap())
+          ConstantOffset = find(U->getOperand(0));
+      }
+      break;
+    }
+    }
+  }
+  // If we found a non-zero constant offset, add it to the path for future
+  // transformation (rebuildWithoutConstantOffset). Zero is a valid constant
+  // offset, but doesn't help this optimization.
+  if (ConstantOffset != 0)
+    UserChain.push_back(U);
+  return ConstantOffset;
+}
+
+unsigned ConstantOffsetExtractor::FindFirstUse(User *U, Value *Used) {
+  for (unsigned I = 0, E = U->getNumOperands(); I < E; ++I) {
+    if (U->getOperand(I) == Used)
+      return I;
+  }
+  return -1;
+}
+
+Value *ConstantOffsetExtractor::cloneAndReplace(User *U, Value *From,
+                                                Value *To) {
+  // Finds in U the first use of From. It is safe to ignore future occurrences
+  // of From, because findInEitherOperand similarly stops searching the right
+  // operand when the first operand has a non-zero constant offset.
+  unsigned OpNo = FindFirstUse(U, From);
+  assert(OpNo != (unsigned)-1 && "UserChain wasn't built correctly");
+
+  // ConstantOffsetExtractor::find only follows Operators (i.e., Instructions
+  // and ConstantExprs). Therefore, U is either an Instruction or a
+  // ConstantExpr.
+  if (Instruction *I = dyn_cast<Instruction>(U)) {
+    Instruction *Clone = I->clone();
+    Clone->setOperand(OpNo, To);
+    Clone->insertBefore(IP);
+    return Clone;
+  }
+  // cast<Constant>(To) is safe because a ConstantExpr only uses Constants.
+  return cast<ConstantExpr>(U)
+      ->getWithOperandReplaced(OpNo, cast<Constant>(To));
+}
+
+Value *ConstantOffsetExtractor::rebuildLeafWithoutConstantOffset(User *U,
+                                                                 Value *C) {
+  assert(U->getNumOperands() <= 2 &&
+         "We didn't trace into any operator with more than 2 operands");
+  // If U has only one operand which is the constant offset, removing the
+  // constant offset leaves U as a null value.
+  if (U->getNumOperands() == 1)
+    return Constant::getNullValue(U->getType());
+
+  // U->getNumOperands() == 2
+  unsigned OpNo = FindFirstUse(U, C); // U->getOperand(OpNo) == C
+  assert(OpNo < 2 && "UserChain wasn't built correctly");
+  Value *TheOther = U->getOperand(1 - OpNo); // The other operand of U
+  // If U = C - X, removing C makes U = -X; otherwise U will simply be X.
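+  // e.g. (editorial illustration): for U = (5 - X), OpNo == 0, and dropping
+  // the 5 leaves -X, so we emit a negation below; for U = (X - 5), OpNo == 1,
+  // and find() already reported the offset as -5, so dropping it leaves X.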
+  if (!isa<SubOperator>(U) || OpNo == 1)
+    return TheOther;
+  if (isa<ConstantExpr>(U))
+    return ConstantExpr::getNeg(cast<Constant>(TheOther));
+  return BinaryOperator::CreateNeg(TheOther, "", IP);
+}
+
+Value *ConstantOffsetExtractor::rebuildWithoutConstantOffset() {
+  assert(UserChain.size() > 0 && "you at least found a constant, right?");
+  // Start with the constant and go up through UserChain, each time building a
+  // clone of the subexpression but with the constant removed.
+  // e.g., to build a clone of (a + (b + (c + 5))) but with the 5 removed, we
+  // first build c, then (b + c), and finally (a + (b + c)).
+  //
+  // Fast path: if the GEP index is a constant, simply return 0.
+  if (UserChain.size() == 1)
+    return ConstantInt::get(UserChain[0]->getType(), 0);
+
+  Value *Remainder =
+      rebuildLeafWithoutConstantOffset(UserChain[1], UserChain[0]);
+  for (size_t I = 2; I < UserChain.size(); ++I)
+    Remainder = cloneAndReplace(UserChain[I], UserChain[I - 1], Remainder);
+  return Remainder;
+}
+
+int64_t ConstantOffsetExtractor::Extract(Value *Idx, Value *&NewIdx,
+                                         const DataLayout *DL,
+                                         Instruction *IP) {
+  ConstantOffsetExtractor Extractor(DL, IP);
+  // Find a non-zero constant offset first.
+  int64_t ConstantOffset = Extractor.find(Idx);
+  if (ConstantOffset == 0)
+    return 0;
+  // Then rebuild a new index with the constant removed.
+  NewIdx = Extractor.rebuildWithoutConstantOffset();
+  return ConstantOffset;
+}
+
+int64_t ConstantOffsetExtractor::Find(Value *Idx, const DataLayout *DL) {
+  return ConstantOffsetExtractor(DL, nullptr).find(Idx);
+}
+
+void ConstantOffsetExtractor::ComputeKnownBits(Value *V, APInt &KnownOne,
+                                               APInt &KnownZero) const {
+  IntegerType *IT = cast<IntegerType>(V->getType());
+  KnownOne = APInt(IT->getBitWidth(), 0);
+  KnownZero = APInt(IT->getBitWidth(), 0);
+  llvm::ComputeMaskedBits(V, KnownZero, KnownOne, DL, 0);
+}
+
+bool ConstantOffsetExtractor::NoCommonBits(Value *LHS, Value *RHS) const {
+  assert(LHS->getType() == RHS->getType() &&
+         "LHS and RHS should have the same type");
+  APInt LHSKnownOne, LHSKnownZero, RHSKnownOne, RHSKnownZero;
+  ComputeKnownBits(LHS, LHSKnownOne, LHSKnownZero);
+  ComputeKnownBits(RHS, RHSKnownOne, RHSKnownZero);
+  return (LHSKnownZero | RHSKnownZero).isAllOnesValue();
+}
+
+int64_t SeparateConstOffsetFromGEP::accumulateByteOffset(
+    GetElementPtrInst *GEP, const DataLayout *DL, bool &NeedsExtraction) {
+  NeedsExtraction = false;
+  int64_t AccumulativeByteOffset = 0;
+  gep_type_iterator GTI = gep_type_begin(*GEP);
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (isa<SequentialType>(*GTI)) {
+      // Tries to extract a constant offset from this GEP index.
+      int64_t ConstantOffset =
+          ConstantOffsetExtractor::Find(GEP->getOperand(I), DL);
+      if (ConstantOffset != 0) {
+        NeedsExtraction = true;
+        // A GEP may have multiple indices. We accumulate the extracted
+        // constant offset into a byte offset, and later offset the remainder
+        // of the original GEP with this byte offset.
+        AccumulativeByteOffset +=
+            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
+      }
+    }
+  }
+  return AccumulativeByteOffset;
+}
+
+bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
+  // Skip vector GEPs.
+  if (GEP->getType()->isVectorTy())
+    return false;
+
+  // The backend can already nicely handle the case where all indices are
+  // constant.
+  if (GEP->hasAllConstantIndices())
+    return false;
+
+  bool Changed = false;
+
+  // Shortcut integer casts. Eliminating these explicit casts can make
+  // subsequent optimizations more obvious: ConstantOffsetExtractor needn't
+  // trace into these casts.
+  if (GEP->isInBounds()) {
+    // Doing this to inbounds GEPs is safe because their indices are guaranteed
+    // to be non-negative and in bounds.
+    gep_type_iterator GTI = gep_type_begin(*GEP);
+    for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+      if (isa<SequentialType>(*GTI)) {
+        if (Operator *O = dyn_cast<Operator>(GEP->getOperand(I))) {
+          if (O->getOpcode() == Instruction::SExt ||
+              O->getOpcode() == Instruction::ZExt) {
+            GEP->setOperand(I, O->getOperand(0));
+            Changed = true;
+          }
+        }
+      }
+    }
+  }
+
+  const DataLayout *DL = &getAnalysis<DataLayoutPass>().getDataLayout();
+  bool NeedsExtraction;
+  int64_t AccumulativeByteOffset =
+      accumulateByteOffset(GEP, DL, NeedsExtraction);
+
+  if (!NeedsExtraction)
+    return Changed;
+  // Before really splitting the GEP, check whether the backend supports the
+  // addressing mode we are about to produce. If not, this splitting probably
+  // won't be beneficial.
+  TargetTransformInfo &TTI = getAnalysis<TargetTransformInfo>();
+  if (!TTI.isLegalAddressingMode(GEP->getType()->getElementType(),
+                                 /*BaseGV=*/nullptr, AccumulativeByteOffset,
+                                 /*HasBaseReg=*/true, /*Scale=*/0)) {
+    return Changed;
+  }
+
+  // Remove the constant offset in each GEP index. The resultant GEP computes
+  // the variadic base.
+  gep_type_iterator GTI = gep_type_begin(*GEP);
+  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
+    if (isa<SequentialType>(*GTI)) {
+      Value *NewIdx = nullptr;
+      // Tries to extract a constant offset from this GEP index.
+      int64_t ConstantOffset =
+          ConstantOffsetExtractor::Extract(GEP->getOperand(I), NewIdx, DL, GEP);
+      if (ConstantOffset != 0) {
+        assert(NewIdx && "ConstantOffset != 0 implies NewIdx is set");
+        GEP->setOperand(I, NewIdx);
+        // Clear the inbounds attribute because the new index may be off-bound.
+        // e.g.,
+        //
+        // b = add i64 a, 5
+        // addr = gep inbounds float* p, i64 b
+        //
+        // is transformed to:
+        //
+        // addr2 = gep float* p, i64 a
+        // addr = gep float* addr2, i64 5
+        //
+        // If a is -4, although the old index b is in bounds, the new index a
+        // is off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
+        // inbounds keyword is not present, the offsets are added to the base
+        // address with silently-wrapping two's complement arithmetic".
+        // Therefore, the final code will be semantically equivalent.
+        //
+        // TODO(jingyue): do some range analysis to keep as many inbounds as
+        // possible. GEPs with inbounds are more friendly to alias analysis.
+        GEP->setIsInBounds(false);
+        Changed = true;
+      }
+    }
+  }
+
+  // Offsets the base with the accumulative byte offset.
+  //
+  //   %gep                        ; the base
+  //   ... %gep ...
+  //
+  // => add the offset
+  //
+  //   %gep2                       ; clone of %gep
+  //   %0       = ptrtoint %gep2
+  //   %1       = add %0, <offset>
+  //   %new.gep = inttoptr %1
+  //   %gep                        ; will be removed
+  //   ... %gep ...
+  //
+  // => replace all uses of %gep with %new.gep and remove %gep
+  //
+  //   %gep2                       ; clone of %gep
+  //   %0       = ptrtoint %gep2
+  //   %1       = add %0, <offset>
+  //   %new.gep = inttoptr %1
+  //   ... %new.gep ...
+  //
+  // TODO(jingyue): Emit a GEP instead of an "uglygep"
+  // (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep) to make the IR
+  // prettier and more alias analysis friendly. One caveat: if the original GEP
+  // ends with a StructType, we need to split the GEP at the last
+  // SequentialType. For instance, consider the following IR:
+  //
+  //   %struct.S = type { float, double }
+  //   @array = global [1024 x %struct.S]
+  //   %p = getelementptr %array, 0, %i + 5, 1
+  //
+  // To separate the constant 5 from %p, we would need to split %p at the last
+  // array index so that we have:
+  //
+  //   %addr = gep %array, 0, %i
+  //   %p = gep %addr, 5, 1
+  Instruction *NewGEP = GEP->clone();
+  NewGEP->insertBefore(GEP);
+  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
+  Value *Addr = new PtrToIntInst(NewGEP, IntPtrTy, "", GEP);
+  Addr = BinaryOperator::CreateAdd(
+      Addr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "", GEP);
+  Addr = new IntToPtrInst(Addr, GEP->getType(), "", GEP);
+
+  GEP->replaceAllUsesWith(Addr);
+  GEP->eraseFromParent();
+
+  return true;
+}
+
+bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
+  if (DisableSeparateConstOffsetFromGEP)
+    return false;
+
+  bool Changed = false;
+  for (Function::iterator B = F.begin(), BE = F.end(); B != BE; ++B) {
+    for (BasicBlock::iterator I = B->begin(), IE = B->end(); I != IE; ) {
+      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++)) {
+        Changed |= splitGEP(GEP);
+      }
+      // No need to split GEP ConstantExprs because all their indices are
+      // already constant.
+    }
+  }
+  return Changed;
+}
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
new file mode 100644
index 00000000000..40532cdaa20
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/lit.local.cfg
@@ -0,0 +1,4 @@
+targets = set(config.root.targets_to_build.split())
+if 'NVPTX' not in targets:
+    config.unsupported = True
+
diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
new file mode 100644
index 00000000000..66f4096fa96
--- /dev/null
+++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -0,0 +1,60 @@
+; RUN: llc < %s -march=nvptx -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_20 | FileCheck %s --check-prefix=PTX
+; RUN: opt < %s -S -separate-const-offset-from-gep -gvn -dce | FileCheck %s --check-prefix=IR
+
+; Verifies the SeparateConstOffsetFromGEP pass.
+; The following code computes
+; *output = array[x][y] + array[x][y+1] + array[x+1][y] + array[x+1][y+1]
+;
+; We expect SeparateConstOffsetFromGEP to transform it to
+;
+; float *base = &array[x][y];
+; *output = base[0] + base[1] + base[32] + base[33];
+;
+; so the backend can emit PTX that uses fewer virtual registers.
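+;
+; For reference (editorial note), the byte offsets relative to the common
+; base &array[x][y] work out to:
+;   array[x][y+1]   -> 1 * sizeof(float)  = 4
+;   array[x+1][y]   -> 32 * sizeof(float) = 128
+;   array[x+1][y+1] -> 33 * sizeof(float) = 132
+; which match the +4/+128/+132 offsets in the PTX and IR checks below.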
+ +target datalayout = "e-i64:64-v16:16-v32:32-n16:32:64" +target triple = "nvptx64-unknown-unknown" + +@array = internal addrspace(3) constant [32 x [32 x float]] zeroinitializer, align 4 + +define void @sum_of_array(i32 %x, i32 %y, float* nocapture %output) { +.preheader: + %0 = zext i32 %y to i64 + %1 = zext i32 %x to i64 + %2 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %0 + %3 = addrspacecast float addrspace(3)* %2 to float* + %4 = load float* %3, align 4 + %5 = fadd float %4, 0.000000e+00 + %6 = add i32 %y, 1 + %7 = zext i32 %6 to i64 + %8 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %1, i64 %7 + %9 = addrspacecast float addrspace(3)* %8 to float* + %10 = load float* %9, align 4 + %11 = fadd float %5, %10 + %12 = add i32 %x, 1 + %13 = zext i32 %12 to i64 + %14 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %0 + %15 = addrspacecast float addrspace(3)* %14 to float* + %16 = load float* %15, align 4 + %17 = fadd float %11, %16 + %18 = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i64 %13, i64 %7 + %19 = addrspacecast float addrspace(3)* %18 to float* + %20 = load float* %19, align 4 + %21 = fadd float %17, %20 + store float %21, float* %output, align 4 + ret void +} + +; PTX-LABEL: sum_of_array( +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}} +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}} +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}} +; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}} + +; IR-LABEL: @sum_of_array( +; IR: [[BASE_PTR:%[0-9]+]] = getelementptr inbounds [32 x [32 x float]] addrspace(3)* @array, i64 0, i32 %x, i32 %y +; IR: [[BASE_INT:%[0-9]+]] = ptrtoint float addrspace(3)* [[BASE_PTR]] to i64 +; IR: %5 = add i64 [[BASE_INT]], 4 +; IR: %10 = add i64 [[BASE_INT]], 128 +; IR: %15 = add i64 [[BASE_INT]], 132 diff --git a/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll new file mode 100644 index 00000000000..f4020019c9a --- /dev/null +++ b/test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep.ll @@ -0,0 +1,101 @@ +; RUN: opt < %s -separate-const-offset-from-gep -dce -S | FileCheck %s + +; Several unit tests for -separate-const-offset-from-gep. The transformation +; heavily relies on TargetTransformInfo, so we put these tests under +; target-specific folders. + +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128" +; target triple is necessary; otherwise TargetTransformInfo rejects any +; addressing mode. +target triple = "nvptx64-unknown-unknown" + +%struct.S = type { float, double } + +@struct_array = global [1024 x %struct.S] zeroinitializer, align 16 +@float_2d_array = global [32 x [32 x float]] zeroinitializer, align 4 + +; We should not extract any struct field indices, because fields in a struct +; may have different types. +define double* @struct(i32 %i) { +entry: + %add = add nsw i32 %i, 5 + %idxprom = sext i32 %add to i64 + %p = getelementptr inbounds [1024 x %struct.S]* @struct_array, i64 0, i64 %idxprom, i32 1 + ret double* %p +} +; CHECK-LABEL: @struct +; CHECK: getelementptr [1024 x %struct.S]* @struct_array, i64 0, i32 %i, i32 1 + +; We should be able to trace into sext/zext if it's directly used as a GEP +; index. 
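+; (Editorial note) In @sext_zext below, the casts are stripped because the
+; GEP is inbounds, the extracted constants are 1 and 2, and the accumulated
+; byte offset is 1 * 32 * 4 + 2 * 4 = 136, matching the CHECK line.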
+define float* @sext_zext(i32 %i, i32 %j) {
+entry:
+  %i1 = add i32 %i, 1
+  %j2 = add i32 %j, 2
+  %i1.ext = sext i32 %i1 to i64
+  %j2.ext = zext i32 %j2 to i64
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i1.ext, i64 %j2.ext
+  ret float* %p
+}
+; CHECK-LABEL: @sext_zext
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i32 %i, i32 %j
+; CHECK: add i64 %{{[0-9]+}}, 136
+
+; We should be able to trace into sext/zext if it can be distributed to both
+; operands, e.g., sext (add nsw a, b) == add nsw (sext a), (sext b)
+define float* @ext_add_no_overflow(i64 %a, i32 %b, i64 %c, i32 %d) {
+  %b1 = add nsw i32 %b, 1
+  %b2 = sext i32 %b1 to i64
+  %i = add i64 %a, %b2
+  %d1 = add nuw i32 %d, 1
+  %d2 = zext i32 %d1 to i64
+  %j = add i64 %c, %d2
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %j
+  ret float* %p
+}
+; CHECK-LABEL: @ext_add_no_overflow
+; CHECK: [[BASE_PTR:%[0-9]+]] = getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %{{[0-9]+}}, i64 %{{[0-9]+}}
+; CHECK: [[BASE_INT:%[0-9]+]] = ptrtoint float* [[BASE_PTR]] to i64
+; CHECK: add i64 [[BASE_INT]], 132
+
+; We should treat "or" with no common bits (%k) as "add", and leave "or" with
+; potentially common bits (%l) as is.
+define float* @or(i64 %i) {
+entry:
+  %j = shl i64 %i, 2
+  %k = or i64 %j, 3 ; no common bits
+  %l = or i64 %j, 4 ; potentially common bits
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %k, i64 %l
+  ret float* %p
+}
+; CHECK-LABEL: @or
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %j, i64 %l
+; CHECK: add i64 %{{[0-9]+}}, 384
+
+; The subexpression (b + 5) is used in both "i = a + (b + 5)" and "*out = b +
+; 5". When extracting the constant offset 5, make sure "*out = b + 5" isn't
+; affected.
+define float* @expr(i64 %a, i64 %b, i64* %out) {
+entry:
+  %b5 = add i64 %b, 5
+  %i = add i64 %b5, %a
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 0
+  store i64 %b5, i64* %out
+  ret float* %p
+}
+; CHECK-LABEL: @expr
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %0, i64 0
+; CHECK: add i64 %{{[0-9]+}}, 640
+; CHECK: store i64 %b5, i64* %out
+
+; Verifies we handle "sub" correctly.
+define float* @sub(i64 %i, i64 %j) {
+  %i2 = sub i64 %i, 5 ; i - 5
+  %j2 = sub i64 5, %j ; 5 - j
+  %p = getelementptr inbounds [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i2, i64 %j2
+  ret float* %p
+}
+; CHECK-LABEL: @sub
+; CHECK: %[[j2:[0-9]+]] = sub i64 0, %j
+; CHECK: getelementptr [32 x [32 x float]]* @float_2d_array, i64 0, i64 %i, i64 %[[j2]]
+; CHECK: add i64 %{{[0-9]+}}, -620