From 3cd4e5095b06b2be94a0cab3060272aae3460167 Mon Sep 17 00:00:00 2001
From: Evan Cheng
Date: Fri, 16 Mar 2007 08:46:27 +0000
Subject: [PATCH] Sink a binary expression into its use blocks if it is a loop
 invariant computation used as GEP indexes and if the expression can be folded
 into target addressing mode of GEP load / store use types.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@35123 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp | 92 +++++++++++++++++++
 1 file changed, 92 insertions(+)

diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 62ee06e9422..e40171a6b0d 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -13,6 +13,7 @@
 
 #define DEBUG_TYPE "isel"
 #include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/CodeGen/SelectionDAGISel.h"
 #include "llvm/CodeGen/ScheduleDAG.h"
 #include "llvm/CallingConv.h"
@@ -58,6 +59,9 @@ ViewSchedDAGs("view-sched-dags", cl::Hidden,
 static const bool ViewISelDAGs = 0, ViewSchedDAGs = 0;
 #endif
 
+static cl::opt<bool>
+EnableGEPIndexSink("enable-gep-index-sinking", cl::Hidden,
+  cl::desc("Sink invariant GEP index computation into use blocks"));
 
 //===---------------------------------------------------------------------===//
 ///
@@ -3703,6 +3707,8 @@ void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const {
   // FIXME: we only modify the CFG to split critical edges.  This
   // updates dom and loop info.
   AU.addRequired<AliasAnalysis>();
+  AU.addRequired<LoopInfo>();
+  AU.setPreservesAll();
 }
@@ -3959,6 +3965,88 @@ static bool OptimizeGEPExpression(GetElementPtrInst *GEPI,
   return true;
 }
 
+/// isLoopInvariantInst - Returns true if all operands of the instruction are
+/// loop invariants in the specified loop.
+static bool isLoopInvariantInst(Instruction *I, Loop *L) {
+  // The instruction is loop invariant if all of its operands are loop-invariant
+  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
+    if (!L->isLoopInvariant(I->getOperand(i)))
+      return false;
+  return true;
+}
+
+/// SinkInvariantGEPIndex - If a GEP instruction has a variable index that has
+/// been hoisted out of the loop by LICM pass, sink it back into the use BB
+/// if it can be determined that the index computation can be folded into the
+/// addressing mode of the load / store uses.
+static bool SinkInvariantGEPIndex(BinaryOperator *BinOp, LoopInfo *loopInfo,
+                                  const TargetLowering &TLI) {
+  if (!EnableGEPIndexSink)
+    return false;
+
+  // Only look at Add / Sub for now.
+  if (BinOp->getOpcode() != Instruction::Add &&
+      BinOp->getOpcode() != Instruction::Sub)
+    return false;
+
+  /// InsertedOps - Only insert a duplicate in each block once.
+  std::map<BasicBlock*, BinaryOperator*> InsertedOps;
+
+  bool MadeChange = false;
+  BasicBlock *DefBB = BinOp->getParent();
+  for (Value::use_iterator UI = BinOp->use_begin(), E = BinOp->use_end();
+       UI != E; ) {
+    Instruction *User = cast<Instruction>(*UI);
+
+    // Preincrement use iterator so we don't invalidate it.
+    ++UI;
+
+    // Only look for GEP use in another block.
+    if (User->getParent() == DefBB) continue;
+
+    if (isa<GetElementPtrInst>(User)) {
+      BasicBlock *UserBB = User->getParent();
+      Loop *L = loopInfo->getLoopFor(UserBB);
+
+      // Only sink if expression is a loop invariant in the use BB.
+      if (isLoopInvariantInst(BinOp, L) && !User->use_empty()) {
+        const Type *UseTy = NULL;
+        // FIXME: We are assuming all the uses of the GEP will have the
+        // same type.
+        Instruction *GEPUser = cast<Instruction>(*User->use_begin());
+        if (LoadInst *Load = dyn_cast<LoadInst>(GEPUser))
+          UseTy = Load->getType();
+        else if (StoreInst *Store = dyn_cast<StoreInst>(GEPUser))
+          UseTy = Store->getOperand(0)->getType();
+
+        // Check if it is possible to fold the expression to address mode.
+        if (UseTy &&
+            TLI.isLegalAddressExpression(Instruction::Add, BinOp->getOperand(0),
+                                         BinOp->getOperand(1), UseTy)) {
+          // Sink it into user block.
+          BinaryOperator *&InsertedOp = InsertedOps[UserBB];
+          if (!InsertedOp) {
+            BasicBlock::iterator InsertPt = UserBB->begin();
+            while (isa<PHINode>(InsertPt)) ++InsertPt;
+
+            InsertedOp =
+              BinaryOperator::create(BinOp->getOpcode(), BinOp->getOperand(0),
+                                     BinOp->getOperand(1), "", InsertPt);
+          }
+
+          User->replaceUsesOfWith(BinOp, InsertedOp);
+          MadeChange = true;
+        }
+      }
+    }
+  }
+
+  if (BinOp->use_empty())
+    BinOp->eraseFromParent();
+
+  return MadeChange;
+}
+
 
 /// SplitEdgeNicely - Split the critical edge from TI to it's specified
 /// successor if it will improve codegen.  We only do this if the successor has
@@ -4021,6 +4109,8 @@ bool SelectionDAGISel::runOnFunction(Function &Fn) {
   RegMap = MF.getSSARegMap();
   DOUT << "\n\n\n=== " << Fn.getName() << "\n";
 
+  LoopInfo *loopInfo = &getAnalysis<LoopInfo>();
+
   // First, split all critical edges.
   //
   // In this pass we also look for GEP and cast instructions that are used
@@ -4089,6 +4179,8 @@ bool SelectionDAGISel::runOnFunction(Function &Fn) {
         // If, after promotion, these are the same types, this is a noop copy.
         if (SrcVT == DstVT)
           MadeChange |= OptimizeNoopCopyExpression(CI);
+      } else if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(I)) {
+        MadeChange |= SinkInvariantGEPIndex(BinOp, loopInfo, TLI);
       }
     }
   }
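
For context, a minimal C++ sketch of the kind of source pattern this change targets; the function and variable names below are hypothetical and are not taken from the patch or its tests:

  // Hypothetical illustration only.  'Base + 4' is loop invariant, so LICM
  // hoists the add into the loop preheader, but its only use is as a GEP
  // index inside the loop.  If the target can fold that add into the
  // load/store addressing mode (for example as a constant displacement),
  // sinking it back into the use block lets instruction selection absorb it
  // instead of keeping a register live across the whole loop.
  void bumpColumn(int **Rows, int Base, int N) {
    for (int i = 0; i < N; ++i)
      Rows[i][Base + 4] += i;   // invariant index feeding a GEP load/store
  }

Note that SinkInvariantGEPIndex only sinks when TLI.isLegalAddressExpression reports the expression can be folded for the load/store use type, and the whole transformation is off by default behind the hidden -enable-gep-index-sinking flag.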