//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
|
2006-12-07 01:30:32 +00:00
|
|
|
#include "llvm/Analysis/LoopInfo.h"
|
2005-07-30 00:12:19 +00:00
|
|
|
using namespace llvm;
|
|
|
|
|
2006-02-04 09:51:53 +00:00
|
|
|
/// InsertCastOfTo - Insert a cast of V to the specified type, doing what
/// we can to share the casts.  Reuses an existing cast of V with the same
/// opcode/type when one is found among V's users, moving it to a dominating
/// position if necessary; otherwise inserts a fresh CastInst.
Value *SCEVExpander::InsertCastOfTo(Instruction::CastOps opcode, Value *V,
                                    const Type *Ty) {
  // FIXME: keep track of the cast instruction.
  // Constants fold to a constant expression; no instruction is emitted.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(opcode, C, Ty);

  if (Argument *A = dyn_cast<Argument>(V)) {
    // Check to see if there is already a cast!
    for (Value::use_iterator UI = A->use_begin(), E = A->use_end();
         UI != E; ++UI) {
      if ((*UI)->getType() == Ty)
        if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
          if (CI->getOpcode() == opcode) {
            // If the cast isn't the first instruction of the function, move it
            // to the front of the entry block so it dominates all users.
            if (BasicBlock::iterator(CI) !=
                A->getParent()->getEntryBlock().begin()) {
              CI->moveBefore(A->getParent()->getEntryBlock().begin());
            }
            return CI;
          }
    }
    // No reusable cast of this argument: create one at the top of the entry
    // block, where it dominates every possible use.
    return CastInst::Create(opcode, V, Ty, V->getName(),
                            A->getParent()->getEntryBlock().begin());
  }

  Instruction *I = cast<Instruction>(V);

  // Check to see if there is already a cast.  If there is, use it.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    if ((*UI)->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(cast<Instruction>(*UI)))
        if (CI->getOpcode() == opcode) {
          // Compute the earliest legal spot after I: just past I itself, or
          // (for an invoke) the start of its normal destination, skipping any
          // phi nodes in either case.
          BasicBlock::iterator It = I; ++It;
          if (isa<InvokeInst>(I))
            It = cast<InvokeInst>(I)->getNormalDest()->begin();
          while (isa<PHINode>(It)) ++It;
          if (It != BasicBlock::iterator(CI)) {
            // Splice the cast immediately after the operand in question.
            CI->moveBefore(It);
          }
          return CI;
        }
  }

  // No reusable cast found: insert a new one immediately after I (or into an
  // invoke's normal destination), past any phi nodes.
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP)) ++IP;
  return CastInst::Create(opcode, V, Ty, V->getName(), IP);
}
|
|
|
|
|
2007-04-13 05:04:18 +00:00
|
|
|
/// InsertBinop - Insert the specified binary operator, doing a small amount
|
|
|
|
/// of work to avoid inserting an obviously redundant operation.
|
|
|
|
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode, Value *LHS,
|
2008-06-15 19:07:39 +00:00
|
|
|
Value *RHS, Instruction *InsertPt) {
|
2007-06-15 19:21:55 +00:00
|
|
|
// Fold a binop with constant operands.
|
|
|
|
if (Constant *CLHS = dyn_cast<Constant>(LHS))
|
|
|
|
if (Constant *CRHS = dyn_cast<Constant>(RHS))
|
|
|
|
return ConstantExpr::get(Opcode, CLHS, CRHS);
|
|
|
|
|
2007-04-13 05:04:18 +00:00
|
|
|
// Do a quick scan to see if we have this binop nearby. If so, reuse it.
|
|
|
|
unsigned ScanLimit = 6;
|
2008-06-15 19:07:39 +00:00
|
|
|
BasicBlock::iterator BlockBegin = InsertPt->getParent()->begin();
|
|
|
|
if (InsertPt != BlockBegin) {
|
|
|
|
// Scanning starts from the last instruction before InsertPt.
|
|
|
|
BasicBlock::iterator IP = InsertPt;
|
|
|
|
--IP;
|
|
|
|
for (; ScanLimit; --IP, --ScanLimit) {
|
|
|
|
if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(IP))
|
|
|
|
if (BinOp->getOpcode() == Opcode && BinOp->getOperand(0) == LHS &&
|
|
|
|
BinOp->getOperand(1) == RHS)
|
|
|
|
return BinOp;
|
|
|
|
if (IP == BlockBegin) break;
|
|
|
|
}
|
2007-04-13 05:04:18 +00:00
|
|
|
}
|
2008-06-15 19:07:39 +00:00
|
|
|
|
|
|
|
// If we haven't found this binop, insert it.
|
2008-05-16 19:29:10 +00:00
|
|
|
return BinaryOperator::Create(Opcode, LHS, RHS, "tmp", InsertPt);
|
2007-04-13 05:04:18 +00:00
|
|
|
}
|
|
|
|
|
2008-06-18 16:37:11 +00:00
|
|
|
// visitAddExpr - Lower an n-ary SCEV add by expanding the last operand and
// then folding the remaining operands in with add instructions, walking
// right-to-left.
Value *SCEVExpander::visitAddExpr(SCEVAddExpr *S) {
  Value *Sum = expand(S->getOperand(S->getNumOperands()-1));
  for (int Idx = S->getNumOperands()-2; Idx >= 0; --Idx)
    Sum = InsertBinop(Instruction::Add, Sum, expand(S->getOperand(Idx)),
                      InsertPt);
  return Sum;
}
|
|
|
|
|
2005-07-30 00:12:19 +00:00
|
|
|
// visitMulExpr - Lower an n-ary SCEV multiply as a chain of mul instructions.
// A leading -1 operand is peeled off and emitted as a final negation
// (0 - x) rather than a multiply by -1.
Value *SCEVExpander::visitMulExpr(SCEVMulExpr *S) {
  int FirstOp = 0;  // Becomes 1 when operand 0 is the constant -1.
  if (SCEVConstant *HeadC = dyn_cast<SCEVConstant>(S->getOperand(0)))
    if (HeadC->getValue()->isAllOnesValue())
      FirstOp = 1;

  // Expand the last operand first, then fold in the rest right-to-left,
  // stopping short of operand 0 when it is the peeled -1.
  int OpIdx = S->getNumOperands()-2;
  Value *Prod = expand(S->getOperand(OpIdx+1));
  for (; OpIdx >= FirstOp; --OpIdx)
    Prod = InsertBinop(Instruction::Mul, Prod, expand(S->getOperand(OpIdx)),
                       InsertPt);

  // -1 * ...  --->  0 - ...
  if (FirstOp == 1)
    Prod = InsertBinop(Instruction::Sub,
                       Constant::getNullValue(Prod->getType()), Prod,
                       InsertPt);
  return Prod;
}
|
|
|
|
|
2008-07-08 05:05:37 +00:00
|
|
|
// visitUDivExpr - Lower an unsigned division.  Division by a constant power
// of two is strength-reduced to a logical shift right; division by one is a
// no-op and returns the numerator directly (previously this emitted a
// redundant "lshr %x, 0", since logBase2(1) == 0).
Value *SCEVExpander::visitUDivExpr(SCEVUDivExpr *S) {
  Value *LHS = expand(S->getLHS());
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    // x /u 1 == x; don't emit a shift-by-zero instruction.
    if (RHS == 1)
      return LHS;
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(S->getType(), RHS.logBase2()),
                         InsertPt);
  }

  Value *RHS = expand(S->getRHS());
  return InsertBinop(Instruction::UDiv, LHS, RHS, InsertPt);
}
|
|
|
|
|
2008-12-02 08:05:48 +00:00
|
|
|
// visitSDivExpr - Lower a signed division with an sdiv instruction.
// Do not fold sdiv into ashr, unless you know that LHS is positive.  On
// negative values, it rounds the wrong way.
Value *SCEVExpander::visitSDivExpr(SCEVSDivExpr *S) {
  Value *Numerator = expand(S->getLHS());
  Value *Denominator = expand(S->getRHS());
  return InsertBinop(Instruction::SDiv, Numerator, Denominator, InsertPt);
}
|
|
|
|
|
2005-07-30 00:12:19 +00:00
|
|
|
/// visitAddRecExpr - Expand an add recurrence {Start,+,Step,...} for loop L.
/// Strategy: strip a non-zero start into a separate add, emit the canonical
/// induction variable directly for {0,+,1}, multiply the canonical IV by the
/// stride for other affine recurrences, and fall back to evaluating the
/// closed form for higher-order chains.
Value *SCEVExpander::visitAddRecExpr(SCEVAddRecExpr *S) {
  const Type *Ty = S->getType();
  const Loop *L = S->getLoop();
  // We cannot yet do fp recurrences, e.g. the xform of {X,+,F} --> X+{0,+,F}
  assert(Ty->isInteger() && "Cannot expand fp recurrences yet!");

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    Value *Start = expand(S->getStart());
    // Rebuild the recurrence with a zero start and expand that instead.
    std::vector<SCEVHandle> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getIntegerSCEV(0, Ty);
    Value *Rest = expand(SE.getAddRecExpr(NewOps, L));

    // FIXME: look for an existing add to use.
    return InsertBinop(Instruction::Add, Rest, Start, InsertPt);
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() &&
      S->getOperand(1) == SE.getIntegerSCEV(1, Ty)) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
    PN->addIncoming(Constant::getNullValue(Ty), L->getLoopPreheader());

    // Find the back-edge predecessor of the header: of the (at most two)
    // predecessors, it is the one contained in the loop.
    pred_iterator HPI = pred_begin(Header);
    assert(HPI != pred_end(Header) && "Loop with zero preds???");
    if (!L->contains(*HPI)) ++HPI;
    assert(HPI != pred_end(Header) && L->contains(*HPI) &&
           "No backedge in loop?");

    // Insert a unit add instruction right before the terminator corresponding
    // to the back-edge.
    Constant *One = ConstantInt::get(Ty, 1);
    Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
                                                 (*HPI)->getTerminator());

    // Hook the increment into the PHI along the back-edge (the predecessor
    // that is not the preheader).
    pred_iterator PI = pred_begin(Header);
    if (*PI == L->getLoopPreheader())
      ++PI;
    PN->addIncoming(Add, *PI);
    return PN;
  }

  // Get the canonical induction variable I for this loop.
  Value *I = getOrInsertCanonicalInductionVariable(L, Ty);

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine()) {   // {0,+,F} --> i*F
    Value *F = expand(S->getOperand(1));

    // IF the step is by one, just return the inserted IV.
    if (ConstantInt *CI = dyn_cast<ConstantInt>(F))
      if (CI->getValue() == 1)
        return I;

    // If the insert point is directly inside of the loop, emit the multiply at
    // the insert point.  Otherwise, L is a loop that is a parent of the insert
    // point loop.  If we can, move the multiply to the outer most loop that it
    // is safe to be in, so LSR can later reduce it in the right loop.
    Instruction *MulInsertPt = InsertPt;
    Loop *InsertPtLoop = LI.getLoopFor(MulInsertPt->getParent());
    if (InsertPtLoop != L && InsertPtLoop &&
        L->contains(InsertPtLoop->getHeader())) {
      do {
        // If we cannot hoist the multiply out of this loop, don't.
        if (!InsertPtLoop->isLoopInvariant(F)) break;

        BasicBlock *InsertPtLoopPH = InsertPtLoop->getLoopPreheader();

        // If this loop hasn't got a preheader, we aren't able to hoist the
        // multiply.
        if (!InsertPtLoopPH)
          break;

        // Otherwise, move the insert point to the preheader.
        MulInsertPt = InsertPtLoopPH->getTerminator();
        InsertPtLoop = InsertPtLoop->getParentLoop();
      } while (InsertPtLoop != L);
    }

    return InsertBinop(Instruction::Mul, I, F, MulInsertPt);
  }

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form.  This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  SCEVHandle IH = SE.getUnknown(I);   // Get I as a "symbolic" SCEV.

  SCEVHandle V = S->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  return expand(V);
}
|
2007-08-20 21:17:26 +00:00
|
|
|
|
2008-06-22 19:09:18 +00:00
|
|
|
// visitTruncateExpr - Expand the operand, then narrow it to the expression's
// type (a bitcast if no actual narrowing is required).
Value *SCEVExpander::visitTruncateExpr(SCEVTruncateExpr *S) {
  Value *Operand = expand(S->getOperand());
  return CastInst::CreateTruncOrBitCast(Operand, S->getType(), "tmp.",
                                        InsertPt);
}
|
|
|
|
|
|
|
|
// visitZeroExtendExpr - Expand the operand, then zero-extend it to the
// expression's type (a bitcast if the widths already match).
Value *SCEVExpander::visitZeroExtendExpr(SCEVZeroExtendExpr *S) {
  Value *Operand = expand(S->getOperand());
  return CastInst::CreateZExtOrBitCast(Operand, S->getType(), "tmp.",
                                       InsertPt);
}
|
|
|
|
|
|
|
|
// visitSignExtendExpr - Expand the operand, then sign-extend it to the
// expression's type (a bitcast if the widths already match).
Value *SCEVExpander::visitSignExtendExpr(SCEVSignExtendExpr *S) {
  Value *Operand = expand(S->getOperand());
  return CastInst::CreateSExtOrBitCast(Operand, S->getType(), "tmp.",
                                       InsertPt);
}
|
|
|
|
|
2007-11-25 22:41:31 +00:00
|
|
|
// visitSMaxExpr - Lower an n-ary signed max as a left-to-right chain of
// icmp sgt / select pairs.
Value *SCEVExpander::visitSMaxExpr(SCEVSMaxExpr *S) {
  Value *Best = expand(S->getOperand(0));
  for (unsigned Idx = 1, NumOps = S->getNumOperands(); Idx != NumOps; ++Idx) {
    Value *Cand = expand(S->getOperand(Idx));
    Value *Cond = new ICmpInst(ICmpInst::ICMP_SGT, Best, Cand, "tmp",
                               InsertPt);
    Best = SelectInst::Create(Cond, Best, Cand, "smax", InsertPt);
  }
  return Best;
}
|
|
|
|
|
2008-02-20 06:48:22 +00:00
|
|
|
// visitUMaxExpr - Lower an n-ary unsigned max as a left-to-right chain of
// icmp ugt / select pairs.
Value *SCEVExpander::visitUMaxExpr(SCEVUMaxExpr *S) {
  Value *Best = expand(S->getOperand(0));
  for (unsigned Idx = 1, NumOps = S->getNumOperands(); Idx != NumOps; ++Idx) {
    Value *Cand = expand(S->getOperand(Idx));
    Value *Cond = new ICmpInst(ICmpInst::ICMP_UGT, Best, Cand, "tmp",
                               InsertPt);
    Best = SelectInst::Create(Cond, Best, Cand, "umax", InsertPt);
  }
  return Best;
}
|
|
|
|
|
2008-06-22 19:09:18 +00:00
|
|
|
// expandCodeFor - Record where newly-emitted instructions should be placed,
// then expand the given SCEV into code at that point.
Value *SCEVExpander::expandCodeFor(SCEVHandle SH, Instruction *IP) {
  this->InsertPt = IP;
  return expand(SH);
}
|
|
|
|
|
2007-08-20 21:17:26 +00:00
|
|
|
// expand - Memoized expansion driver: return the previously-emitted value
// for S if there is one, otherwise generate code via visit() and cache it.
Value *SCEVExpander::expand(SCEV *S) {
  // Check to see if we already expanded this.
  std::map<SCEVHandle, Value*>::iterator ExistingIt =
    InsertedExpressions.find(S);
  if (ExistingIt != InsertedExpressions.end())
    return ExistingIt->second;

  // Note that visit() may recurse back into expand() and mutate the map, so
  // the cache insertion must happen after visit() returns.
  Value *Result = visit(S);
  InsertedExpressions[S] = Result;
  return Result;
}
|