//===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a virtual register map. This maps virtual registers to
// physical registers and virtual registers to stack slots. It is created and
// updated by a register allocator and then used by a machine code rewriter that
// adds spill code and rewrites virtual into physical register references.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_VIRTREGMAP_H
#define LLVM_CODEGEN_VIRTREGMAP_H

#include "llvm/Target/MRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/Support/Streams.h"
#include <map>

namespace llvm {
  class MachineInstr;
  class MachineFunction;
  class TargetInstrInfo;
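
  // A minimal usage sketch (illustrative only; MF, virtReg, physReg and
  // FoundFreeRegister are hypothetical values supplied by a register
  // allocator, not part of this interface):
  //
  //   VirtRegMap VRM(MF);                       // one map per MachineFunction
  //   if (FoundFreeRegister)                    // allocator decision (assumed)
  //     VRM.assignVirt2Phys(virtReg, physReg);  // map virtual -> physical
  //   else
  //     VRM.assignVirt2StackSlot(virtReg);      // spill: virtual -> stack slot
  //   // A Spiller (see below) then inserts spill code and rewrites virtual
  //   // register references into physical ones using the map.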

  class VirtRegMap {
  public:
    enum {
      NO_PHYS_REG = 0,
      NO_STACK_SLOT = (1L << 30)-1,
      MAX_STACK_SLOT = (1L << 18)-1
    };
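
    // Numbering convention (inferred from the constants above and the
    // ReMatId comment below): frame indices for spill slots stay within
    // [0, MAX_STACK_SLOT], re-materialization ids handed out by
    // assignVirtReMatId() start at MAX_STACK_SLOT+1, and NO_STACK_SLOT
    // means neither a slot nor a remat id has been assigned.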

    enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
    typedef std::multimap<MachineInstr*,
                          std::pair<unsigned, ModRef> > MI2VirtMapTy;

  private:
|
2006-09-05 02:12:02 +00:00
|
|
|
const TargetInstrInfo &TII;
|
|
|
|
|
2004-09-30 02:15:18 +00:00
|
|
|
MachineFunction &MF;
|
2004-10-01 00:35:07 +00:00
|
|
|
/// Virt2PhysMap - This is a virtual to physical register
|
|
|
|
/// mapping. Each virtual register is required to have an entry in
|
|
|
|
/// it; even spilled virtual registers (the register mapped to a
|
|
|
|
/// spilled register is the temporary used to load it from the
|
|
|
|
/// stack).
|
2007-02-01 05:32:05 +00:00
|
|
|
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// Virt2StackSlotMap - This is virtual register to stack slot
|
|
|
|
/// mapping. Each spilled virtual register has an entry in it
|
|
|
|
/// which corresponds to the stack slot this register is spilled
|
|
|
|
/// at.
|
2007-02-01 05:32:05 +00:00
|
|
|
IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;

    /// Virt2ReMatIdMap - This is a virtual register to rematerialization id
    /// mapping. Each spilled virtual register that should be remat'd has an
    /// entry in it which corresponds to the remat id.
    IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;

    /// Virt2SplitMap - This is a virtual register to split virtual register
    /// mapping.
    IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;

    /// Virt2SplitKillMap - This is a split virtual register to its last use
    /// (kill) index mapping.
    IndexedMap<unsigned> Virt2SplitKillMap;

    /// ReMatMap - This is a virtual register to re-materialized instruction
    /// mapping. Each virtual register whose definition is going to be
    /// re-materialized has an entry in it.
    IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;

    /// MI2VirtMap - This is a MachineInstr to virtual register
    /// mapping. In the case of memory spill code being folded into
    /// instructions, we need to know which virtual register was
    /// read/written by this instruction.
    MI2VirtMapTy MI2VirtMap;

    /// SpillPt2VirtMap - This records the virtual registers which should
    /// be spilled right after the MachineInstr due to live interval
    /// splitting.
    std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >
      SpillPt2VirtMap;

    /// RestorePt2VirtMap - This records the virtual registers which should
    /// be restored right before the MachineInstr due to live interval
    /// splitting.
    std::map<MachineInstr*, std::vector<unsigned> > RestorePt2VirtMap;

    /// ReMatId - Instead of assigning a stack slot to a virtual register
    /// that is to be rematerialized, a unique id is assigned. This keeps
    /// track of the highest id used so far. Note, this starts at (1<<18) to
    /// avoid conflicts with stack slot numbers.
    int ReMatId;

    VirtRegMap(const VirtRegMap&);     // DO NOT IMPLEMENT
    void operator=(const VirtRegMap&); // DO NOT IMPLEMENT

  public:
    explicit VirtRegMap(MachineFunction &mf);

    void grow();

    /// @brief returns true if the specified virtual register is
    /// mapped to a physical register
    bool hasPhys(unsigned virtReg) const {
      return getPhys(virtReg) != NO_PHYS_REG;
    }

    /// @brief returns the physical register mapped to the specified
    /// virtual register
    unsigned getPhys(unsigned virtReg) const {
      assert(MRegisterInfo::isVirtualRegister(virtReg));
      return Virt2PhysMap[virtReg];
    }

    /// @brief creates a mapping for the specified virtual register to
    /// the specified physical register
    void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
      assert(MRegisterInfo::isVirtualRegister(virtReg) &&
             MRegisterInfo::isPhysicalRegister(physReg));
      assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
             "attempt to assign physical register to already mapped "
             "virtual register");
      Virt2PhysMap[virtReg] = physReg;
    }

    /// @brief clears the specified virtual register's physical
    /// register mapping
    void clearVirt(unsigned virtReg) {
      assert(MRegisterInfo::isVirtualRegister(virtReg));
      assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
             "attempt to clear a not assigned virtual register");
      Virt2PhysMap[virtReg] = NO_PHYS_REG;
    }

    /// @brief clears all virtual to physical register mappings
    void clearAllVirt() {
      Virt2PhysMap.clear();
      grow();
    }

    /// @brief records virtReg is a split live interval from SReg.
    void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
      Virt2SplitMap[virtReg] = SReg;
    }

    /// @brief returns the live interval virtReg is split from.
    unsigned getPreSplitReg(unsigned virtReg) {
      return Virt2SplitMap[virtReg];
    }

    /// @brief returns true if the specified virtual register is not
    /// mapped to a stack slot or rematerialized.
    bool isAssignedReg(unsigned virtReg) const {
      if (getStackSlot(virtReg) == NO_STACK_SLOT &&
          getReMatId(virtReg) == NO_STACK_SLOT)
        return true;
      // Split register can be assigned a physical register as well as a
      // stack slot or remat id.
      return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
    }
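
    // In other words, a virtual register handled by this map is in one of
    // three states: mapped to a physical register only (isAssignedReg()
    // returns true), spilled to a stack slot or marked for remat, or split
    // and therefore carrying both a physical register and a slot/remat id.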

    /// @brief returns the stack slot mapped to the specified virtual
    /// register
    int getStackSlot(unsigned virtReg) const {
      assert(MRegisterInfo::isVirtualRegister(virtReg));
      return Virt2StackSlotMap[virtReg];
    }

    /// @brief returns the rematerialization id mapped to the specified virtual
    /// register
    int getReMatId(unsigned virtReg) const {
      assert(MRegisterInfo::isVirtualRegister(virtReg));
      return Virt2ReMatIdMap[virtReg];
    }

    /// @brief create a mapping for the specified virtual register to
    /// the next available stack slot
    int assignVirt2StackSlot(unsigned virtReg);
    /// @brief create a mapping for the specified virtual register to
    /// the specified stack slot
    void assignVirt2StackSlot(unsigned virtReg, int frameIndex);

    /// @brief assign a unique re-materialization id to the specified
    /// virtual register.
    int assignVirtReMatId(unsigned virtReg);
    /// @brief assign the specified re-materialization id to the specified
    /// virtual register.
    void assignVirtReMatId(unsigned virtReg, int id);

    /// @brief returns true if the specified virtual register is being
    /// re-materialized.
    bool isReMaterialized(unsigned virtReg) const {
      return ReMatMap[virtReg] != NULL;
    }

    /// @brief returns the original machine instruction being re-issued
    /// to re-materialize the specified virtual register.
    MachineInstr *getReMaterializedMI(unsigned virtReg) const {
      return ReMatMap[virtReg];
    }

    /// @brief records that the specified virtual register will be
    /// re-materialized and the original instruction which will be re-issued
    /// for this purpose.
    void setVirtIsReMaterialized(unsigned virtReg, MachineInstr *def) {
      ReMatMap[virtReg] = def;
    }
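
    // Sketch of the remat bookkeeping (ReMatDefMI is a hypothetical,
    // trivially re-materializable defining instruction picked by the
    // spilling heuristic):
    //   VRM.setVirtIsReMaterialized(virtReg, ReMatDefMI);
    //   int id = VRM.assignVirtReMatId(virtReg);  // id > MAX_STACK_SLOT
    //   // The rewriter later re-issues ReMatDefMI at each use instead of
    //   // reloading the value from a stack slot.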

    /// @brief record the last use (kill) of a split virtual register.
    void addKillPoint(unsigned virtReg, unsigned index) {
      Virt2SplitKillMap[virtReg] = index;
    }

    /// @brief returns the last use (kill) index of a split virtual register.
    unsigned getKillPoint(unsigned virtReg) const {
      return Virt2SplitKillMap[virtReg];
    }

    /// @brief remove the last use (kill) of a split virtual register.
    void removeKillPoint(unsigned virtReg) {
      Virt2SplitKillMap[virtReg] = 0;
    }

    /// @brief returns true if the specified MachineInstr is a spill point.
    bool isSpillPt(MachineInstr *Pt) const {
      return SpillPt2VirtMap.find(Pt) != SpillPt2VirtMap.end();
    }

    /// @brief returns the virtual registers that should be spilled due to
    /// splitting right after the specified MachineInstr.
    std::vector<std::pair<unsigned,bool> > &getSpillPtSpills(MachineInstr *Pt) {
      return SpillPt2VirtMap[Pt];
    }

    /// @brief records the specified MachineInstr as a spill point for virtReg.
    void addSpillPoint(unsigned virtReg, bool isKill, MachineInstr *Pt) {
      if (SpillPt2VirtMap.find(Pt) != SpillPt2VirtMap.end())
        SpillPt2VirtMap[Pt].push_back(std::make_pair(virtReg, isKill));
      else {
        std::vector<std::pair<unsigned,bool> > Virts;
        Virts.push_back(std::make_pair(virtReg, isKill));
        SpillPt2VirtMap.insert(std::make_pair(Pt, Virts));
      }
    }
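
    // Sketch of how a live-interval splitter might use these hooks (NewReg,
    // OldReg, SpillMI and RestoreMI are hypothetical; NewReg is an interval
    // split from OldReg):
    //   VRM.setIsSplitFromReg(NewReg, OldReg);
    //   VRM.addSpillPoint(NewReg, /*isKill=*/true, SpillMI);
    //   VRM.addRestorePoint(NewReg, RestoreMI);
    //   // The spiller emits a store right after SpillMI and a reload right
    //   // before RestoreMI, and may reuse OldReg's stack slot for NewReg.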

    /// @brief transfers all spill points recorded for Old to New.
    void transferSpillPts(MachineInstr *Old, MachineInstr *New) {
      std::map<MachineInstr*,std::vector<std::pair<unsigned,bool> > >::iterator
        I = SpillPt2VirtMap.find(Old);
      if (I == SpillPt2VirtMap.end())
        return;
      while (!I->second.empty()) {
        unsigned virtReg = I->second.back().first;
        bool isKill = I->second.back().second;
        I->second.pop_back();
        addSpillPoint(virtReg, isKill, New);
      }
      SpillPt2VirtMap.erase(I);
    }

    /// @brief returns true if the specified MachineInstr is a restore point.
    bool isRestorePt(MachineInstr *Pt) const {
      return RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end();
    }

    /// @brief returns the virtual registers that should be restored due to
    /// splitting right before the specified MachineInstr.
    std::vector<unsigned> &getRestorePtRestores(MachineInstr *Pt) {
      return RestorePt2VirtMap[Pt];
    }

    /// @brief records the specified MachineInstr as a restore point for virtReg.
    void addRestorePoint(unsigned virtReg, MachineInstr *Pt) {
      if (RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end())
        RestorePt2VirtMap[Pt].push_back(virtReg);
      else {
        std::vector<unsigned> Virts;
        Virts.push_back(virtReg);
        RestorePt2VirtMap.insert(std::make_pair(Pt, Virts));
      }
    }

    /// @brief transfers all restore points recorded for Old to New.
    void transferRestorePts(MachineInstr *Old, MachineInstr *New) {
      std::map<MachineInstr*,std::vector<unsigned> >::iterator I =
        RestorePt2VirtMap.find(Old);
      if (I == RestorePt2VirtMap.end())
        return;
      while (!I->second.empty()) {
        unsigned virtReg = I->second.back();
        I->second.pop_back();
        addRestorePoint(virtReg, New);
      }
      RestorePt2VirtMap.erase(I);
    }

    /// @brief Updates information about the specified virtual register's value
    /// folded into the newMI machine instruction.
    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
                    ModRef MRInfo);

    /// @brief Updates information about the specified virtual register's value
    /// folded into the specified machine instruction.
    void virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo);

    /// @brief returns the virtual registers' values folded in memory
    /// operands of this instruction
    std::pair<MI2VirtMapTy::const_iterator, MI2VirtMapTy::const_iterator>
    getFoldedVirts(MachineInstr* MI) const {
      return MI2VirtMap.equal_range(MI);
    }
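
    // Example of walking the folded-virtual-register records of an
    // instruction (MI is assumed to be a MachineInstr* of interest):
    //   std::pair<MI2VirtMapTy::const_iterator, MI2VirtMapTy::const_iterator>
    //     Range = VRM.getFoldedVirts(MI);
    //   for (MI2VirtMapTy::const_iterator I = Range.first; I != Range.second;
    //        ++I) {
    //     unsigned FoldedReg = I->second.first;   // the virtual register
    //     ModRef MR          = I->second.second;  // isRef, isMod or isModRef
    //   }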

    /// RemoveMachineInstrFromMaps - MI is being erased, remove it from the
    /// folded instruction map and the spill / restore point maps.
    void RemoveMachineInstrFromMaps(MachineInstr *MI) {
      MI2VirtMap.erase(MI);
      SpillPt2VirtMap.erase(MI);
      RestorePt2VirtMap.erase(MI);
    }

    void print(std::ostream &OS) const;
    void print(std::ostream *OS) const { if (OS) print(*OS); }
    void dump() const;
  };

  inline std::ostream *operator<<(std::ostream *OS, const VirtRegMap &VRM) {
    VRM.print(OS);
    return OS;
  }

  inline std::ostream &operator<<(std::ostream &OS, const VirtRegMap &VRM) {
    VRM.print(OS);
    return OS;
  }

  /// Spiller interface: Implementations of this interface assign spilled
  /// virtual registers to stack slots, rewriting the code.
  struct Spiller {
    virtual ~Spiller();
    virtual bool runOnMachineFunction(MachineFunction &MF,
                                      VirtRegMap &VRM) = 0;
  };

  /// createSpiller - Create and return a spiller object, as specified on the
  /// command line.
  Spiller* createSpiller();
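
  // Illustrative driver code (a sketch of how an allocator pass might finish
  // up; MF and VRM are assumed to already exist):
  //   Spiller *S = createSpiller();
  //   S->runOnMachineFunction(MF, VRM);  // add spill code, rewrite vregs
  //   delete S;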

} // End llvm namespace

#endif