2004-02-23 23:08:11 +00:00
|
|
|
//===-- llvm/CodeGen/VirtRegMap.h - Virtual Register Map -*- C++ -*--------===//
|
|
|
|
//
|
|
|
|
// The LLVM Compiler Infrastructure
|
|
|
|
//
|
2007-12-29 20:36:04 +00:00
|
|
|
// This file is distributed under the University of Illinois Open Source
|
|
|
|
// License. See LICENSE.TXT for details.
|
2004-02-23 23:08:11 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
//
|
2004-09-30 01:54:45 +00:00
|
|
|
// This file implements a virtual register map. This maps virtual registers to
|
|
|
|
// physical registers and virtual registers to stack slots. It is created and
|
|
|
|
// updated by a register allocator and then used by a machine code rewriter that
|
|
|
|
// adds spill code and rewrites virtual into physical register references.
|
2004-02-23 23:08:11 +00:00
|
|
|
//
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
#ifndef LLVM_CODEGEN_VIRTREGMAP_H
|
|
|
|
#define LLVM_CODEGEN_VIRTREGMAP_H
|
|
|
|
|
2009-03-13 05:55:11 +00:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2009-09-04 20:41:11 +00:00
|
|
|
#include "llvm/CodeGen/LiveInterval.h"
|
2008-02-10 18:45:23 +00:00
|
|
|
#include "llvm/Target/TargetRegisterInfo.h"
|
2008-04-11 17:53:36 +00:00
|
|
|
#include "llvm/ADT/BitVector.h"
|
2009-05-03 18:32:42 +00:00
|
|
|
#include "llvm/ADT/DenseMap.h"
|
2007-02-01 05:32:05 +00:00
|
|
|
#include "llvm/ADT/IndexedMap.h"
|
2008-02-27 03:04:06 +00:00
|
|
|
#include "llvm/ADT/SmallPtrSet.h"
|
2009-01-05 17:59:02 +00:00
|
|
|
#include "llvm/ADT/SmallVector.h"
|
2004-03-01 20:05:10 +00:00
|
|
|
#include <map>
|
2004-02-23 23:08:11 +00:00
|
|
|
|
|
|
|
namespace llvm {
|
2009-05-03 18:32:42 +00:00
|
|
|
class LiveIntervals;
|
2004-09-30 01:54:45 +00:00
|
|
|
class MachineInstr;
|
2007-08-07 16:34:05 +00:00
|
|
|
class MachineFunction;
|
2009-06-14 20:22:55 +00:00
|
|
|
class MachineRegisterInfo;
|
2006-09-05 02:12:02 +00:00
|
|
|
class TargetInstrInfo;
|
2009-05-04 03:30:11 +00:00
|
|
|
class TargetRegisterInfo;
|
2009-07-24 10:36:58 +00:00
|
|
|
class raw_ostream;
|
2004-09-30 01:54:45 +00:00
|
|
|
|
2009-03-13 05:55:11 +00:00
|
|
|
class VirtRegMap : public MachineFunctionPass {
|
2004-09-30 01:54:45 +00:00
|
|
|
public:
|
2007-03-20 08:13:50 +00:00
|
|
|
enum {
|
|
|
|
NO_PHYS_REG = 0,
|
2007-04-04 07:40:01 +00:00
|
|
|
NO_STACK_SLOT = (1L << 30)-1,
|
|
|
|
MAX_STACK_SLOT = (1L << 18)-1
|
2007-03-20 08:13:50 +00:00
|
|
|
};
|
|
|
|
|
2006-05-01 21:16:03 +00:00
|
|
|
enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
|
2004-10-01 23:15:36 +00:00
|
|
|
typedef std::multimap<MachineInstr*,
|
|
|
|
std::pair<unsigned, ModRef> > MI2VirtMapTy;
|
2004-09-30 01:54:45 +00:00
|
|
|
|
|
|
|
private:
|
2009-06-14 20:22:55 +00:00
|
|
|
MachineRegisterInfo *MRI;
|
2009-03-13 05:55:11 +00:00
|
|
|
const TargetInstrInfo *TII;
|
2009-05-04 03:30:11 +00:00
|
|
|
const TargetRegisterInfo *TRI;
|
2009-03-13 05:55:11 +00:00
|
|
|
MachineFunction *MF;
|
2009-05-04 03:30:11 +00:00
|
|
|
|
|
|
|
DenseMap<const TargetRegisterClass*, BitVector> allocatableRCRegs;
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// Virt2PhysMap - This is a virtual to physical register
|
|
|
|
/// mapping. Each virtual register is required to have an entry in
|
|
|
|
/// it; even spilled virtual registers (the register mapped to a
|
|
|
|
/// spilled register is the temporary used to load it from the
|
|
|
|
/// stack).
|
2007-02-01 05:32:05 +00:00
|
|
|
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2PhysMap;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// Virt2StackSlotMap - This is virtual register to stack slot
|
|
|
|
/// mapping. Each spilled virtual register has an entry in it
|
|
|
|
/// which corresponds to the stack slot this register is spilled
|
|
|
|
/// at.
|
2007-02-01 05:32:05 +00:00
|
|
|
IndexedMap<int, VirtReg2IndexFunctor> Virt2StackSlotMap;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
|
2008-03-12 20:50:04 +00:00
|
|
|
/// Virt2ReMatIdMap - This is virtual register to rematerialization id
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
/// mapping. Each spilled virtual register that should be remat'd has an
|
|
|
|
/// entry in it which corresponds to the remat id.
|
2007-08-13 23:45:17 +00:00
|
|
|
IndexedMap<int, VirtReg2IndexFunctor> Virt2ReMatIdMap;
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
|
|
|
|
/// Virt2SplitMap - This is virtual register to splitted virtual register
|
|
|
|
/// mapping.
|
|
|
|
IndexedMap<unsigned, VirtReg2IndexFunctor> Virt2SplitMap;
|
|
|
|
|
2007-12-05 09:51:10 +00:00
|
|
|
/// Virt2SplitKillMap - This is splitted virtual register to its last use
|
2007-12-05 10:24:35 +00:00
|
|
|
/// (kill) index mapping.
|
2009-11-03 23:52:08 +00:00
|
|
|
IndexedMap<SlotIndex> Virt2SplitKillMap;
|
2007-12-05 09:51:10 +00:00
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
/// ReMatMap - This is virtual register to re-materialized instruction
|
|
|
|
/// mapping. Each virtual register whose definition is going to be
|
|
|
|
/// re-materialized has an entry in it.
|
|
|
|
IndexedMap<MachineInstr*, VirtReg2IndexFunctor> ReMatMap;
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// MI2VirtMap - This is MachineInstr to virtual register
|
|
|
|
/// mapping. In the case of memory spill code being folded into
|
|
|
|
/// instructions, we need to know which virtual register was
|
|
|
|
/// read/written by this instruction.
|
2004-09-30 02:15:18 +00:00
|
|
|
MI2VirtMapTy MI2VirtMap;
|
2005-04-21 22:36:52 +00:00
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
/// SpillPt2VirtMap - This records the virtual registers which should
|
|
|
|
/// be spilled right after the MachineInstr due to live interval
|
|
|
|
/// splitting.
|
2007-12-05 08:16:32 +00:00
|
|
|
std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >
|
|
|
|
SpillPt2VirtMap;
|
2007-03-20 08:13:50 +00:00
|
|
|
|
2007-11-29 01:06:25 +00:00
|
|
|
/// RestorePt2VirtMap - This records the virtual registers which should
|
|
|
|
/// be restored right before the MachineInstr due to live interval
|
|
|
|
/// splitting.
|
|
|
|
std::map<MachineInstr*, std::vector<unsigned> > RestorePt2VirtMap;
|
|
|
|
|
2008-03-11 07:19:34 +00:00
|
|
|
/// EmergencySpillMap - This records the physical registers that should
|
|
|
|
/// be spilled / restored around the MachineInstr since the register
|
|
|
|
/// allocator has run out of registers.
|
|
|
|
std::map<MachineInstr*, std::vector<unsigned> > EmergencySpillMap;
|
|
|
|
|
|
|
|
/// EmergencySpillSlots - This records emergency spill slots used to
|
|
|
|
/// spill physical registers when the register allocator runs out of
|
|
|
|
/// registers. Ideally only one stack slot is used per function per
|
|
|
|
/// register class.
|
|
|
|
std::map<const TargetRegisterClass*, int> EmergencySpillSlots;
|
|
|
|
|
2007-03-20 08:13:50 +00:00
|
|
|
/// ReMatId - Instead of assigning a stack slot to a to be rematerialized
|
2007-04-04 07:40:01 +00:00
|
|
|
/// virtual register, an unique id is being assigned. This keeps track of
|
2007-03-20 08:13:50 +00:00
|
|
|
/// the highest id used so far. Note, this starts at (1<<18) to avoid
|
|
|
|
/// conflicts with stack slot numbers.
|
|
|
|
int ReMatId;
|
|
|
|
|
2008-02-27 03:04:06 +00:00
|
|
|
/// LowSpillSlot, HighSpillSlot - Lowest and highest spill slot indexes.
|
|
|
|
int LowSpillSlot, HighSpillSlot;
|
|
|
|
|
|
|
|
/// SpillSlotToUsesMap - Records uses for each register spill slot.
|
|
|
|
SmallVector<SmallPtrSet<MachineInstr*, 4>, 8> SpillSlotToUsesMap;
|
|
|
|
|
2008-04-11 17:53:36 +00:00
|
|
|
/// ImplicitDefed - One bit for each virtual register. If set it indicates
|
|
|
|
/// the register is implicitly defined.
|
|
|
|
BitVector ImplicitDefed;
|
|
|
|
|
2009-05-03 18:32:42 +00:00
|
|
|
/// UnusedRegs - A list of physical registers that have not been used.
|
|
|
|
BitVector UnusedRegs;
|
|
|
|
|
2010-11-16 00:41:01 +00:00
|
|
|
/// createSpillSlot - Allocate a spill slot for RC from MFI.
|
|
|
|
unsigned createSpillSlot(const TargetRegisterClass *RC);
|
|
|
|
|
2004-09-30 01:54:45 +00:00
|
|
|
VirtRegMap(const VirtRegMap&); // DO NOT IMPLEMENT
|
|
|
|
void operator=(const VirtRegMap&); // DO NOT IMPLEMENT
|
|
|
|
|
|
|
|
public:
|
2009-03-13 05:55:11 +00:00
|
|
|
static char ID;
|
2010-08-06 18:33:48 +00:00
|
|
|
// Initialize every map with its "unassigned" sentinel value; remat ids
// are allocated starting just past the legal stack-slot range so the two
// number spaces never collide.
VirtRegMap() : MachineFunctionPass(ID), Virt2PhysMap(NO_PHYS_REG),
               Virt2StackSlotMap(NO_STACK_SLOT),
               Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
               Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
               ReMatId(MAX_STACK_SLOT+1),
               LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
|
|
|
|
virtual bool runOnMachineFunction(MachineFunction &MF);
|
|
|
|
|
|
|
|
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
  // This pass only records mappings; it invalidates no other analyses.
  AU.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(AU);
}
|
2004-09-30 01:54:45 +00:00
|
|
|
|
2010-07-26 23:44:11 +00:00
|
|
|
/// getMachineFunction - Return the function this map was computed for.
/// MF is only set by runOnMachineFunction, so calling this earlier is a
/// programming error.
MachineFunction &getMachineFunction() const {
  // Fixed typo in the assert message ("runOnMAchineFunction").
  assert(MF && "getMachineFunction called before runOnMachineFunction");
  return *MF;
}
|
|
|
|
|
2004-09-30 01:54:45 +00:00
|
|
|
void grow();
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief returns true if the specified virtual register is
|
|
|
|
/// mapped to a physical register
|
2004-09-30 01:54:45 +00:00
|
|
|
bool hasPhys(unsigned virtReg) const {
  // A virtual register without an assignment maps to the NO_PHYS_REG
  // sentinel, so a simple inequality suffices.
  return getPhys(virtReg) != NO_PHYS_REG;
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief returns the physical register mapped to the specified
|
|
|
|
/// virtual register
|
2004-09-30 01:54:45 +00:00
|
|
|
unsigned getPhys(unsigned virtReg) const {
  // Only virtual registers have entries in Virt2PhysMap.
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  return Virt2PhysMap[virtReg];
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief creates a mapping for the specified virtual register to
|
|
|
|
/// the specified physical register
|
2004-09-30 01:54:45 +00:00
|
|
|
void assignVirt2Phys(unsigned virtReg, unsigned physReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg) &&
         TargetRegisterInfo::isPhysicalRegister(physReg));
  // Re-mapping requires an explicit clearVirt() first; silently
  // overwriting an existing assignment would hide allocator bugs.
  assert(Virt2PhysMap[virtReg] == NO_PHYS_REG &&
         "attempt to assign physical register to already mapped "
         "virtual register");
  Virt2PhysMap[virtReg] = physReg;
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief clears the specified virtual register's, physical
|
|
|
|
/// register mapping
|
2004-09-30 01:54:45 +00:00
|
|
|
void clearVirt(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  // Clearing an unmapped register is almost certainly a caller bug.
  assert(Virt2PhysMap[virtReg] != NO_PHYS_REG &&
         "attempt to clear a not assigned virtual register");
  Virt2PhysMap[virtReg] = NO_PHYS_REG;
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief clears all virtual to physical register mappings
|
2004-09-30 01:54:45 +00:00
|
|
|
void clearAllVirt() {
  Virt2PhysMap.clear();
  // Re-grow so every currently live virtual register again has a
  // NO_PHYS_REG entry rather than no entry at all.
  grow();
}
|
|
|
|
|
2009-06-14 20:22:55 +00:00
|
|
|
/// @brief returns the register allocation preference.
|
|
|
|
unsigned getRegAllocPref(unsigned virtReg);
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
/// @brief records virtReg is a split live interval from SReg.
|
|
|
|
void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
  // SReg == 0 would mark virtReg as not split (0 is the map's default).
  Virt2SplitMap[virtReg] = SReg;
}
|
|
|
|
|
|
|
|
/// @brief returns the live interval virtReg is split from.
|
|
|
|
unsigned getPreSplitReg(unsigned virtReg) {
  // Returns 0 when virtReg is not the product of live-interval splitting.
  return Virt2SplitMap[virtReg];
}
|
|
|
|
|
2008-03-12 20:50:04 +00:00
|
|
|
/// @brief returns true if the specified virtual register is not
|
2007-08-13 23:45:17 +00:00
|
|
|
/// mapped to a stack slot or rematerialized.
|
|
|
|
bool isAssignedReg(unsigned virtReg) const {
  // Neither spilled nor rematerialized: must be in a register.
  if (getStackSlot(virtReg) == NO_STACK_SLOT &&
      getReMatId(virtReg) == NO_STACK_SLOT)
    return true;
  // Split register can be assigned a physical register as well as a
  // stack slot or remat id.
  return (Virt2SplitMap[virtReg] && Virt2PhysMap[virtReg] != NO_PHYS_REG);
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief returns the stack slot mapped to the specified virtual
|
|
|
|
/// register
|
2004-09-30 01:54:45 +00:00
|
|
|
int getStackSlot(unsigned virtReg) const {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  // NO_STACK_SLOT when the register has not been spilled.
  return Virt2StackSlotMap[virtReg];
}
|
|
|
|
|
2007-08-13 23:45:17 +00:00
|
|
|
/// @brief returns the rematerialization id mapped to the specified virtual
|
|
|
|
/// register
|
|
|
|
int getReMatId(unsigned virtReg) const {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  // NO_STACK_SLOT when the register is not scheduled for remat.
  return Virt2ReMatIdMap[virtReg];
}
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief create a mapping for the specifed virtual register to
|
|
|
|
/// the next available stack slot
|
2004-09-30 01:54:45 +00:00
|
|
|
int assignVirt2StackSlot(unsigned virtReg);
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief create a mapping for the specified virtual register to
|
|
|
|
/// the specified stack slot
|
2004-09-30 01:54:45 +00:00
|
|
|
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
|
|
|
|
|
2007-03-20 08:13:50 +00:00
|
|
|
/// @brief assign an unique re-materialization id to the specified
|
|
|
|
/// virtual register.
|
|
|
|
int assignVirtReMatId(unsigned virtReg);
|
2007-08-13 23:45:17 +00:00
|
|
|
/// @brief assign an unique re-materialization id to the specified
|
|
|
|
/// virtual register.
|
|
|
|
void assignVirtReMatId(unsigned virtReg, int id);
|
2007-03-20 08:13:50 +00:00
|
|
|
|
|
|
|
/// @brief returns true if the specified virtual register is being
|
|
|
|
/// re-materialized.
|
|
|
|
bool isReMaterialized(unsigned virtReg) const {
  // A non-null entry is the defining instruction that will be re-issued.
  return ReMatMap[virtReg] != NULL;
}
|
|
|
|
|
|
|
|
/// @brief returns the original machine instruction being re-issued
|
|
|
|
/// to re-materialize the specified virtual register.
|
2007-08-13 23:45:17 +00:00
|
|
|
MachineInstr *getReMaterializedMI(unsigned virtReg) const {
  // Null when virtReg is not being rematerialized.
  return ReMatMap[virtReg];
}
|
|
|
|
|
|
|
|
/// @brief records the specified virtual register will be
|
|
|
|
/// re-materialized and the original instruction which will be re-issed
|
2007-08-13 23:45:17 +00:00
|
|
|
/// for this purpose. If parameter all is true, then all uses of the
|
|
|
|
/// registers are rematerialized and it's safe to delete the definition.
|
2007-03-20 08:13:50 +00:00
|
|
|
void setVirtIsReMaterialized(unsigned virtReg, MachineInstr *def) {
  // The stored instruction is not owned by this map; it is the original
  // definition that the rewriter will clone.
  ReMatMap[virtReg] = def;
}
|
|
|
|
|
2007-12-05 09:51:10 +00:00
|
|
|
/// @brief record the last use (kill) of a split virtual register.
|
2009-11-03 23:52:08 +00:00
|
|
|
void addKillPoint(unsigned virtReg, SlotIndex index) {
  // Overwrites any previously recorded kill index for virtReg.
  Virt2SplitKillMap[virtReg] = index;
}
|
|
|
|
|
2009-11-03 23:52:08 +00:00
|
|
|
SlotIndex getKillPoint(unsigned virtReg) const {
  // Default-constructed (invalid) SlotIndex when no kill was recorded.
  return Virt2SplitKillMap[virtReg];
}
|
|
|
|
|
|
|
|
/// @brief remove the last use (kill) of a split virtual register.
|
2007-12-05 09:51:10 +00:00
|
|
|
void removeKillPoint(unsigned virtReg) {
  // Reset to the invalid sentinel rather than erasing the entry.
  Virt2SplitKillMap[virtReg] = SlotIndex();
}
|
|
|
|
|
2007-11-28 01:28:46 +00:00
|
|
|
/// @brief returns true if the specified MachineInstr is a spill point.
|
|
|
|
bool isSpillPt(MachineInstr *Pt) const {
  // std::map::count is 0 or 1, so this is exactly the usual
  // find() != end() membership test.
  return SpillPt2VirtMap.count(Pt) != 0;
}
|
|
|
|
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
/// @brief returns the virtual registers that should be spilled due to
|
|
|
|
/// splitting right after the specified MachineInstr.
|
2007-12-05 08:16:32 +00:00
|
|
|
std::vector<std::pair<unsigned,bool> > &getSpillPtSpills(MachineInstr *Pt) {
  // Note: operator[] inserts an empty vector if Pt was not previously
  // recorded as a spill point, making it one as a side effect.
  return SpillPt2VirtMap[Pt];
}
|
|
|
|
|
|
|
|
/// @brief records the specified MachineInstr as a spill point for virtReg.
|
2007-12-05 08:16:32 +00:00
|
|
|
void addSpillPoint(unsigned virtReg, bool isKill, MachineInstr *Pt) {
|
2009-05-03 18:32:42 +00:00
|
|
|
std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
|
|
|
|
I = SpillPt2VirtMap.find(Pt);
|
|
|
|
if (I != SpillPt2VirtMap.end())
|
|
|
|
I->second.push_back(std::make_pair(virtReg, isKill));
|
2007-11-28 01:28:46 +00:00
|
|
|
else {
|
2007-12-05 08:16:32 +00:00
|
|
|
std::vector<std::pair<unsigned,bool> > Virts;
|
|
|
|
Virts.push_back(std::make_pair(virtReg, isKill));
|
2007-11-28 01:28:46 +00:00
|
|
|
SpillPt2VirtMap.insert(std::make_pair(Pt, Virts));
|
|
|
|
}
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
}
|
|
|
|
|
2008-03-11 21:34:46 +00:00
|
|
|
/// @brief - transfer spill point information from one instruction to
|
|
|
|
/// another.
|
Live interval splitting:
When a live interval is being spilled, rather than creating short, non-spillable
intervals for every def / use, split the interval at BB boundaries. That is, for
every BB where the live interval is defined or used, create a new interval that
covers all the defs and uses in the BB.
This is designed to eliminate one common problem: multiple reloads of the same
value in a single basic block. Note, it does *not* decrease the number of spills
since no copies are inserted so the split intervals are *connected* through
spill and reloads (or rematerialization). The newly created intervals can be
spilled again, in that case, since it does not span multiple basic blocks, it's
spilled in the usual manner. However, it can reuse the same stack slot as the
previously split interval.
This is currently controlled by -split-intervals-at-bb.
llvm-svn: 44198
2007-11-17 00:40:40 +00:00
|
|
|
void transferSpillPts(MachineInstr *Old, MachineInstr *New) {
  std::map<MachineInstr*, std::vector<std::pair<unsigned,bool> > >::iterator
    I = SpillPt2VirtMap.find(Old);
  if (I == SpillPt2VirtMap.end())
    return;
  // Drain Old's records one at a time (back to front) and re-register
  // each under New, then remove Old's now-empty entry.
  while (!I->second.empty()) {
    unsigned virtReg = I->second.back().first;
    bool isKill = I->second.back().second;
    I->second.pop_back();
    addSpillPoint(virtReg, isKill, New);
  }
  SpillPt2VirtMap.erase(I);
}
|
|
|
|
|
2007-11-29 01:06:25 +00:00
|
|
|
/// @brief returns true if the specified MachineInstr is a restore point.
|
|
|
|
bool isRestorePt(MachineInstr *Pt) const {
  // Membership test via count(): identical to find() != end() on a
  // std::map, which holds at most one entry per key.
  return RestorePt2VirtMap.count(Pt) != 0;
}
|
|
|
|
|
|
|
|
/// @brief returns the virtual registers that should be restoreed due to
|
|
|
|
/// splitting right after the specified MachineInstr.
|
|
|
|
std::vector<unsigned> &getRestorePtRestores(MachineInstr *Pt) {
  // Note: operator[] inserts an empty vector if Pt was not previously a
  // restore point, making it one as a side effect.
  return RestorePt2VirtMap[Pt];
}
|
|
|
|
|
|
|
|
/// @brief records the specified MachineInstr as a restore point for virtReg.
|
|
|
|
void addRestorePoint(unsigned virtReg, MachineInstr *Pt) {
|
2009-05-03 18:32:42 +00:00
|
|
|
std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
|
|
|
|
RestorePt2VirtMap.find(Pt);
|
|
|
|
if (I != RestorePt2VirtMap.end())
|
|
|
|
I->second.push_back(virtReg);
|
2007-11-29 01:06:25 +00:00
|
|
|
else {
|
|
|
|
std::vector<unsigned> Virts;
|
|
|
|
Virts.push_back(virtReg);
|
|
|
|
RestorePt2VirtMap.insert(std::make_pair(Pt, Virts));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-03-11 07:19:34 +00:00
|
|
|
/// @brief - transfer restore point information from one instruction to
|
|
|
|
/// another.
|
2007-11-29 01:06:25 +00:00
|
|
|
void transferRestorePts(MachineInstr *Old, MachineInstr *New) {
|
2009-05-03 18:32:42 +00:00
|
|
|
std::map<MachineInstr*, std::vector<unsigned> >::iterator I =
|
2007-11-29 01:06:25 +00:00
|
|
|
RestorePt2VirtMap.find(Old);
|
|
|
|
if (I == RestorePt2VirtMap.end())
|
|
|
|
return;
|
|
|
|
while (!I->second.empty()) {
|
|
|
|
unsigned virtReg = I->second.back();
|
|
|
|
I->second.pop_back();
|
|
|
|
addRestorePoint(virtReg, New);
|
|
|
|
}
|
|
|
|
RestorePt2VirtMap.erase(I);
|
|
|
|
}
|
|
|
|
|
2008-03-11 07:19:34 +00:00
|
|
|
/// @brief records that the specified physical register must be spilled
|
|
|
|
/// around the specified machine instr.
|
|
|
|
void addEmergencySpill(unsigned PhysReg, MachineInstr *MI) {
|
|
|
|
if (EmergencySpillMap.find(MI) != EmergencySpillMap.end())
|
|
|
|
EmergencySpillMap[MI].push_back(PhysReg);
|
|
|
|
else {
|
|
|
|
std::vector<unsigned> PhysRegs;
|
|
|
|
PhysRegs.push_back(PhysReg);
|
|
|
|
EmergencySpillMap.insert(std::make_pair(MI, PhysRegs));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// @brief returns true if one or more physical registers must be spilled
|
|
|
|
/// around the specified instruction.
|
|
|
|
bool hasEmergencySpills(MachineInstr *MI) const {
|
|
|
|
return EmergencySpillMap.find(MI) != EmergencySpillMap.end();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// @brief Returns the physical registers to be spilled and restored around
/// the instruction.
/// Note: uses operator[], so querying an instruction with no recorded
/// spills creates an empty entry for it.
std::vector<unsigned> &getEmergencySpills(MachineInstr *MI) {
  return EmergencySpillMap[MI];
}
|
|
|
|
|
2008-03-11 21:34:46 +00:00
|
|
|
/// @brief - transfer emergency spill information from one instruction to
|
|
|
|
/// another.
|
|
|
|
void transferEmergencySpills(MachineInstr *Old, MachineInstr *New) {
|
|
|
|
std::map<MachineInstr*,std::vector<unsigned> >::iterator I =
|
|
|
|
EmergencySpillMap.find(Old);
|
|
|
|
if (I == EmergencySpillMap.end())
|
|
|
|
return;
|
|
|
|
while (!I->second.empty()) {
|
|
|
|
unsigned virtReg = I->second.back();
|
|
|
|
I->second.pop_back();
|
|
|
|
addEmergencySpill(virtReg, New);
|
|
|
|
}
|
|
|
|
EmergencySpillMap.erase(I);
|
|
|
|
}
|
|
|
|
|
2008-03-11 07:19:34 +00:00
|
|
|
/// @brief Return or get an emergency spill slot for the register class.
int getEmergencySpillSlot(const TargetRegisterClass *RC);
|
|
|
|
|
2008-02-27 03:04:06 +00:00
|
|
|
/// @brief Return lowest spill slot index.
/// NOTE(review): the value before any spill slot has been created is
/// presumably a sentinel — confirm initialization in the .cpp.
int getLowSpillSlot() const {
  return LowSpillSlot;
}
|
|
|
|
|
|
|
|
/// @brief Return highest spill slot index.
int getHighSpillSlot() const {
  return HighSpillSlot;
}
|
|
|
|
|
|
|
|
/// @brief Records a spill slot use by the given instruction, so the use can
/// later be queried via the slot-to-uses map (see isSpillSlotUsed).
void addSpillSlotUse(int FrameIndex, MachineInstr *MI);
|
|
|
|
|
|
|
|
/// @brief Returns true if spill slot has been used (has at least one
/// recorded use).
bool isSpillSlotUsed(int FrameIndex) const {
  assert(FrameIndex >= 0 && "Spill slot index should not be negative!");
  // SpillSlotToUsesMap is indexed relative to the lowest spill slot index.
  return !SpillSlotToUsesMap[FrameIndex-LowSpillSlot].empty();
}
|
|
|
|
|
2008-04-11 17:53:36 +00:00
|
|
|
/// @brief Mark the specified register as being implicitly defined.
void setIsImplicitlyDefined(unsigned VirtReg) {
  // ImplicitDefed is indexed from the first virtual register number.
  ImplicitDefed.set(VirtReg-TargetRegisterInfo::FirstVirtualRegister);
}
|
|
|
|
|
|
|
|
/// @brief Returns true if the virtual register is implicitly defined.
bool isImplicitlyDefined(unsigned VirtReg) const {
  // Same virtual-register-relative indexing as setIsImplicitlyDefined.
  return ImplicitDefed[VirtReg-TargetRegisterInfo::FirstVirtualRegister];
}
|
|
|
|
|
2004-10-01 23:15:36 +00:00
|
|
|
/// @brief Updates information about the specified virtual register's value
/// folded into newMI machine instruction.
/// NOTE(review): OldMI appears to be the instruction being replaced and
/// NewMI its folded replacement — confirm against the .cpp implementation.
void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
                ModRef MRInfo);
|
2004-09-30 01:54:45 +00:00
|
|
|
|
2007-10-13 02:50:24 +00:00
|
|
|
/// @brief Updates information about the specified virtual register's value
/// folded into the specified machine instruction, with the given
/// read/write (ModRef) information.
void virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo);
|
|
|
|
|
2004-10-01 00:35:07 +00:00
|
|
|
/// @brief Returns the virtual registers' values folded in memory
/// operands of this instruction, as a [begin, end) const-iterator range
/// over the instruction-to-virtual-register multimap.
std::pair<MI2VirtMapTy::const_iterator, MI2VirtMapTy::const_iterator>
getFoldedVirts(MachineInstr* MI) const {
  return MI2VirtMap.equal_range(MI);
}
|
2006-05-01 21:16:03 +00:00
|
|
|
|
2007-11-28 01:28:46 +00:00
|
|
|
/// RemoveMachineInstrFromMaps - MI is being erased, remove it from the
/// folded instruction map and spill point map.
void RemoveMachineInstrFromMaps(MachineInstr *MI);
|
2004-09-30 01:54:45 +00:00
|
|
|
|
2009-05-03 18:32:42 +00:00
|
|
|
/// FindUnusedRegisters - Gather a list of allocatable registers that
/// have not been allocated to any virtual register.
/// NOTE(review): presumably returns true when at least one such register
/// was found — confirm against the .cpp implementation.
bool FindUnusedRegisters(LiveIntervals* LIs);
|
2009-05-03 18:32:42 +00:00
|
|
|
|
|
|
|
/// HasUnusedRegisters - Return true if there are any allocatable registers
|
|
|
|
/// that have not been allocated to any virtual register.
|
|
|
|
bool HasUnusedRegisters() const {
|
|
|
|
return !UnusedRegs.none();
|
|
|
|
}
|
|
|
|
|
|
|
|
/// setRegisterUsed - Remember the physical register is now used (clears
/// its bit in the unused-register set).
void setRegisterUsed(unsigned Reg) {
  UnusedRegs.reset(Reg);
}
|
|
|
|
|
|
|
|
/// isRegisterUnused - Return true if the physical register has not been
/// used.
bool isRegisterUnused(unsigned Reg) const {
  return UnusedRegs[Reg];
}
|
|
|
|
|
|
|
|
/// getFirstUnusedRegister - Return the first physical register that has not
|
|
|
|
/// been used.
|
|
|
|
unsigned getFirstUnusedRegister(const TargetRegisterClass *RC) {
|
|
|
|
int Reg = UnusedRegs.find_first();
|
|
|
|
while (Reg != -1) {
|
2009-05-04 03:30:11 +00:00
|
|
|
if (allocatableRCRegs[RC][Reg])
|
2009-05-03 18:32:42 +00:00
|
|
|
return (unsigned)Reg;
|
|
|
|
Reg = UnusedRegs.find_next(Reg);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-07-24 10:36:58 +00:00
|
|
|
void print(raw_ostream &OS, const Module* M = 0) const;
|
2004-09-30 01:54:45 +00:00
|
|
|
void dump() const;
|
|
|
|
};
|
|
|
|
|
2009-07-24 10:36:58 +00:00
|
|
|
inline raw_ostream &operator<<(raw_ostream &OS, const VirtRegMap &VRM) {
|
|
|
|
VRM.print(OS);
|
|
|
|
return OS;
|
|
|
|
}
|
2004-02-23 23:08:11 +00:00
|
|
|
} // End llvm namespace
|
|
|
|
|
|
|
|
#endif
|