Mirror of https://github.com/RPCS3/llvm.git, synced 2025-02-06 02:29:51 +00:00
Improve PPC VR (Altivec) register spilling
This change cleans up two issues with Altivec register spilling:

1. The spilling code was inefficient, using two instructions (an add and a load) when just one would do.
2. The code assumed that r0 would always be available (true for now, but this will change).

The new code handles VR spilling just like GPR spills, but forced into r+r mode. As a result, when any VR spills are present, we must now always allocate the register-scavenger spill slot.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@177231 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent ea9b914d2f, commit 3249729043
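
To make the effect concrete, the fragments below contrast the old and new Altivec spill emission, reassembled from the diff that follows. They assume the surrounding PPCInstrInfo::StoreRegToStackSlot context (MF, DL, TM, SrcReg, isKill, FrameIdx, NewMIs, NonRI and the PPC backend's internal headers), so treat them as an illustrative sketch rather than standalone code.

// Old path: two instructions per vector spill, with r0 hard-coded because it
// is kept away from the register allocator:
//   R0 = ADDI FI#
//   STVX VAL, 0, R0
bool Is64Bit = TM.getSubtargetImpl()->isPPC64();
unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI;
unsigned GPR0  = Is64Bit ? PPC::X0 : PPC::R0;
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0),
                                   FrameIdx, 0, 0));
NewMIs.push_back(BuildMI(MF, DL, get(PPC::STVX))
                 .addReg(SrcReg, getKillRegState(isKill))
                 .addReg(GPR0)
                 .addReg(GPR0));

// New path: a single STVX carrying a frame reference.  The frame index is
// resolved later by eliminateFrameIndex in r+r form, with the index register
// coming from the register scavenger instead of a hard-coded r0.
NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
                                   .addReg(SrcReg, getKillRegState(isKill)),
                                   FrameIdx));
NonRI = true;  // mark that this spill only has an r+r (no r+i) form

The NonRI flag is what later causes addScavengingSpillSlot to reserve the register-scavenger spill slot whenever a vector spill is present.
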
@@ -193,6 +193,11 @@ static bool hasSpills(const MachineFunction &MF) {
   return FuncInfo->hasSpills();
 }
 
+static bool hasNonRISpills(const MachineFunction &MF) {
+  const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+  return FuncInfo->hasNonRISpills();
+}
+
 /// determineFrameLayout - Determine the size of the frame and maximum call
 /// frame size.
 unsigned PPCFrameLowering::determineFrameLayout(MachineFunction &MF,

@@ -1048,7 +1053,7 @@ PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
   // needed alignment padding.
   unsigned StackSize = determineFrameLayout(MF, false, true);
   MachineFrameInfo *MFI = MF.getFrameInfo();
-  if (MFI->hasVarSizedObjects() || spillsCR(MF) ||
+  if (MFI->hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
       (hasSpills(MF) && !isInt<16>(StackSize))) {
     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
     const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;

@@ -439,7 +439,8 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
                                   unsigned SrcReg, bool isKill,
                                   int FrameIdx,
                                   const TargetRegisterClass *RC,
-                                  SmallVectorImpl<MachineInstr*> &NewMIs) const{
+                                  SmallVectorImpl<MachineInstr*> &NewMIs,
+                                  bool &NonRI) const{
   DebugLoc DL;
   if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
     if (SrcReg != PPC::LR) {

@@ -521,23 +522,14 @@ PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
       Reg = PPC::CR7;
 
     return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
-                               &PPC::CRRCRegClass, NewMIs);
+                               &PPC::CRRCRegClass, NewMIs, NonRI);
 
   } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
-    // We don't have indexed addressing for vector loads.  Emit:
-    // R0 = ADDI FI#
-    // STVX VAL, 0, R0
-    //
-    // FIXME: We use R0 here, because it isn't available for RA.
-    bool Is64Bit = TM.getSubtargetImpl()->isPPC64();
-    unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI;
-    unsigned GPR0  = Is64Bit ? PPC::X0 : PPC::R0;
-    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0),
-                                       FrameIdx, 0, 0));
-    NewMIs.push_back(BuildMI(MF, DL, get(PPC::STVX))
-                     .addReg(SrcReg, getKillRegState(isKill))
-                     .addReg(GPR0)
-                     .addReg(GPR0));
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
+                                       .addReg(SrcReg,
+                                               getKillRegState(isKill)),
+                                       FrameIdx));
+    NonRI = true;
   } else {
     llvm_unreachable("Unknown regclass!");
   }

@@ -557,9 +549,13 @@ PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
   FuncInfo->setHasSpills();
 
-  if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs))
+  bool NonRI = false;
+  if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs, NonRI))
     FuncInfo->setSpillsCR();
 
+  if (NonRI)
+    FuncInfo->setHasNonRISpills();
+
   for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
     MBB.insert(MI, NewMIs[i]);
 

@@ -576,7 +572,8 @@ bool
 PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
                                    unsigned DestReg, int FrameIdx,
                                    const TargetRegisterClass *RC,
-                                   SmallVectorImpl<MachineInstr*> &NewMIs)const{
+                                   SmallVectorImpl<MachineInstr*> &NewMIs,
+                                   bool &NonRI) const{
   if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
     if (DestReg != PPC::LR) {
       NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),

@@ -635,21 +632,12 @@ PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
       Reg = PPC::CR7;
 
     return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
-                                &PPC::CRRCRegClass, NewMIs);
+                                &PPC::CRRCRegClass, NewMIs, NonRI);
 
   } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
-    // We don't have indexed addressing for vector loads.  Emit:
-    // R0 = ADDI FI#
-    // Dest = LVX 0, R0
-    //
-    // FIXME: We use R0 here, because it isn't available for RA.
-    bool Is64Bit = TM.getSubtargetImpl()->isPPC64();
-    unsigned Instr = Is64Bit ? PPC::ADDI8 : PPC::ADDI;
-    unsigned GPR0  = Is64Bit ? PPC::X0 : PPC::R0;
-    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(Instr), GPR0),
-                                       FrameIdx, 0, 0));
-    NewMIs.push_back(BuildMI(MF, DL, get(PPC::LVX),DestReg).addReg(GPR0)
-                     .addReg(GPR0));
+    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
+                                       FrameIdx));
+    NonRI = true;
   } else {
     llvm_unreachable("Unknown regclass!");
   }

@@ -667,10 +655,17 @@ PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
   SmallVector<MachineInstr*, 4> NewMIs;
   DebugLoc DL;
   if (MI != MBB.end()) DL = MI->getDebugLoc();
-  if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs)) {
-    PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+
+  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
+  FuncInfo->setHasSpills();
+
+  bool NonRI = false;
+  if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs, NonRI))
     FuncInfo->setSpillsCR();
-  }
+
+  if (NonRI)
+    FuncInfo->setHasNonRISpills();
+
   for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
     MBB.insert(MI, NewMIs[i]);
 

@@ -71,11 +71,13 @@ class PPCInstrInfo : public PPCGenInstrInfo {
   bool StoreRegToStackSlot(MachineFunction &MF,
                            unsigned SrcReg, bool isKill, int FrameIdx,
                            const TargetRegisterClass *RC,
-                           SmallVectorImpl<MachineInstr*> &NewMIs) const;
+                           SmallVectorImpl<MachineInstr*> &NewMIs,
+                           bool &NonRI) const;
   bool LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
                             unsigned DestReg, int FrameIdx,
                             const TargetRegisterClass *RC,
-                            SmallVectorImpl<MachineInstr*> &NewMIs) const;
+                            SmallVectorImpl<MachineInstr*> &NewMIs,
+                            bool &NonRI) const;
 public:
   explicit PPCInstrInfo(PPCTargetMachine &TM);
 

@@ -40,6 +40,10 @@ class PPCFunctionInfo : public MachineFunctionInfo {
   /// Does this function have any stack spills.
   bool HasSpills;
 
+  /// Does this function spill using instructions with only r+r (not r+i)
+  /// forms.
+  bool HasNonRISpills;
+
   /// SpillsCR - Indicates whether CR is spilled in the current function.
   bool SpillsCR;
 

@@ -82,6 +86,7 @@ public:
     : FramePointerSaveIndex(0),
       ReturnAddrSaveIndex(0),
       HasSpills(false),
+      HasNonRISpills(false),
      SpillsCR(false),
      LRStoreRequired(false),
      MinReservedArea(0),

@@ -116,6 +121,9 @@ public:
   void setHasSpills() { HasSpills = true; }
   bool hasSpills() const { return HasSpills; }
 
+  void setHasNonRISpills() { HasNonRISpills = true; }
+  bool hasNonRISpills() const { return HasNonRISpills; }
+
   void setSpillsCR() { SpillsCR = true; }
   bool isCRSpilled() const { return SpillsCR; }
 

@@ -442,7 +442,25 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
     isIXAddr = true;
     break;
   }
 
+  bool noImmForm = false;
+  switch (OpC) {
+  case PPC::LVEBX:
+  case PPC::LVEHX:
+  case PPC::LVEWX:
+  case PPC::LVX:
+  case PPC::LVXL:
+  case PPC::LVSL:
+  case PPC::LVSR:
+  case PPC::STVEBX:
+  case PPC::STVEHX:
+  case PPC::STVEWX:
+  case PPC::STVX:
+  case PPC::STVXL:
+    noImmForm = true;
+    break;
+  }
+
   // Now add the frame object offset to the offset from r1.
   int Offset = MFI->getObjectOffset(FrameIndex);
   if (!isIXAddr)

@@ -466,7 +484,8 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // only "std" to a stack slot that is at least 4-byte aligned, but it can
   // happen in invalid code.
   if (OpC == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
-      (isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) {
+      (!noImmForm &&
+       isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) {
     if (isIXAddr)
       Offset >>= 2; // The actual encoded value has the low two bits zero.
     MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

@@ -493,7 +512,9 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   // addi 0:rA 1:rB, 2, imm ==> add 0:rA, 1:rB, 2:r0
   unsigned OperandBase;
 
-  if (OpC != TargetOpcode::INLINEASM) {
+  if (noImmForm)
+    OperandBase = 1;
+  else if (OpC != TargetOpcode::INLINEASM) {
     assert(ImmToIdxMap.count(OpC) &&
            "No indexed form of load or store available!");
     unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;

@@ -13,7 +13,7 @@ entry:
   ret void
 }
 
-; CHECK: stvx 2, 0, 0
-; CHECK: lvx 2, 0, 0
+; CHECK: stvx 2, 1,
+; CHECK: lvx 2, 1,
 
 declare void @foo(i32*)