PR2985 / <rdar://problem/6584986>

When compiling in Thumb mode, only the low registers (R0-R7) are available
for most instructions. Breaking the low registers out into a new register
class handles this. Uses of R12, SP, etc., are handled explicitly where
needed, with copies inserted to move results into low registers where the
rest of the code generator can deal with them.



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@68545 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Jim Grosbach 2009-04-07 20:34:09 +00:00
parent f23e809be0
commit 30eae3c022
8 changed files with 319 additions and 195 deletions

View File

@ -273,8 +273,8 @@ public:
/// getPhysicalRegisterRegClass - Returns the Register Class of a physical
/// register of the given type. If type is MVT::Other, then just return any
/// register class the register belongs to.
const TargetRegisterClass *getPhysicalRegisterRegClass(unsigned Reg,
MVT VT = MVT::Other) const;
virtual const TargetRegisterClass *
getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const;
/// getAllocatableSet - Returns a bitset indexed by register number
/// indicating if a register is allocatable or not. If a register class is

View File

@ -584,10 +584,10 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
// Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
int FI = cast<FrameIndexSDNode>(N)->getIndex();
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
if (Subtarget->isThumb())
if (Subtarget->isThumb()) {
return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
CurDAG->getTargetConstant(0, MVT::i32));
else {
} else {
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
CurDAG->getRegister(0, MVT::i32) };
@ -607,7 +607,9 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
std::swap(LHSR, RHSR);
}
if (RHSR && RHSR->getReg() == ARM::SP) {
return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), N0, N1);
SDValue Val = SDValue(CurDAG->getTargetNode(ARM::tMOVlor2hir, dl,
Op.getValueType(), N0, N0), 0);
return CurDAG->SelectNodeTo(N, ARM::tADDhirr, Op.getValueType(), Val, N1);
}
break;
}

View File

@ -117,7 +117,10 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
}
}
addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
if (Subtarget->isThumb())
addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
else
addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
@ -937,6 +940,7 @@ static SDValue LowerFORMAL_ARGUMENT(SDValue Op, SelectionDAG &DAG,
MVT ObjectVT = Op.getValue(ArgNo).getValueType();
SDValue Root = Op.getOperand(0);
MachineRegisterInfo &RegInfo = MF.getRegInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
static const unsigned GPRArgRegs[] = {
ARM::R0, ARM::R1, ARM::R2, ARM::R3
@ -955,17 +959,28 @@ static SDValue LowerFORMAL_ARGUMENT(SDValue Op, SelectionDAG &DAG,
SDValue ArgValue;
if (ObjGPRs == 1) {
unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
unsigned VReg;
if (AFI->isThumbFunction())
VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
else
VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
if (ObjectVT == MVT::f32)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::f32, ArgValue);
} else if (ObjGPRs == 2) {
unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
unsigned VReg;
if (AFI->isThumbFunction())
VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
else
VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
ArgValue = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
if (AFI->isThumbFunction())
VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
else
VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
RegInfo.addLiveIn(GPRArgRegs[NumGPRs+1], VReg);
SDValue ArgValue2 = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
@ -1029,7 +1044,11 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
SmallVector<SDValue, 4> MemOps;
for (; NumGPRs < 4; ++NumGPRs) {
unsigned VReg = RegInfo.createVirtualRegister(&ARM::GPRRegClass);
unsigned VReg;
if (AFI->isThumbFunction())
VReg = RegInfo.createVirtualRegister(ARM::tGPRRegisterClass);
else
VReg = RegInfo.createVirtualRegister(ARM::GPRRegisterClass);
RegInfo.addLiveIn(GPRArgRegs[NumGPRs], VReg);
SDValue Val = DAG.getCopyFromReg(Root, dl, VReg, MVT::i32);
SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0);
@ -1965,8 +1984,10 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
// GCC RS6000 Constraint Letters
switch (Constraint[0]) {
case 'l':
// FIXME: in thumb mode, 'l' is only low-regs.
// FALL THROUGH.
if (Subtarget->isThumb())
return std::make_pair(0U, ARM::tGPRRegisterClass);
else
return std::make_pair(0U, ARM::GPRRegisterClass);
case 'r':
return std::make_pair(0U, ARM::GPRRegisterClass);
case 'w':
@ -1989,6 +2010,9 @@ getRegClassForInlineAsmConstraint(const std::string &Constraint,
switch (Constraint[0]) { // GCC ARM Constraint Letters
default: break;
case 'l':
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
ARM::R4, ARM::R5, ARM::R6, ARM::R7,
0);
case 'r':
return make_vector<unsigned>(ARM::R0, ARM::R1, ARM::R2, ARM::R3,
ARM::R4, ARM::R5, ARM::R6, ARM::R7,

View File

@ -64,6 +64,9 @@ bool ARMInstrInfo::isMoveInstr(const MachineInstr &MI,
return true;
case ARM::MOVr:
case ARM::tMOVr:
case ARM::tMOVhir2lor:
case ARM::tMOVlor2hir:
case ARM::tMOVhir2hir:
assert(MI.getDesc().getNumOperands() >= 2 &&
MI.getOperand(0).isReg() &&
MI.getOperand(1).isReg() &&
@ -483,23 +486,43 @@ bool ARMInstrInfo::copyRegToReg(MachineBasicBlock &MBB,
unsigned DestReg, unsigned SrcReg,
const TargetRegisterClass *DestRC,
const TargetRegisterClass *SrcRC) const {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (!AFI->isThumbFunction()) {
if (DestRC == ARM::GPRRegisterClass) {
AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
.addReg(SrcReg)));
return true;
}
} else {
if (DestRC == ARM::GPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVhir2hir), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
return true;
}
} else if (DestRC == ARM::tGPRRegisterClass) {
if (SrcRC == ARM::GPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
return true;
} else if (SrcRC == ARM::tGPRRegisterClass) {
BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
return true;
}
}
}
if (DestRC != SrcRC) {
// Not yet supported!
return false;
}
DebugLoc DL = DebugLoc::getUnknownLoc();
if (I != MBB.end()) DL = I->getDebugLoc();
if (DestRC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
else
AddDefaultCC(AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::MOVr), DestReg)
.addReg(SrcReg)));
} else if (DestRC == ARM::SPRRegisterClass)
if (DestRC == ARM::SPRRegisterClass)
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FCPYS), DestReg)
.addReg(SrcReg));
else if (DestRC == ARM::DPRRegisterClass)
@ -521,14 +544,17 @@ storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, DL, get(ARM::tSpill))
.addReg(SrcReg, false, false, isKill)
.addFrameIndex(FI).addImm(0);
else
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
.addReg(SrcReg, false, false, isKill)
.addFrameIndex(FI).addReg(0).addImm(0));
assert (!AFI->isThumbFunction());
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::STR))
.addReg(SrcReg, false, false, isKill)
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::tGPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
assert (AFI->isThumbFunction());
BuildMI(MBB, I, DL, get(ARM::tSpill))
.addReg(SrcReg, false, false, isKill)
.addFrameIndex(FI).addImm(0);
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FSTD))
.addReg(SrcReg, false, false, isKill)
@ -586,12 +612,15 @@ loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
if (RC == ARM::GPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
if (AFI->isThumbFunction())
BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
.addFrameIndex(FI).addImm(0);
else
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0));
assert (!AFI->isThumbFunction());
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::LDR), DestReg)
.addFrameIndex(FI).addReg(0).addImm(0));
} else if (RC == ARM::tGPRRegisterClass) {
MachineFunction &MF = *MBB.getParent();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
assert (AFI->isThumbFunction());
BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
.addFrameIndex(FI).addImm(0);
} else if (RC == ARM::DPRRegisterClass) {
AddDefaultPred(BuildMI(MBB, I, DL, get(ARM::FLDD), DestReg)
.addFrameIndex(FI).addImm(0));
@ -715,7 +744,10 @@ foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
}
break;
}
case ARM::tMOVr: {
case ARM::tMOVr:
case ARM::tMOVlor2hir:
case ARM::tMOVhir2lor:
case ARM::tMOVhir2hir: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
bool isKill = MI->getOperand(1).isKill();
@ -788,7 +820,10 @@ canFoldMemoryOperand(const MachineInstr *MI,
case ARM::MOVr:
// If it is updating CPSR, then it cannot be folded.
return MI->getOperand(4).getReg() != ARM::CPSR;
case ARM::tMOVr: {
case ARM::tMOVr:
case ARM::tMOVlor2hir:
case ARM::tMOVhir2lor:
case ARM::tMOVhir2hir: {
if (OpNum == 0) { // move -> store
unsigned SrcReg = MI->getOperand(1).getReg();
if (RI.isPhysicalRegister(SrcReg) && !RI.isLowRegister(SrcReg))

View File

@ -73,7 +73,7 @@ def thumb_immshifted_shamt : SDNodeXForm<imm, [{
def t_addrmode_rr : Operand<i32>,
ComplexPattern<i32, 2, "SelectThumbAddrModeRR", []> {
let PrintMethod = "printThumbAddrModeRROperand";
let MIOperandInfo = (ops GPR:$base, GPR:$offsreg);
let MIOperandInfo = (ops tGPR:$base, tGPR:$offsreg);
}
// t_addrmode_s4 := reg + reg
@ -82,7 +82,7 @@ def t_addrmode_rr : Operand<i32>,
def t_addrmode_s4 : Operand<i32>,
ComplexPattern<i32, 3, "SelectThumbAddrModeS4", []> {
let PrintMethod = "printThumbAddrModeS4Operand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm, GPR:$offsreg);
let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
}
// t_addrmode_s2 := reg + reg
@ -91,7 +91,7 @@ def t_addrmode_s4 : Operand<i32>,
def t_addrmode_s2 : Operand<i32>,
ComplexPattern<i32, 3, "SelectThumbAddrModeS2", []> {
let PrintMethod = "printThumbAddrModeS2Operand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm, GPR:$offsreg);
let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
}
// t_addrmode_s1 := reg + reg
@ -100,7 +100,7 @@ def t_addrmode_s2 : Operand<i32>,
def t_addrmode_s1 : Operand<i32>,
ComplexPattern<i32, 3, "SelectThumbAddrModeS1", []> {
let PrintMethod = "printThumbAddrModeS1Operand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm, GPR:$offsreg);
let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm, tGPR:$offsreg);
}
// t_addrmode_sp := sp + imm8 * 4
@ -108,7 +108,7 @@ def t_addrmode_s1 : Operand<i32>,
def t_addrmode_sp : Operand<i32>,
ComplexPattern<i32, 2, "SelectThumbAddrModeSP", []> {
let PrintMethod = "printThumbAddrModeSPOperand";
let MIOperandInfo = (ops GPR:$base, i32imm:$offsimm);
let MIOperandInfo = (ops tGPR:$base, i32imm:$offsimm);
}
//===----------------------------------------------------------------------===//
@ -128,9 +128,9 @@ PseudoInst<(outs), (ins i32imm:$amt),
}
let isNotDuplicable = 1 in
def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp),
def tPICADD : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, pclabel:$cp),
"$cp:\n\tadd $dst, pc",
[(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>;
[(set tGPR:$dst, (ARMpic_add tGPR:$lhs, imm:$cp))]>;
//===----------------------------------------------------------------------===//
// Control Flow Instructions.
@ -139,7 +139,7 @@ def tPICADD : TIt<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp),
let isReturn = 1, isTerminator = 1 in {
def tBX_RET : TI<(outs), (ins), "bx lr", [(ARMretflag)]>;
// Alternative return instruction used by vararg functions.
def tBX_RET_vararg : TI<(outs), (ins GPR:$target), "bx $target", []>;
def tBX_RET_vararg : TI<(outs), (ins tGPR:$target), "bx $target", []>;
}
// FIXME: remove when we have a way to marking a MI with these properties.
@ -157,13 +157,13 @@ let isCall = 1,
def tBLXi : TIx2<(outs), (ins i32imm:$func, variable_ops),
"blx ${func:call}",
[(ARMcall tglobaladdr:$func)]>, Requires<[HasV5T]>;
def tBLXr : TI<(outs), (ins GPR:$func, variable_ops),
def tBLXr : TI<(outs), (ins tGPR:$func, variable_ops),
"blx $func",
[(ARMtcall GPR:$func)]>, Requires<[HasV5T]>;
[(ARMtcall tGPR:$func)]>, Requires<[HasV5T]>;
// ARMv4T
def tBX : TIx2<(outs), (ins GPR:$func, variable_ops),
def tBX : TIx2<(outs), (ins tGPR:$func, variable_ops),
"cpy lr, pc\n\tbx $func",
[(ARMcall_nolink GPR:$func)]>;
[(ARMcall_nolink tGPR:$func)]>;
}
let isBranch = 1, isTerminator = 1 in {
@ -176,9 +176,9 @@ let isBranch = 1, isTerminator = 1 in {
def tBfar : TIx2<(outs), (ins brtarget:$target), "bl $target\t@ far jump",[]>;
def tBR_JTr : TJTI<(outs),
(ins GPR:$target, jtblock_operand:$jt, i32imm:$id),
(ins tGPR:$target, jtblock_operand:$jt, i32imm:$id),
"cpy pc, $target \n\t.align\t2\n$jt",
[(ARMbrjt GPR:$target, tjumptable:$jt, imm:$id)]>;
[(ARMbrjt tGPR:$target, tjumptable:$jt, imm:$id)]>;
}
}
@ -193,68 +193,68 @@ let isBranch = 1, isTerminator = 1 in
//
let canFoldAsLoad = 1 in
def tLDR : TI4<(outs GPR:$dst), (ins t_addrmode_s4:$addr),
def tLDR : TI4<(outs tGPR:$dst), (ins t_addrmode_s4:$addr),
"ldr $dst, $addr",
[(set GPR:$dst, (load t_addrmode_s4:$addr))]>;
[(set tGPR:$dst, (load t_addrmode_s4:$addr))]>;
def tLDRB : TI1<(outs GPR:$dst), (ins t_addrmode_s1:$addr),
def tLDRB : TI1<(outs tGPR:$dst), (ins t_addrmode_s1:$addr),
"ldrb $dst, $addr",
[(set GPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>;
[(set tGPR:$dst, (zextloadi8 t_addrmode_s1:$addr))]>;
def tLDRH : TI2<(outs GPR:$dst), (ins t_addrmode_s2:$addr),
def tLDRH : TI2<(outs tGPR:$dst), (ins t_addrmode_s2:$addr),
"ldrh $dst, $addr",
[(set GPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>;
[(set tGPR:$dst, (zextloadi16 t_addrmode_s2:$addr))]>;
def tLDRSB : TI1<(outs GPR:$dst), (ins t_addrmode_rr:$addr),
def tLDRSB : TI1<(outs tGPR:$dst), (ins t_addrmode_rr:$addr),
"ldrsb $dst, $addr",
[(set GPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>;
[(set tGPR:$dst, (sextloadi8 t_addrmode_rr:$addr))]>;
def tLDRSH : TI2<(outs GPR:$dst), (ins t_addrmode_rr:$addr),
def tLDRSH : TI2<(outs tGPR:$dst), (ins t_addrmode_rr:$addr),
"ldrsh $dst, $addr",
[(set GPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>;
[(set tGPR:$dst, (sextloadi16 t_addrmode_rr:$addr))]>;
let canFoldAsLoad = 1 in
def tLDRspi : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr),
def tLDRspi : TIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr),
"ldr $dst, $addr",
[(set GPR:$dst, (load t_addrmode_sp:$addr))]>;
[(set tGPR:$dst, (load t_addrmode_sp:$addr))]>;
// Special instruction for restore. It cannot clobber condition register
// when it's expanded by eliminateCallFramePseudoInstr().
let canFoldAsLoad = 1, mayLoad = 1 in
def tRestore : TIs<(outs GPR:$dst), (ins t_addrmode_sp:$addr),
def tRestore : TIs<(outs tGPR:$dst), (ins t_addrmode_sp:$addr),
"ldr $dst, $addr", []>;
// Load tconstpool
let canFoldAsLoad = 1 in
def tLDRpci : TIs<(outs GPR:$dst), (ins i32imm:$addr),
def tLDRpci : TIs<(outs tGPR:$dst), (ins i32imm:$addr),
"ldr $dst, $addr",
[(set GPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>;
[(set tGPR:$dst, (load (ARMWrapper tconstpool:$addr)))]>;
// Special LDR for loads from non-pc-relative constpools.
let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1 in
def tLDRcp : TIs<(outs GPR:$dst), (ins i32imm:$addr),
def tLDRcp : TIs<(outs tGPR:$dst), (ins i32imm:$addr),
"ldr $dst, $addr", []>;
def tSTR : TI4<(outs), (ins GPR:$src, t_addrmode_s4:$addr),
def tSTR : TI4<(outs), (ins tGPR:$src, t_addrmode_s4:$addr),
"str $src, $addr",
[(store GPR:$src, t_addrmode_s4:$addr)]>;
[(store tGPR:$src, t_addrmode_s4:$addr)]>;
def tSTRB : TI1<(outs), (ins GPR:$src, t_addrmode_s1:$addr),
def tSTRB : TI1<(outs), (ins tGPR:$src, t_addrmode_s1:$addr),
"strb $src, $addr",
[(truncstorei8 GPR:$src, t_addrmode_s1:$addr)]>;
[(truncstorei8 tGPR:$src, t_addrmode_s1:$addr)]>;
def tSTRH : TI2<(outs), (ins GPR:$src, t_addrmode_s2:$addr),
def tSTRH : TI2<(outs), (ins tGPR:$src, t_addrmode_s2:$addr),
"strh $src, $addr",
[(truncstorei16 GPR:$src, t_addrmode_s2:$addr)]>;
[(truncstorei16 tGPR:$src, t_addrmode_s2:$addr)]>;
def tSTRspi : TIs<(outs), (ins GPR:$src, t_addrmode_sp:$addr),
def tSTRspi : TIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr),
"str $src, $addr",
[(store GPR:$src, t_addrmode_sp:$addr)]>;
[(store tGPR:$src, t_addrmode_sp:$addr)]>;
let mayStore = 1 in {
// Special instruction for spill. It cannot clobber condition register
// when it's expanded by eliminateCallFramePseudoInstr().
def tSpill : TIs<(outs), (ins GPR:$src, t_addrmode_sp:$addr),
def tSpill : TIs<(outs), (ins tGPR:$src, t_addrmode_sp:$addr),
"str $src, $addr", []>;
}
@ -277,205 +277,213 @@ def tPUSH : TI<(outs), (ins reglist:$src1, variable_ops),
//
// Add with carry
def tADC : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tADC : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"adc $dst, $rhs",
[(set GPR:$dst, (adde GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (adde tGPR:$lhs, tGPR:$rhs))]>;
def tADDS : TI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tADDS : TI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"add $dst, $lhs, $rhs",
[(set GPR:$dst, (addc GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (addc tGPR:$lhs, tGPR:$rhs))]>;
def tADDi3 : TI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tADDi3 : TI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"add $dst, $lhs, $rhs",
[(set GPR:$dst, (add GPR:$lhs, imm0_7:$rhs))]>;
[(set tGPR:$dst, (add tGPR:$lhs, imm0_7:$rhs))]>;
def tADDi8 : TIt<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tADDi8 : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"add $dst, $rhs",
[(set GPR:$dst, (add GPR:$lhs, imm8_255:$rhs))]>;
[(set tGPR:$dst, (add tGPR:$lhs, imm8_255:$rhs))]>;
def tADDrr : TI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tADDrr : TI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"add $dst, $lhs, $rhs",
[(set GPR:$dst, (add GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (add tGPR:$lhs, tGPR:$rhs))]>;
def tADDhirr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
"add $dst, $rhs", []>;
def tADDhirr : TIt<(outs tGPR:$dst), (ins GPR:$lhs, GPR:$rhs),
"add $dst, $rhs @ addhirr", []>;
def tADDrPCi : TI<(outs GPR:$dst), (ins i32imm:$rhs),
def tADDrPCi : TI<(outs tGPR:$dst), (ins i32imm:$rhs),
"add $dst, pc, $rhs * 4", []>;
def tADDrSPi : TI<(outs GPR:$dst), (ins GPR:$sp, i32imm:$rhs),
"add $dst, $sp, $rhs * 4", []>;
def tADDspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tADDrSPi : TI<(outs tGPR:$dst), (ins GPR:$sp, i32imm:$rhs),
"add $dst, $sp, $rhs * 4 @ addrspi", []>;
def tADDspi : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"add $dst, $rhs * 4", []>;
def tAND : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tAND : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"and $dst, $rhs",
[(set GPR:$dst, (and GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (and tGPR:$lhs, tGPR:$rhs))]>;
def tASRri : TI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tASRri : TI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"asr $dst, $lhs, $rhs",
[(set GPR:$dst, (sra GPR:$lhs, imm:$rhs))]>;
[(set tGPR:$dst, (sra tGPR:$lhs, imm:$rhs))]>;
def tASRrr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tASRrr : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"asr $dst, $rhs",
[(set GPR:$dst, (sra GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (sra tGPR:$lhs, tGPR:$rhs))]>;
def tBIC : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tBIC : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"bic $dst, $rhs",
[(set GPR:$dst, (and GPR:$lhs, (not GPR:$rhs)))]>;
[(set tGPR:$dst, (and tGPR:$lhs, (not tGPR:$rhs)))]>;
def tCMN : TI<(outs), (ins GPR:$lhs, GPR:$rhs),
def tCMN : TI<(outs), (ins tGPR:$lhs, tGPR:$rhs),
"cmn $lhs, $rhs",
[(ARMcmp GPR:$lhs, (ineg GPR:$rhs))]>;
[(ARMcmp tGPR:$lhs, (ineg tGPR:$rhs))]>;
def tCMPi8 : TI<(outs), (ins GPR:$lhs, i32imm:$rhs),
def tCMPi8 : TI<(outs), (ins tGPR:$lhs, i32imm:$rhs),
"cmp $lhs, $rhs",
[(ARMcmp GPR:$lhs, imm0_255:$rhs)]>;
[(ARMcmp tGPR:$lhs, imm0_255:$rhs)]>;
def tCMPr : TI<(outs), (ins GPR:$lhs, GPR:$rhs),
def tCMPr : TI<(outs), (ins tGPR:$lhs, tGPR:$rhs),
"cmp $lhs, $rhs",
[(ARMcmp GPR:$lhs, GPR:$rhs)]>;
[(ARMcmp tGPR:$lhs, tGPR:$rhs)]>;
def tTST : TI<(outs), (ins GPR:$lhs, GPR:$rhs),
def tTST : TI<(outs), (ins tGPR:$lhs, tGPR:$rhs),
"tst $lhs, $rhs",
[(ARMcmpNZ (and GPR:$lhs, GPR:$rhs), 0)]>;
[(ARMcmpNZ (and tGPR:$lhs, tGPR:$rhs), 0)]>;
def tCMNNZ : TI<(outs), (ins GPR:$lhs, GPR:$rhs),
def tCMNNZ : TI<(outs), (ins tGPR:$lhs, tGPR:$rhs),
"cmn $lhs, $rhs",
[(ARMcmpNZ GPR:$lhs, (ineg GPR:$rhs))]>;
[(ARMcmpNZ tGPR:$lhs, (ineg tGPR:$rhs))]>;
def tCMPNZi8 : TI<(outs), (ins GPR:$lhs, i32imm:$rhs),
def tCMPNZi8 : TI<(outs), (ins tGPR:$lhs, i32imm:$rhs),
"cmp $lhs, $rhs",
[(ARMcmpNZ GPR:$lhs, imm0_255:$rhs)]>;
[(ARMcmpNZ tGPR:$lhs, imm0_255:$rhs)]>;
def tCMPNZr : TI<(outs), (ins GPR:$lhs, GPR:$rhs),
def tCMPNZr : TI<(outs), (ins tGPR:$lhs, tGPR:$rhs),
"cmp $lhs, $rhs",
[(ARMcmpNZ GPR:$lhs, GPR:$rhs)]>;
[(ARMcmpNZ tGPR:$lhs, tGPR:$rhs)]>;
// TODO: A7-37: CMP(3) - cmp hi regs
def tEOR : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tEOR : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"eor $dst, $rhs",
[(set GPR:$dst, (xor GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (xor tGPR:$lhs, tGPR:$rhs))]>;
def tLSLri : TI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tLSLri : TI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"lsl $dst, $lhs, $rhs",
[(set GPR:$dst, (shl GPR:$lhs, imm:$rhs))]>;
[(set tGPR:$dst, (shl tGPR:$lhs, imm:$rhs))]>;
def tLSLrr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tLSLrr : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"lsl $dst, $rhs",
[(set GPR:$dst, (shl GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (shl tGPR:$lhs, tGPR:$rhs))]>;
def tLSRri : TI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tLSRri : TI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"lsr $dst, $lhs, $rhs",
[(set GPR:$dst, (srl GPR:$lhs, imm:$rhs))]>;
[(set tGPR:$dst, (srl tGPR:$lhs, imm:$rhs))]>;
def tLSRrr : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tLSRrr : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"lsr $dst, $rhs",
[(set GPR:$dst, (srl GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (srl tGPR:$lhs, tGPR:$rhs))]>;
// FIXME: This is not rematerializable because mov changes the condition code.
def tMOVi8 : TI<(outs GPR:$dst), (ins i32imm:$src),
def tMOVi8 : TI<(outs tGPR:$dst), (ins i32imm:$src),
"mov $dst, $src",
[(set GPR:$dst, imm0_255:$src)]>;
[(set tGPR:$dst, imm0_255:$src)]>;
// TODO: A7-73: MOV(2) - mov setting flag.
// Note: MOV(2) of two low regs updates the flags, so we emit this as 'cpy',
// which is MOV(3). This also supports high registers.
def tMOVr : TI<(outs GPR:$dst), (ins GPR:$src),
"cpy $dst, $src", []>;
def tMOVr : TI<(outs tGPR:$dst), (ins tGPR:$src),
"cpy $dst, $src", []>;
def tMOVhir2lor : TI<(outs tGPR:$dst), (ins GPR:$src),
"cpy $dst, $src\t@ hir2lor", []>;
def tMOVlor2hir : TI<(outs GPR:$dst), (ins tGPR:$src),
"cpy $dst, $src\t@ lor2hir", []>;
def tMOVhir2hir : TI<(outs GPR:$dst), (ins GPR:$src),
"cpy $dst, $src\t@ hir2hir", []>;
def tMUL : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tMUL : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"mul $dst, $rhs",
[(set GPR:$dst, (mul GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (mul tGPR:$lhs, tGPR:$rhs))]>;
def tMVN : TI<(outs GPR:$dst), (ins GPR:$src),
def tMVN : TI<(outs tGPR:$dst), (ins tGPR:$src),
"mvn $dst, $src",
[(set GPR:$dst, (not GPR:$src))]>;
[(set tGPR:$dst, (not tGPR:$src))]>;
def tNEG : TI<(outs GPR:$dst), (ins GPR:$src),
def tNEG : TI<(outs tGPR:$dst), (ins tGPR:$src),
"neg $dst, $src",
[(set GPR:$dst, (ineg GPR:$src))]>;
[(set tGPR:$dst, (ineg tGPR:$src))]>;
def tORR : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tORR : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"orr $dst, $rhs",
[(set GPR:$dst, (or GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (or tGPR:$lhs, tGPR:$rhs))]>;
def tREV : TI<(outs GPR:$dst), (ins GPR:$src),
def tREV : TI<(outs tGPR:$dst), (ins tGPR:$src),
"rev $dst, $src",
[(set GPR:$dst, (bswap GPR:$src))]>,
[(set tGPR:$dst, (bswap tGPR:$src))]>,
Requires<[IsThumb, HasV6]>;
def tREV16 : TI<(outs GPR:$dst), (ins GPR:$src),
def tREV16 : TI<(outs tGPR:$dst), (ins tGPR:$src),
"rev16 $dst, $src",
[(set GPR:$dst,
(or (and (srl GPR:$src, 8), 0xFF),
(or (and (shl GPR:$src, 8), 0xFF00),
(or (and (srl GPR:$src, 8), 0xFF0000),
(and (shl GPR:$src, 8), 0xFF000000)))))]>,
[(set tGPR:$dst,
(or (and (srl tGPR:$src, 8), 0xFF),
(or (and (shl tGPR:$src, 8), 0xFF00),
(or (and (srl tGPR:$src, 8), 0xFF0000),
(and (shl tGPR:$src, 8), 0xFF000000)))))]>,
Requires<[IsThumb, HasV6]>;
def tREVSH : TI<(outs GPR:$dst), (ins GPR:$src),
def tREVSH : TI<(outs tGPR:$dst), (ins tGPR:$src),
"revsh $dst, $src",
[(set GPR:$dst,
[(set tGPR:$dst,
(sext_inreg
(or (srl (and GPR:$src, 0xFFFF), 8),
(shl GPR:$src, 8)), i16))]>,
(or (srl (and tGPR:$src, 0xFFFF), 8),
(shl tGPR:$src, 8)), i16))]>,
Requires<[IsThumb, HasV6]>;
def tROR : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tROR : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"ror $dst, $rhs",
[(set GPR:$dst, (rotr GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (rotr tGPR:$lhs, tGPR:$rhs))]>;
// Subtract with carry
def tSBC : TIt<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tSBC : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"sbc $dst, $rhs",
[(set GPR:$dst, (sube GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (sube tGPR:$lhs, tGPR:$rhs))]>;
def tSUBS : TI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tSUBS : TI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"sub $dst, $lhs, $rhs",
[(set GPR:$dst, (subc GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (subc tGPR:$lhs, tGPR:$rhs))]>;
// TODO: A7-96: STMIA - store multiple.
def tSUBi3 : TI<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tSUBi3 : TI<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"sub $dst, $lhs, $rhs",
[(set GPR:$dst, (add GPR:$lhs, imm0_7_neg:$rhs))]>;
[(set tGPR:$dst, (add tGPR:$lhs, imm0_7_neg:$rhs))]>;
def tSUBi8 : TIt<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tSUBi8 : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"sub $dst, $rhs",
[(set GPR:$dst, (add GPR:$lhs, imm8_255_neg:$rhs))]>;
[(set tGPR:$dst, (add tGPR:$lhs, imm8_255_neg:$rhs))]>;
def tSUBrr : TI<(outs GPR:$dst), (ins GPR:$lhs, GPR:$rhs),
def tSUBrr : TI<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
"sub $dst, $lhs, $rhs",
[(set GPR:$dst, (sub GPR:$lhs, GPR:$rhs))]>;
[(set tGPR:$dst, (sub tGPR:$lhs, tGPR:$rhs))]>;
def tSUBspi : TIt<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
def tSUBspi : TIt<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
"sub $dst, $rhs * 4", []>;
def tSXTB : TI<(outs GPR:$dst), (ins GPR:$src),
def tSXTB : TI<(outs tGPR:$dst), (ins tGPR:$src),
"sxtb $dst, $src",
[(set GPR:$dst, (sext_inreg GPR:$src, i8))]>,
[(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>,
Requires<[IsThumb, HasV6]>;
def tSXTH : TI<(outs GPR:$dst), (ins GPR:$src),
def tSXTH : TI<(outs tGPR:$dst), (ins tGPR:$src),
"sxth $dst, $src",
[(set GPR:$dst, (sext_inreg GPR:$src, i16))]>,
[(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>,
Requires<[IsThumb, HasV6]>;
def tUXTB : TI<(outs GPR:$dst), (ins GPR:$src),
def tUXTB : TI<(outs tGPR:$dst), (ins tGPR:$src),
"uxtb $dst, $src",
[(set GPR:$dst, (and GPR:$src, 0xFF))]>,
[(set tGPR:$dst, (and tGPR:$src, 0xFF))]>,
Requires<[IsThumb, HasV6]>;
def tUXTH : TI<(outs GPR:$dst), (ins GPR:$src),
def tUXTH : TI<(outs tGPR:$dst), (ins tGPR:$src),
"uxth $dst, $src",
[(set GPR:$dst, (and GPR:$src, 0xFFFF))]>,
[(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>,
Requires<[IsThumb, HasV6]>;
@ -483,20 +491,20 @@ def tUXTH : TI<(outs GPR:$dst), (ins GPR:$src),
// Expanded by the scheduler into a branch sequence.
let usesCustomDAGSchedInserter = 1 in // Expanded by the scheduler.
def tMOVCCr :
PseudoInst<(outs GPR:$dst), (ins GPR:$false, GPR:$true, pred:$cc),
PseudoInst<(outs tGPR:$dst), (ins tGPR:$false, tGPR:$true, pred:$cc),
"@ tMOVCCr $cc",
[/*(set GPR:$dst, (ARMcmov GPR:$false, GPR:$true, imm:$cc))*/]>;
[/*(set tGPR:$dst, (ARMcmov tGPR:$false, tGPR:$true, imm:$cc))*/]>;
// tLEApcrel - Load a pc-relative address into a register without offending the
// assembler.
def tLEApcrel : TIx2<(outs GPR:$dst), (ins i32imm:$label),
def tLEApcrel : TIx2<(outs tGPR:$dst), (ins i32imm:$label),
!strconcat(!strconcat(".set PCRELV${:uid}, ($label-(",
"${:private}PCRELL${:uid}+4))\n"),
!strconcat("\tmov $dst, #PCRELV${:uid}\n",
"${:private}PCRELL${:uid}:\n\tadd $dst, pc")),
[]>;
def tLEApcrelJT : TIx2<(outs GPR:$dst), (ins i32imm:$label, i32imm:$id),
def tLEApcrelJT : TIx2<(outs tGPR:$dst), (ins i32imm:$label, i32imm:$id),
!strconcat(!strconcat(".set PCRELV${:uid}, (${label}_${id:no_hash}-(",
"${:private}PCRELL${:uid}+4))\n"),
!strconcat("\tmov $dst, #PCRELV${:uid}\n",
@ -532,7 +540,7 @@ def : ThumbPat<(ARMtcall texternalsym:$func), (tBL texternalsym:$func)>;
def : ThumbV5Pat<(ARMcall texternalsym:$func), (tBLXi texternalsym:$func)>;
// Indirect calls to ARM routines
def : ThumbV5Pat<(ARMcall GPR:$dst), (tBLXr GPR:$dst)>;
def : ThumbV5Pat<(ARMcall tGPR:$dst), (tBLXr tGPR:$dst)>;
// zextload i1 -> zextload i8
def : ThumbPat<(zextloadi1 t_addrmode_s1:$addr),

View File

@ -211,6 +211,22 @@ bool ARMRegisterInfo::isLowRegister(unsigned Reg) const {
}
}
const TargetRegisterClass*
ARMRegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, MVT VT) const {
if (STI.isThumb()) {
if (isLowRegister(Reg))
return ARM::tGPRRegisterClass;
switch (Reg) {
default:
break;
case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
return ARM::GPRRegisterClass;
}
}
return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
}
const unsigned*
ARMRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
static const unsigned CalleeSavedRegs[] = {
@ -244,7 +260,16 @@ ARMRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
&ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
0
};
return CalleeSavedRegClasses;
static const TargetRegisterClass * const ThumbCalleeSavedRegClasses[] = {
&ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::GPRRegClass,
&ARM::GPRRegClass, &ARM::GPRRegClass, &ARM::tGPRRegClass,
&ARM::tGPRRegClass,&ARM::tGPRRegClass,&ARM::tGPRRegClass,
&ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
&ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass, &ARM::DPRRegClass,
0
};
return STI.isThumb() ? ThumbCalleeSavedRegClasses : CalleeSavedRegClasses;
}
BitVector ARMRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
@ -400,7 +425,7 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
if (DestReg == ARM::SP) {
assert(BaseReg == ARM::SP && "Unexpected!");
LdReg = ARM::R3;
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R12)
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
.addReg(ARM::R3, false, false, true);
}
@ -423,7 +448,7 @@ void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
else
MIB.addReg(LdReg).addReg(BaseReg, false, false, true);
if (DestReg == ARM::SP)
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::R3)
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
.addReg(ARM::R12, false, false, true);
}
@ -616,6 +641,7 @@ static
unsigned findScratchRegister(RegScavenger *RS, const TargetRegisterClass *RC,
ARMFunctionInfo *AFI) {
unsigned Reg = RS ? RS->FindUnusedReg(RC, true) : (unsigned) ARM::R12;
assert (!AFI->isThumbFunction());
if (Reg == 0)
// Try a already spilled CS register.
Reg = RS->FindUnusedReg(RC, AFI->getSpilledCSRegisters());
@ -717,7 +743,7 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
if (Offset == 0) {
// Turn it into a move.
MI.setDesc(TII.get(ARM::tMOVr));
MI.setDesc(TII.get(ARM::tMOVhir2lor));
MI.getOperand(i).ChangeToRegister(FrameReg, false);
MI.RemoveOperand(i+1);
return;
@ -891,12 +917,12 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
unsigned TmpReg = ARM::R3;
bool UseRR = false;
if (ValReg == ARM::R3) {
BuildMI(MBB, II, dl, TII.get(ARM::tMOVr), ARM::R12)
BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
.addReg(ARM::R2, false, false, true);
TmpReg = ARM::R2;
}
if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
BuildMI(MBB, II, dl, TII.get(ARM::tMOVr), ARM::R12)
BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
.addReg(ARM::R3, false, false, true);
if (Opcode == ARM::tSpill) {
if (FrameReg == ARM::SP)
@ -919,10 +945,10 @@ void ARMRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
MachineBasicBlock::iterator NII = next(II);
if (ValReg == ARM::R3)
BuildMI(MBB, NII, dl, TII.get(ARM::tMOVr), ARM::R2)
BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R2)
.addReg(ARM::R12, false, false, true);
if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
BuildMI(MBB, NII, dl, TII.get(ARM::tMOVr), ARM::R3)
BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
.addReg(ARM::R12, false, false, true);
} else
assert(false && "Unexpected opcode!");
@ -1401,7 +1427,8 @@ void ARMRegisterInfo::emitEpilogue(MachineFunction &MF,
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
TII, *this, dl);
else
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), ARM::SP).addReg(FramePtr);
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::SP)
.addReg(FramePtr);
} else {
if (MBBI->getOpcode() == ARM::tBX_RET &&
&MBB.front() != MBBI &&

View File

@ -54,6 +54,8 @@ public:
const TargetRegisterClass *getPointerRegClass() const;
/// Code Generation virtual methods...
const TargetRegisterClass *
getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const;
const unsigned *getCalleeSavedRegs(const MachineFunction *MF = 0) const;
const TargetRegisterClass* const*

View File

@ -130,17 +130,10 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
ARM::R8, ARM::R10,ARM::R11,
ARM::R7 };
// FP is R7, only low registers available.
static const unsigned THUMB_GPR_AO[] = {
ARM::R2, ARM::R1, ARM::R0,
ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
GPRClass::iterator
GPRClass::allocation_order_begin(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
if (Subtarget.isThumb())
return THUMB_GPR_AO;
if (Subtarget.useThumbBacktraces()) {
if (Subtarget.isR9Reserved())
return ARM_GPR_AO_4;
@ -160,9 +153,8 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
GPRClass::iterator I;
if (Subtarget.isThumb())
I = THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
else if (Subtarget.useThumbBacktraces()) {
if (Subtarget.useThumbBacktraces()) {
if (Subtarget.isR9Reserved()) {
I = ARM_GPR_AO_4 + (sizeof(ARM_GPR_AO_4)/sizeof(unsigned));
} else {
@ -182,6 +174,40 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
}];
}
// Thumb registers are R0-R7 normally. Some instructions can still use
// the general GPR register class above (MOV, e.g.)
def tGPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6, R7]> {
let MethodProtos = [{
iterator allocation_order_begin(const MachineFunction &MF) const;
iterator allocation_order_end(const MachineFunction &MF) const;
}];
// FIXME: We are reserving r3 in Thumb mode in case the PEI needs to use it
// to generate large stack offset. Make it available once we have register
// scavenging.
let MethodBodies = [{
static const unsigned THUMB_tGPR_AO[] = {
ARM::R2, ARM::R1, ARM::R0,
ARM::R4, ARM::R5, ARM::R6, ARM::R7 };
// FP is R7, only low registers available.
tGPRClass::iterator
tGPRClass::allocation_order_begin(const MachineFunction &MF) const {
return THUMB_tGPR_AO;
}
tGPRClass::iterator
tGPRClass::allocation_order_end(const MachineFunction &MF) const {
const TargetMachine &TM = MF.getTarget();
const TargetRegisterInfo *RI = TM.getRegisterInfo();
const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
tGPRClass::iterator I =
THUMB_tGPR_AO + (sizeof(THUMB_tGPR_AO)/sizeof(unsigned));
// Mac OS X requires FP not to be clobbered for backtracing purpose.
return (Subtarget.isTargetDarwin() || RI->hasFP(MF)) ? I-1 : I;
}
}];
}
def SPR : RegisterClass<"ARM", [f32], 32, [S0, S1, S2, S3, S4, S5, S6, S7, S8,
S9, S10, S11, S12, S13, S14, S15, S16, S17, S18, S19, S20, S21, S22,
S23, S24, S25, S26, S27, S28, S29, S30, S31]>;