mirror of
https://github.com/RPCSX/llvm.git
synced 2024-11-29 14:40:25 +00:00
Use common code for both ARM and Thumb-2 instruction and register info.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@75067 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
8529d28ee3
commit
f1daf7d8ab
@ -215,8 +215,11 @@ static void EmitLiveInCopy(MachineBasicBlock *MBB,
|
||||
--Pos;
|
||||
}
|
||||
|
||||
TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
|
||||
CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
|
||||
bool Emitted = TII.copyRegToReg(*MBB, Pos, VirtReg, PhysReg, RC, RC);
|
||||
assert(Emitted && "Unable to issue a live-in copy instruction!\n");
|
||||
(void) Emitted;
|
||||
|
||||
CopyRegMap.insert(std::make_pair(prior(Pos), VirtReg));
|
||||
if (Coalesced) {
|
||||
if (&*InsertPos == UseMI) ++InsertPos;
|
||||
MBB->erase(UseMI);
|
||||
@ -247,8 +250,10 @@ static void EmitLiveInCopies(MachineBasicBlock *EntryMBB,
|
||||
E = MRI.livein_end(); LI != E; ++LI)
|
||||
if (LI->second) {
|
||||
const TargetRegisterClass *RC = MRI.getRegClass(LI->second);
|
||||
TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
|
||||
LI->second, LI->first, RC, RC);
|
||||
bool Emitted = TII.copyRegToReg(*EntryMBB, EntryMBB->begin(),
|
||||
LI->second, LI->first, RC, RC);
|
||||
assert(Emitted && "Unable to issue a live-in copy instruction!\n");
|
||||
(void) Emitted;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -227,7 +227,7 @@ ARMBaseRegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
|
||||
0
|
||||
};
|
||||
|
||||
if (STI.isThumb()) {
|
||||
if (STI.isThumb1Only()) {
|
||||
return STI.isTargetDarwin()
|
||||
? DarwinThumbCalleeSavedRegClasses : ThumbCalleeSavedRegClasses;
|
||||
}
|
||||
@ -565,7 +565,7 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
|
||||
}
|
||||
|
||||
bool ForceLRSpill = false;
|
||||
if (!LRSpilled && AFI->isThumbFunction()) {
|
||||
if (!LRSpilled && AFI->isThumb1OnlyFunction()) {
|
||||
unsigned FnSize = TII.GetFunctionSizeInBytes(MF);
|
||||
// Force LR to be spilled if the Thumb function size is > 2048. This enables
|
||||
// use of BL to implement far jump. If it turns out that it's not needed
|
||||
@ -607,8 +607,8 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
|
||||
if (CS1Spilled && !UnspilledCS1GPRs.empty()) {
|
||||
for (unsigned i = 0, e = UnspilledCS1GPRs.size(); i != e; ++i) {
|
||||
unsigned Reg = UnspilledCS1GPRs[i];
|
||||
// Don't spiil high register if the function is thumb
|
||||
if (!AFI->isThumbFunction() ||
|
||||
// Don't spill high register if the function is thumb1
|
||||
if (!AFI->isThumb1OnlyFunction() ||
|
||||
isARMLowRegister(Reg) || Reg == ARM::LR) {
|
||||
MF.getRegInfo().setPhysRegUsed(Reg);
|
||||
AFI->setCSRegisterIsSpilled(Reg);
|
||||
@ -618,7 +618,7 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
|
||||
}
|
||||
}
|
||||
} else if (!UnspilledCS2GPRs.empty() &&
|
||||
!AFI->isThumbFunction()) {
|
||||
!AFI->isThumb1OnlyFunction()) {
|
||||
unsigned Reg = UnspilledCS2GPRs.front();
|
||||
MF.getRegInfo().setPhysRegUsed(Reg);
|
||||
AFI->setCSRegisterIsSpilled(Reg);
|
||||
@ -631,7 +631,7 @@ ARMBaseRegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
|
||||
// to materialize a stack offset. If so, either spill one additional
|
||||
// callee-saved register or reserve a special spill slot to facilitate
|
||||
// register scavenging.
|
||||
if (RS && !ExtraCSSpill && !AFI->isThumbFunction()) {
|
||||
if (RS && !ExtraCSSpill && !AFI->isThumb1OnlyFunction()) {
|
||||
MachineFrameInfo *MFI = MF.getFrameInfo();
|
||||
unsigned Size = estimateStackSize(MF, MFI);
|
||||
unsigned Limit = (1 << 12) - 1;
|
||||
@ -730,7 +730,7 @@ unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
|
||||
return ARM::R0;
|
||||
case ARM::R3:
|
||||
// FIXME!
|
||||
return STI.isThumb() ? 0 : ARM::R2;
|
||||
return STI.isThumb1Only() ? 0 : ARM::R2;
|
||||
case ARM::R5:
|
||||
return ARM::R4;
|
||||
case ARM::R7:
|
||||
@ -804,7 +804,7 @@ unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
|
||||
return ARM::R1;
|
||||
case ARM::R2:
|
||||
// FIXME!
|
||||
return STI.isThumb() ? 0 : ARM::R3;
|
||||
return STI.isThumb1Only() ? 0 : ARM::R3;
|
||||
case ARM::R4:
|
||||
return ARM::R5;
|
||||
case ARM::R6:
|
||||
@ -1003,7 +1003,7 @@ static
|
||||
unsigned findScratchRegister(RegScavenger *RS, const TargetRegisterClass *RC,
|
||||
ARMFunctionInfo *AFI) {
|
||||
unsigned Reg = RS ? RS->FindUnusedReg(RC, true) : (unsigned) ARM::R12;
|
||||
assert (!AFI->isThumbFunction());
|
||||
assert (!AFI->isThumb1OnlyFunction());
|
||||
if (Reg == 0)
|
||||
// Try a already spilled CS register.
|
||||
Reg = RS->FindUnusedReg(RC, AFI->getSpilledCSRegisters());
|
||||
|
@ -124,6 +124,7 @@ namespace {
|
||||
const TargetInstrInfo *TII;
|
||||
ARMFunctionInfo *AFI;
|
||||
bool isThumb;
|
||||
bool isThumb1Only;
|
||||
bool isThumb2;
|
||||
public:
|
||||
static char ID;
|
||||
@ -214,6 +215,7 @@ bool ARMConstantIslands::runOnMachineFunction(MachineFunction &Fn) {
|
||||
TII = Fn.getTarget().getInstrInfo();
|
||||
AFI = Fn.getInfo<ARMFunctionInfo>();
|
||||
isThumb = AFI->isThumbFunction();
|
||||
isThumb1Only = AFI->isThumb1OnlyFunction();
|
||||
isThumb2 = AFI->isThumb2Function();
|
||||
|
||||
HasFarJump = false;
|
||||
|
@ -870,7 +870,7 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
|
||||
TLI.getPointerTy());
|
||||
|
||||
SDNode *ResNode;
|
||||
if (Subtarget->isThumb())
|
||||
if (Subtarget->isThumb1Only())
|
||||
ResNode = CurDAG->getTargetNode(ARM::tLDRcp, dl, MVT::i32, MVT::Other,
|
||||
CPIdx, CurDAG->getEntryNode());
|
||||
else {
|
||||
@ -896,18 +896,19 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
|
||||
// Selects to ADDri FI, 0 which in turn will become ADDri SP, imm.
|
||||
int FI = cast<FrameIndexSDNode>(N)->getIndex();
|
||||
SDValue TFI = CurDAG->getTargetFrameIndex(FI, TLI.getPointerTy());
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb1Only()) {
|
||||
return CurDAG->SelectNodeTo(N, ARM::tADDrSPi, MVT::i32, TFI,
|
||||
CurDAG->getTargetConstant(0, MVT::i32));
|
||||
} else {
|
||||
SDValue Ops[] = { TFI, CurDAG->getTargetConstant(0, MVT::i32),
|
||||
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
|
||||
CurDAG->getRegister(0, MVT::i32) };
|
||||
return CurDAG->SelectNodeTo(N, ARM::ADDri, MVT::i32, Ops, 5);
|
||||
return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ? ARM::t2ADDri : ARM::ADDri,
|
||||
MVT::i32, Ops, 5);
|
||||
}
|
||||
}
|
||||
case ISD::ADD: {
|
||||
if (!Subtarget->isThumb())
|
||||
if (!Subtarget->isThumb1Only())
|
||||
break;
|
||||
// Select add sp, c to tADDhirr.
|
||||
SDValue N0 = Op.getOperand(0);
|
||||
@ -938,7 +939,8 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
|
||||
CurDAG->getTargetConstant(ShImm, MVT::i32),
|
||||
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
|
||||
CurDAG->getRegister(0, MVT::i32) };
|
||||
return CurDAG->SelectNodeTo(N, ARM::ADDrs, MVT::i32, Ops, 7);
|
||||
return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
|
||||
ARM::t2ADDrs : ARM::ADDrs, MVT::i32, Ops, 7);
|
||||
}
|
||||
if (isPowerOf2_32(RHSV+1)) { // 2^n-1?
|
||||
SDValue V = Op.getOperand(0);
|
||||
@ -947,7 +949,8 @@ SDNode *ARMDAGToDAGISel::Select(SDValue Op) {
|
||||
CurDAG->getTargetConstant(ShImm, MVT::i32),
|
||||
getAL(CurDAG), CurDAG->getRegister(0, MVT::i32),
|
||||
CurDAG->getRegister(0, MVT::i32) };
|
||||
return CurDAG->SelectNodeTo(N, ARM::RSBrs, MVT::i32, Ops, 7);
|
||||
return CurDAG->SelectNodeTo(N, (Subtarget->hasThumb2()) ?
|
||||
ARM::t2RSBrs : ARM::RSBrs, MVT::i32, Ops, 7);
|
||||
}
|
||||
}
|
||||
break;
|
||||
|
@ -189,11 +189,11 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
setLibcallName(RTLIB::SRL_I128, 0);
|
||||
setLibcallName(RTLIB::SRA_I128, 0);
|
||||
|
||||
if (Subtarget->isThumb())
|
||||
if (Subtarget->isThumb1Only())
|
||||
addRegisterClass(MVT::i32, ARM::tGPRRegisterClass);
|
||||
else
|
||||
addRegisterClass(MVT::i32, ARM::GPRRegisterClass);
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
|
||||
addRegisterClass(MVT::f32, ARM::SPRRegisterClass);
|
||||
addRegisterClass(MVT::f64, ARM::DPRRegisterClass);
|
||||
|
||||
@ -256,7 +256,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
} else {
|
||||
setOperationAction(ISD::MUL, MVT::i64, Expand);
|
||||
setOperationAction(ISD::MULHU, MVT::i32, Expand);
|
||||
if (!Subtarget->isThumb() && !Subtarget->hasV6Ops())
|
||||
if (!Subtarget->isThumb1Only() && !Subtarget->hasV6Ops())
|
||||
setOperationAction(ISD::MULHS, MVT::i32, Expand);
|
||||
}
|
||||
setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
|
||||
@ -310,7 +310,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
}
|
||||
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
|
||||
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb())
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only())
|
||||
// Turn f64->i64 into FMRRD, i64 -> f64 to FMDRR iff target supports vfp2.
|
||||
setOperationAction(ISD::BIT_CONVERT, MVT::i64, Custom);
|
||||
|
||||
@ -340,7 +340,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
setOperationAction(ISD::FCOS, MVT::f64, Expand);
|
||||
setOperationAction(ISD::FREM, MVT::f64, Expand);
|
||||
setOperationAction(ISD::FREM, MVT::f32, Expand);
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
|
||||
setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
|
||||
setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
|
||||
}
|
||||
@ -348,7 +348,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
|
||||
setOperationAction(ISD::FPOW, MVT::f32, Expand);
|
||||
|
||||
// int <-> fp are custom expanded into bit_convert + ARMISD ops.
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb()) {
|
||||
if (!UseSoftFloat && Subtarget->hasVFP2() && !Subtarget->isThumb1Only()) {
|
||||
setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
|
||||
setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
|
||||
setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
|
||||
@ -942,7 +942,7 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
|
||||
// ARM call to a local ARM function is predicable.
|
||||
isLocalARMFunc = !Subtarget->isThumb() && !isExt;
|
||||
// tBX takes a register source operand.
|
||||
if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
|
||||
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
|
||||
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(GV, ARMPCLabelIndex,
|
||||
ARMCP::CPStub, 4);
|
||||
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
|
||||
@ -961,7 +961,7 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
|
||||
isARMFunc = !Subtarget->isThumb() || isStub;
|
||||
// tBX takes a register source operand.
|
||||
const char *Sym = S->getSymbol();
|
||||
if (isARMFunc && Subtarget->isThumb() && !Subtarget->hasV5TOps()) {
|
||||
if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
|
||||
ARMConstantPoolValue *CPV = new ARMConstantPoolValue(Sym, ARMPCLabelIndex,
|
||||
ARMCP::CPStub, 4);
|
||||
SDValue CPAddr = DAG.getTargetConstantPool(CPV, getPointerTy(), 4);
|
||||
@ -977,7 +977,7 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
|
||||
|
||||
// FIXME: handle tail calls differently.
|
||||
unsigned CallOpc;
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb1Only()) {
|
||||
if (!Subtarget->hasV5TOps() && (!isDirect || isARMFunc))
|
||||
CallOpc = ARMISD::CALL_NOLINK;
|
||||
else
|
||||
@ -987,7 +987,7 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {
|
||||
? (isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL)
|
||||
: ARMISD::CALL_NOLINK;
|
||||
}
|
||||
if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb()) {
|
||||
if (CallOpc == ARMISD::CALL_NOLINK && !Subtarget->isThumb1Only()) {
|
||||
// implicit def LR - LR mustn't be allocated as GRP:$dst of CALL_NOLINK
|
||||
Chain = DAG.getCopyToReg(Chain, dl, ARM::LR, DAG.getUNDEF(MVT::i32),InFlag);
|
||||
InFlag = Chain.getValue(1);
|
||||
@ -1345,7 +1345,7 @@ ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
|
||||
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
||||
|
||||
TargetRegisterClass *RC;
|
||||
if (AFI->isThumbFunction())
|
||||
if (AFI->isThumb1OnlyFunction())
|
||||
RC = ARM::tGPRRegisterClass;
|
||||
else
|
||||
RC = ARM::GPRRegisterClass;
|
||||
@ -1423,7 +1423,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
|
||||
RC = ARM::SPRRegisterClass;
|
||||
else if (FloatABIType == FloatABI::Hard && RegVT == MVT::f64)
|
||||
RC = ARM::DPRRegisterClass;
|
||||
else if (AFI->isThumbFunction())
|
||||
else if (AFI->isThumb1OnlyFunction())
|
||||
RC = ARM::tGPRRegisterClass;
|
||||
else
|
||||
RC = ARM::GPRRegisterClass;
|
||||
@ -1501,7 +1501,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {
|
||||
SmallVector<SDValue, 4> MemOps;
|
||||
for (; NumGPRs < 4; ++NumGPRs) {
|
||||
TargetRegisterClass *RC;
|
||||
if (AFI->isThumbFunction())
|
||||
if (AFI->isThumb1OnlyFunction())
|
||||
RC = ARM::tGPRRegisterClass;
|
||||
else
|
||||
RC = ARM::GPRRegisterClass;
|
||||
@ -1544,46 +1544,46 @@ static bool isFloatingPointZero(SDValue Op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool isLegalCmpImmediate(unsigned C, bool isThumb) {
|
||||
return ( isThumb && (C & ~255U) == 0) ||
|
||||
(!isThumb && ARM_AM::getSOImmVal(C) != -1);
|
||||
static bool isLegalCmpImmediate(unsigned C, bool isThumb1Only) {
|
||||
return ( isThumb1Only && (C & ~255U) == 0) ||
|
||||
(!isThumb1Only && ARM_AM::getSOImmVal(C) != -1);
|
||||
}
|
||||
|
||||
/// Returns appropriate ARM CMP (cmp) and corresponding condition code for
|
||||
/// the given operands.
|
||||
static SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
|
||||
SDValue &ARMCC, SelectionDAG &DAG, bool isThumb,
|
||||
SDValue &ARMCC, SelectionDAG &DAG, bool isThumb1Only,
|
||||
DebugLoc dl) {
|
||||
if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
|
||||
unsigned C = RHSC->getZExtValue();
|
||||
if (!isLegalCmpImmediate(C, isThumb)) {
|
||||
if (!isLegalCmpImmediate(C, isThumb1Only)) {
|
||||
// Constant does not fit, try adjusting it by one?
|
||||
switch (CC) {
|
||||
default: break;
|
||||
case ISD::SETLT:
|
||||
case ISD::SETGE:
|
||||
if (isLegalCmpImmediate(C-1, isThumb)) {
|
||||
if (isLegalCmpImmediate(C-1, isThumb1Only)) {
|
||||
CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
|
||||
RHS = DAG.getConstant(C-1, MVT::i32);
|
||||
}
|
||||
break;
|
||||
case ISD::SETULT:
|
||||
case ISD::SETUGE:
|
||||
if (C > 0 && isLegalCmpImmediate(C-1, isThumb)) {
|
||||
if (C > 0 && isLegalCmpImmediate(C-1, isThumb1Only)) {
|
||||
CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
|
||||
RHS = DAG.getConstant(C-1, MVT::i32);
|
||||
}
|
||||
break;
|
||||
case ISD::SETLE:
|
||||
case ISD::SETGT:
|
||||
if (isLegalCmpImmediate(C+1, isThumb)) {
|
||||
if (isLegalCmpImmediate(C+1, isThumb1Only)) {
|
||||
CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
|
||||
RHS = DAG.getConstant(C+1, MVT::i32);
|
||||
}
|
||||
break;
|
||||
case ISD::SETULE:
|
||||
case ISD::SETUGT:
|
||||
if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb)) {
|
||||
if (C < 0xffffffff && isLegalCmpImmediate(C+1, isThumb1Only)) {
|
||||
CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
|
||||
RHS = DAG.getConstant(C+1, MVT::i32);
|
||||
}
|
||||
@ -1632,7 +1632,7 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
|
||||
if (LHS.getValueType() == MVT::i32) {
|
||||
SDValue ARMCC;
|
||||
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
|
||||
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb(), dl);
|
||||
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
|
||||
return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMCC, CCR,Cmp);
|
||||
}
|
||||
|
||||
@ -1667,7 +1667,7 @@ static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
|
||||
if (LHS.getValueType() == MVT::i32) {
|
||||
SDValue ARMCC;
|
||||
SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
|
||||
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb(), dl);
|
||||
SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMCC, DAG, ST->isThumb1Only(), dl);
|
||||
return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
|
||||
Chain, Dest, ARMCC, CCR,Cmp);
|
||||
}
|
||||
@ -1970,7 +1970,7 @@ static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
|
||||
return SDValue();
|
||||
|
||||
// If we are in thumb mode, we don't have RRX.
|
||||
if (ST->isThumb()) return SDValue();
|
||||
if (ST->isThumb1Only()) return SDValue();
|
||||
|
||||
// Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
|
||||
SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
|
||||
@ -2810,7 +2810,7 @@ static bool isLegalAddressImmediate(int64_t V, MVT VT,
|
||||
if (!VT.isSimple())
|
||||
return false;
|
||||
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME for thumb2
|
||||
if (V < 0)
|
||||
return false;
|
||||
|
||||
@ -2876,7 +2876,7 @@ bool ARMTargetLowering::isLegalAddressingMode(const AddrMode &AM,
|
||||
case 0: // no scale reg, must be "r+i" or "r", or "i".
|
||||
break;
|
||||
case 1:
|
||||
if (Subtarget->isThumb())
|
||||
if (Subtarget->isThumb()) // FIXME for thumb2
|
||||
return false;
|
||||
// FALL THROUGH.
|
||||
default:
|
||||
@ -3130,7 +3130,7 @@ ARMTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
|
||||
// GCC RS6000 Constraint Letters
|
||||
switch (Constraint[0]) {
|
||||
case 'l':
|
||||
if (Subtarget->isThumb())
|
||||
if (Subtarget->isThumb1Only())
|
||||
return std::make_pair(0U, ARM::tGPRRegisterClass);
|
||||
else
|
||||
return std::make_pair(0U, ARM::GPRRegisterClass);
|
||||
@ -3211,10 +3211,16 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
|
||||
switch (Constraint) {
|
||||
case 'I':
|
||||
if (Subtarget->isThumb()) {
|
||||
// This must be a constant between 0 and 255, for ADD immediates.
|
||||
if (Subtarget->isThumb1Only()) {
|
||||
// This must be a constant between 0 and 255, for ADD
|
||||
// immediates.
|
||||
if (CVal >= 0 && CVal <= 255)
|
||||
break;
|
||||
} else if (Subtarget->isThumb2()) {
|
||||
// A constant that can be used as an immediate value in a
|
||||
// data-processing instruction.
|
||||
if (ARM_AM::getT2SOImmVal(CVal) != -1)
|
||||
break;
|
||||
} else {
|
||||
// A constant that can be used as an immediate value in a
|
||||
// data-processing instruction.
|
||||
@ -3224,7 +3230,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'J':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME thumb2
|
||||
// This must be a constant between -255 and -1, for negated ADD
|
||||
// immediates. This can be used in GCC with an "n" modifier that
|
||||
// prints the negated value, for use with SUB instructions. It is
|
||||
@ -3241,13 +3247,21 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'K':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb1Only()) {
|
||||
// A 32-bit value where only one byte has a nonzero value. Exclude
|
||||
// zero to match GCC. This constraint is used by GCC internally for
|
||||
// constants that can be loaded with a move/shift combination.
|
||||
// It is not useful otherwise but is implemented for compatibility.
|
||||
if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
|
||||
break;
|
||||
} else if (Subtarget->isThumb2()) {
|
||||
// A constant whose bitwise inverse can be used as an immediate
|
||||
// value in a data-processing instruction. This can be used in GCC
|
||||
// with a "B" modifier that prints the inverted value, for use with
|
||||
// BIC and MVN instructions. It is not useful otherwise but is
|
||||
// implemented for compatibility.
|
||||
if (ARM_AM::getT2SOImmVal(~CVal) != -1)
|
||||
break;
|
||||
} else {
|
||||
// A constant whose bitwise inverse can be used as an immediate
|
||||
// value in a data-processing instruction. This can be used in GCC
|
||||
@ -3260,11 +3274,19 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'L':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb1Only()) {
|
||||
// This must be a constant between -7 and 7,
|
||||
// for 3-operand ADD/SUB immediate instructions.
|
||||
if (CVal >= -7 && CVal < 7)
|
||||
break;
|
||||
} else if (Subtarget->isThumb2()) {
|
||||
// A constant whose negation can be used as an immediate value in a
|
||||
// data-processing instruction. This can be used in GCC with an "n"
|
||||
// modifier that prints the negated value, for use with SUB
|
||||
// instructions. It is not useful otherwise but is implemented for
|
||||
// compatibility.
|
||||
if (ARM_AM::getT2SOImmVal(-CVal) != -1)
|
||||
break;
|
||||
} else {
|
||||
// A constant whose negation can be used as an immediate value in a
|
||||
// data-processing instruction. This can be used in GCC with an "n"
|
||||
@ -3277,7 +3299,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'M':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME thumb2
|
||||
// This must be a multiple of 4 between 0 and 1020, for
|
||||
// ADD sp + immediate.
|
||||
if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
|
||||
@ -3292,7 +3314,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'N':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME thumb2
|
||||
// This must be a constant between 0 and 31, for shift amounts.
|
||||
if (CVal >= 0 && CVal <= 31)
|
||||
break;
|
||||
@ -3300,7 +3322,7 @@ void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
return;
|
||||
|
||||
case 'O':
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME thumb2
|
||||
// This must be a multiple of 4 between -508 and 508, for
|
||||
// ADD/SUB sp = sp + immediate.
|
||||
if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
|
||||
|
@ -119,12 +119,12 @@ let Defs = [SP], Uses = [SP] in {
|
||||
def tADJCALLSTACKUP :
|
||||
PseudoInst<(outs), (ins i32imm:$amt1, i32imm:$amt2),
|
||||
"@ tADJCALLSTACKUP $amt1",
|
||||
[(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb]>;
|
||||
[(ARMcallseq_end imm:$amt1, imm:$amt2)]>, Requires<[IsThumb1Only]>;
|
||||
|
||||
def tADJCALLSTACKDOWN :
|
||||
PseudoInst<(outs), (ins i32imm:$amt),
|
||||
"@ tADJCALLSTACKDOWN $amt",
|
||||
[(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb]>;
|
||||
[(ARMcallseq_start imm:$amt)]>, Requires<[IsThumb1Only]>;
|
||||
}
|
||||
|
||||
let isNotDuplicable = 1 in
|
||||
@ -155,7 +155,7 @@ def tADDspi : T1It<(outs GPR:$dst), (ins GPR:$lhs, i32imm:$rhs),
|
||||
//
|
||||
|
||||
let isReturn = 1, isTerminator = 1 in {
|
||||
def tBX_RET : TI<(outs), (ins), "bx lr", [(ARMretflag)]>;
|
||||
def tBX_RET : T1I<(outs), (ins), "bx lr", [(ARMretflag)]>;
|
||||
// Alternative return instruction used by vararg functions.
|
||||
def tBX_RET_vararg : T1I<(outs), (ins tGPR:$target), "bx $target", []>;
|
||||
}
|
||||
@ -471,7 +471,7 @@ def tORR : T1It<(outs tGPR:$dst), (ins tGPR:$lhs, tGPR:$rhs),
|
||||
def tREV : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"rev $dst, $src",
|
||||
[(set tGPR:$dst, (bswap tGPR:$src))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
def tREV16 : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"rev16 $dst, $src",
|
||||
@ -480,7 +480,7 @@ def tREV16 : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
(or (and (shl tGPR:$src, (i32 8)), 0xFF00),
|
||||
(or (and (srl tGPR:$src, (i32 8)), 0xFF0000),
|
||||
(and (shl tGPR:$src, (i32 8)), 0xFF000000)))))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
def tREVSH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"revsh $dst, $src",
|
||||
@ -488,7 +488,7 @@ def tREVSH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
(sext_inreg
|
||||
(or (srl (and tGPR:$src, 0xFFFF), (i32 8)),
|
||||
(shl tGPR:$src, (i32 8))), i16))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
// rotate right register
|
||||
let Defs = [CPSR] in
|
||||
@ -540,13 +540,13 @@ def tSUBspi : T1It<(outs tGPR:$dst), (ins tGPR:$lhs, i32imm:$rhs),
|
||||
def tSXTB : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"sxtb $dst, $src",
|
||||
[(set tGPR:$dst, (sext_inreg tGPR:$src, i8))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
// sign-extend short
|
||||
def tSXTH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"sxth $dst, $src",
|
||||
[(set tGPR:$dst, (sext_inreg tGPR:$src, i16))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
// test
|
||||
let isCommutable = 1, Defs = [CPSR] in
|
||||
@ -558,13 +558,13 @@ def tTST : T1I<(outs), (ins tGPR:$lhs, tGPR:$rhs),
|
||||
def tUXTB : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"uxtb $dst, $src",
|
||||
[(set tGPR:$dst, (and tGPR:$src, 0xFF))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
// zero-extend short
|
||||
def tUXTH : T1I<(outs tGPR:$dst), (ins tGPR:$src),
|
||||
"uxth $dst, $src",
|
||||
[(set tGPR:$dst, (and tGPR:$src, 0xFFFF))]>,
|
||||
Requires<[IsThumb, HasV6]>;
|
||||
Requires<[IsThumb1Only, HasV6]>;
|
||||
|
||||
|
||||
// Conditional move tMOVCCr - Used to implement the Thumb SELECT_CC DAG operation.
|
||||
|
@ -432,9 +432,9 @@ multiclass T2I_bin_rrot<string opc, PatFrag opnode> {
|
||||
//
|
||||
|
||||
let isNotDuplicable = 1 in
|
||||
def t2PICADD : T2XI<(outs tGPR:$dst), (ins tGPR:$lhs, pclabel:$cp),
|
||||
def t2PICADD : T2XI<(outs GPR:$dst), (ins GPR:$lhs, pclabel:$cp),
|
||||
"$cp:\n\tadd $dst, pc",
|
||||
[(set tGPR:$dst, (ARMpic_add tGPR:$lhs, imm:$cp))]>;
|
||||
[(set GPR:$dst, (ARMpic_add GPR:$lhs, imm:$cp))]>;
|
||||
|
||||
|
||||
// LEApcrel - Load a pc-relative address into a register without offending the
|
||||
|
@ -119,6 +119,7 @@ public:
|
||||
JumpTableUId(0), ConstPoolEntryUId(0) {}
|
||||
|
||||
bool isThumbFunction() const { return isThumb; }
|
||||
bool isThumb1OnlyFunction() const { return isThumb && !hasThumb2; }
|
||||
bool isThumb2Function() const { return isThumb && hasThumb2; }
|
||||
|
||||
unsigned getAlign() const { return Align; }
|
||||
|
@ -224,7 +224,7 @@ unsigned ARMTargetAsmInfo<BaseTAI>::getInlineAsmLength(const char *s) const {
|
||||
} else if (inTextSection) {
|
||||
// An instruction
|
||||
atInsnStart = false;
|
||||
if (Subtarget->isThumb()) {
|
||||
if (Subtarget->isThumb()) { // FIXME thumb2
|
||||
// BL and BLX <non-reg> are 4 bytes, all others 2.
|
||||
if (strncmp(Str, "blx", strlen("blx"))==0) {
|
||||
const char* p = Str+3;
|
||||
|
@ -71,7 +71,7 @@ Thumb2InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
|
||||
|
||||
// FIXME
|
||||
switch (MBB.back().getOpcode()) {
|
||||
//case ARM::t2BX_RET:
|
||||
case ARM::t2BX_RET:
|
||||
// case ARM::LDM_RET:
|
||||
case ARM::t2B: // Uncond branch.
|
||||
case ARM::t2BR_JTr: // Jumptable branch.
|
||||
@ -90,285 +90,3 @@ Thumb2InstrInfo::BlockHasNoFallThrough(const MachineBasicBlock &MBB) const {
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
bool Thumb2InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator I,
|
||||
unsigned DestReg, unsigned SrcReg,
|
||||
const TargetRegisterClass *DestRC,
|
||||
const TargetRegisterClass *SrcRC) const {
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
if (I != MBB.end()) DL = I->getDebugLoc();
|
||||
|
||||
if (DestRC == ARM::GPRRegisterClass) {
|
||||
if (SrcRC == ARM::GPRRegisterClass) {
|
||||
return ARMBaseInstrInfo::copyRegToReg(MBB, I, DestReg, SrcReg, DestRC, SrcRC);
|
||||
} else if (SrcRC == ARM::tGPRRegisterClass) {
|
||||
BuildMI(MBB, I, DL, get(ARM::tMOVlor2hir), DestReg).addReg(SrcReg);
|
||||
return true;
|
||||
}
|
||||
} else if (DestRC == ARM::tGPRRegisterClass) {
|
||||
if (SrcRC == ARM::GPRRegisterClass) {
|
||||
BuildMI(MBB, I, DL, get(ARM::tMOVhir2lor), DestReg).addReg(SrcReg);
|
||||
return true;
|
||||
} else if (SrcRC == ARM::tGPRRegisterClass) {
|
||||
BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg).addReg(SrcReg);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
bool Thumb2InstrInfo::isMoveInstr(const MachineInstr &MI,
|
||||
unsigned &SrcReg, unsigned &DstReg,
|
||||
unsigned& SrcSubIdx, unsigned& DstSubIdx) const {
|
||||
SrcSubIdx = DstSubIdx = 0; // No sub-registers.
|
||||
|
||||
unsigned oc = MI.getOpcode();
|
||||
switch (oc) {
|
||||
default:
|
||||
return false;
|
||||
case ARM::tMOVr:
|
||||
case ARM::tMOVhir2lor:
|
||||
case ARM::tMOVlor2hir:
|
||||
case ARM::tMOVhir2hir:
|
||||
assert(MI.getDesc().getNumOperands() >= 2 &&
|
||||
MI.getOperand(0).isReg() &&
|
||||
MI.getOperand(1).isReg() &&
|
||||
"Invalid Thumb MOV instruction");
|
||||
SrcReg = MI.getOperand(1).getReg();
|
||||
DstReg = MI.getOperand(0).getReg();
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
unsigned Thumb2InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const {
|
||||
switch (MI->getOpcode()) {
|
||||
default: break;
|
||||
case ARM::tRestore:
|
||||
if (MI->getOperand(1).isFI() &&
|
||||
MI->getOperand(2).isImm() &&
|
||||
MI->getOperand(2).getImm() == 0) {
|
||||
FrameIndex = MI->getOperand(1).getIndex();
|
||||
return MI->getOperand(0).getReg();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned Thumb2InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const {
|
||||
switch (MI->getOpcode()) {
|
||||
default: break;
|
||||
case ARM::tSpill:
|
||||
if (MI->getOperand(1).isFI() &&
|
||||
MI->getOperand(2).isImm() &&
|
||||
MI->getOperand(2).getImm() == 0) {
|
||||
FrameIndex = MI->getOperand(1).getIndex();
|
||||
return MI->getOperand(0).getReg();
|
||||
}
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool Thumb2InstrInfo::
|
||||
canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const {
|
||||
if (Ops.size() != 1) return false;
|
||||
|
||||
unsigned OpNum = Ops[0];
|
||||
unsigned Opc = MI->getOpcode();
|
||||
switch (Opc) {
|
||||
default: break;
|
||||
case ARM::tMOVr:
|
||||
case ARM::tMOVlor2hir:
|
||||
case ARM::tMOVhir2lor:
|
||||
case ARM::tMOVhir2hir: {
|
||||
if (OpNum == 0) { // move -> store
|
||||
unsigned SrcReg = MI->getOperand(1).getReg();
|
||||
if (RI.isPhysicalRegister(SrcReg) && !isARMLowRegister(SrcReg))
|
||||
// tSpill cannot take a high register operand.
|
||||
return false;
|
||||
} else { // move -> load
|
||||
unsigned DstReg = MI->getOperand(0).getReg();
|
||||
if (RI.isPhysicalRegister(DstReg) && !isARMLowRegister(DstReg))
|
||||
// tRestore cannot target a high register operand.
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
void Thumb2InstrInfo::
|
||||
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
unsigned SrcReg, bool isKill, int FI,
|
||||
const TargetRegisterClass *RC) const {
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
if (I != MBB.end()) DL = I->getDebugLoc();
|
||||
|
||||
assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
|
||||
|
||||
if (RC == ARM::tGPRRegisterClass) {
|
||||
BuildMI(MBB, I, DL, get(ARM::tSpill))
|
||||
.addReg(SrcReg, getKillRegState(isKill))
|
||||
.addFrameIndex(FI).addImm(0);
|
||||
}
|
||||
}
|
||||
|
||||
void Thumb2InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
|
||||
bool isKill,
|
||||
SmallVectorImpl<MachineOperand> &Addr,
|
||||
const TargetRegisterClass *RC,
|
||||
SmallVectorImpl<MachineInstr*> &NewMIs) const{
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
unsigned Opc = 0;
|
||||
|
||||
assert(RC == ARM::GPRRegisterClass && "Unknown regclass!");
|
||||
if (RC == ARM::GPRRegisterClass) {
|
||||
Opc = Addr[0].isFI() ? ARM::tSpill : ARM::tSTR;
|
||||
}
|
||||
|
||||
MachineInstrBuilder MIB =
|
||||
BuildMI(MF, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill));
|
||||
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
|
||||
MIB.addOperand(Addr[i]);
|
||||
NewMIs.push_back(MIB);
|
||||
return;
|
||||
}
|
||||
|
||||
void Thumb2InstrInfo::
|
||||
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
|
||||
unsigned DestReg, int FI,
|
||||
const TargetRegisterClass *RC) const {
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
if (I != MBB.end()) DL = I->getDebugLoc();
|
||||
|
||||
assert(RC == ARM::tGPRRegisterClass && "Unknown regclass!");
|
||||
|
||||
if (RC == ARM::tGPRRegisterClass) {
|
||||
BuildMI(MBB, I, DL, get(ARM::tRestore), DestReg)
|
||||
.addFrameIndex(FI).addImm(0);
|
||||
}
|
||||
}
|
||||
|
||||
void Thumb2InstrInfo::
|
||||
loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
|
||||
SmallVectorImpl<MachineOperand> &Addr,
|
||||
const TargetRegisterClass *RC,
|
||||
SmallVectorImpl<MachineInstr*> &NewMIs) const {
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
unsigned Opc = 0;
|
||||
|
||||
if (RC == ARM::GPRRegisterClass) {
|
||||
Opc = Addr[0].isFI() ? ARM::tRestore : ARM::tLDR;
|
||||
}
|
||||
|
||||
MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
|
||||
for (unsigned i = 0, e = Addr.size(); i != e; ++i)
|
||||
MIB.addOperand(Addr[i]);
|
||||
NewMIs.push_back(MIB);
|
||||
return;
|
||||
}
|
||||
|
||||
bool Thumb2InstrInfo::
|
||||
spillCalleeSavedRegisters(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
const std::vector<CalleeSavedInfo> &CSI) const {
|
||||
if (CSI.empty())
|
||||
return false;
|
||||
|
||||
DebugLoc DL = DebugLoc::getUnknownLoc();
|
||||
if (MI != MBB.end()) DL = MI->getDebugLoc();
|
||||
|
||||
MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, get(ARM::tPUSH));
|
||||
for (unsigned i = CSI.size(); i != 0; --i) {
|
||||
unsigned Reg = CSI[i-1].getReg();
|
||||
// Add the callee-saved register as live-in. It's killed at the spill.
|
||||
MBB.addLiveIn(Reg);
|
||||
MIB.addReg(Reg, RegState::Kill);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool Thumb2InstrInfo::
|
||||
restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
const std::vector<CalleeSavedInfo> &CSI) const {
|
||||
MachineFunction &MF = *MBB.getParent();
|
||||
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
||||
if (CSI.empty())
|
||||
return false;
|
||||
|
||||
bool isVarArg = AFI->getVarArgsRegSaveSize() > 0;
|
||||
MachineInstr *PopMI = MF.CreateMachineInstr(get(ARM::tPOP),MI->getDebugLoc());
|
||||
for (unsigned i = CSI.size(); i != 0; --i) {
|
||||
unsigned Reg = CSI[i-1].getReg();
|
||||
if (Reg == ARM::LR) {
|
||||
// Special epilogue for vararg functions. See emitEpilogue
|
||||
if (isVarArg)
|
||||
continue;
|
||||
Reg = ARM::PC;
|
||||
PopMI->setDesc(get(ARM::tPOP_RET));
|
||||
MI = MBB.erase(MI);
|
||||
}
|
||||
PopMI->addOperand(MachineOperand::CreateReg(Reg, true));
|
||||
}
|
||||
|
||||
// It's illegal to emit pop instruction without operands.
|
||||
if (PopMI->getNumOperands() > 0)
|
||||
MBB.insert(MI, PopMI);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
MachineInstr *Thumb2InstrInfo::
|
||||
foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops, int FI) const {
|
||||
if (Ops.size() != 1) return NULL;
|
||||
|
||||
unsigned OpNum = Ops[0];
|
||||
unsigned Opc = MI->getOpcode();
|
||||
MachineInstr *NewMI = NULL;
|
||||
switch (Opc) {
|
||||
default: break;
|
||||
case ARM::tMOVr:
|
||||
case ARM::tMOVlor2hir:
|
||||
case ARM::tMOVhir2lor:
|
||||
case ARM::tMOVhir2hir: {
|
||||
if (OpNum == 0) { // move -> store
|
||||
unsigned SrcReg = MI->getOperand(1).getReg();
|
||||
bool isKill = MI->getOperand(1).isKill();
|
||||
if (RI.isPhysicalRegister(SrcReg) && !isARMLowRegister(SrcReg))
|
||||
// tSpill cannot take a high register operand.
|
||||
break;
|
||||
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tSpill))
|
||||
.addReg(SrcReg, getKillRegState(isKill))
|
||||
.addFrameIndex(FI).addImm(0);
|
||||
} else { // move -> load
|
||||
unsigned DstReg = MI->getOperand(0).getReg();
|
||||
if (RI.isPhysicalRegister(DstReg) && !isARMLowRegister(DstReg))
|
||||
// tRestore cannot target a high register operand.
|
||||
break;
|
||||
bool isDead = MI->getOperand(0).isDead();
|
||||
NewMI = BuildMI(MF, MI->getDebugLoc(), get(ARM::tRestore))
|
||||
.addReg(DstReg, RegState::Define | getDeadRegState(isDead))
|
||||
.addFrameIndex(FI).addImm(0);
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return NewMI;
|
||||
}
|
||||
|
@ -42,67 +42,6 @@ public:
|
||||
/// always be able to get register info as well (through this method).
|
||||
///
|
||||
const Thumb2RegisterInfo &getRegisterInfo() const { return RI; }
|
||||
|
||||
bool copyRegToReg(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator I,
|
||||
unsigned DestReg, unsigned SrcReg,
|
||||
const TargetRegisterClass *DestRC,
|
||||
const TargetRegisterClass *SrcRC) const;
|
||||
|
||||
|
||||
|
||||
|
||||
bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
const std::vector<CalleeSavedInfo> &CSI) const;
|
||||
bool restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
const std::vector<CalleeSavedInfo> &CSI) const;
|
||||
|
||||
bool isMoveInstr(const MachineInstr &MI,
|
||||
unsigned &SrcReg, unsigned &DstReg,
|
||||
unsigned &SrcSubIdx, unsigned &DstSubIdx) const;
|
||||
unsigned isLoadFromStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const;
|
||||
unsigned isStoreToStackSlot(const MachineInstr *MI,
|
||||
int &FrameIndex) const;
|
||||
|
||||
void storeRegToStackSlot(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MBBI,
|
||||
unsigned SrcReg, bool isKill, int FrameIndex,
|
||||
const TargetRegisterClass *RC) const;
|
||||
|
||||
void storeRegToAddr(MachineFunction &MF, unsigned SrcReg, bool isKill,
|
||||
SmallVectorImpl<MachineOperand> &Addr,
|
||||
const TargetRegisterClass *RC,
|
||||
SmallVectorImpl<MachineInstr*> &NewMIs) const;
|
||||
|
||||
void loadRegFromStackSlot(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MBBI,
|
||||
unsigned DestReg, int FrameIndex,
|
||||
const TargetRegisterClass *RC) const;
|
||||
|
||||
void loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
|
||||
SmallVectorImpl<MachineOperand> &Addr,
|
||||
const TargetRegisterClass *RC,
|
||||
SmallVectorImpl<MachineInstr*> &NewMIs) const;
|
||||
|
||||
bool canFoldMemoryOperand(const MachineInstr *MI,
|
||||
const SmallVectorImpl<unsigned> &Ops) const;
|
||||
|
||||
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
int FrameIndex) const;
|
||||
|
||||
MachineInstr* foldMemoryOperandImpl(MachineFunction &MF,
|
||||
MachineInstr* MI,
|
||||
const SmallVectorImpl<unsigned> &Ops,
|
||||
MachineInstr* LoadMI) const {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
|
@ -34,11 +34,6 @@
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
using namespace llvm;
|
||||
|
||||
static cl::opt<bool>
|
||||
Thumb2RegScavenging("enable-thumb2-reg-scavenging",
|
||||
cl::Hidden,
|
||||
cl::desc("Enable register scavenging on Thumb-2"));
|
||||
|
||||
Thumb2RegisterInfo::Thumb2RegisterInfo(const ARMBaseInstrInfo &tii,
|
||||
const ARMSubtarget &sti)
|
||||
: ARMBaseRegisterInfo(tii, sti) {
|
||||
@ -58,699 +53,11 @@ void Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
|
||||
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
|
||||
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci), DestReg)
|
||||
.addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg);
|
||||
.addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0);
|
||||
}
|
||||
|
||||
const TargetRegisterClass*
|
||||
Thumb2RegisterInfo::getPhysicalRegisterRegClass(unsigned Reg, MVT VT) const {
|
||||
if (isARMLowRegister(Reg))
|
||||
return ARM::tGPRRegisterClass;
|
||||
switch (Reg) {
|
||||
default:
|
||||
break;
|
||||
case ARM::R8: case ARM::R9: case ARM::R10: case ARM::R11:
|
||||
case ARM::R12: case ARM::SP: case ARM::LR: case ARM::PC:
|
||||
return ARM::GPRRegisterClass;
|
||||
}
|
||||
|
||||
return TargetRegisterInfo::getPhysicalRegisterRegClass(Reg, VT);
|
||||
}
|
||||
|
||||
bool
|
||||
Thumb2RegisterInfo::requiresRegisterScavenging(const MachineFunction &MF) const {
|
||||
return Thumb2RegScavenging;
|
||||
}
|
||||
|
||||
bool Thumb2RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
|
||||
const MachineFrameInfo *FFI = MF.getFrameInfo();
|
||||
unsigned CFSize = FFI->getMaxCallFrameSize();
|
||||
// It's not always a good idea to include the call frame as part of the
|
||||
// stack frame. ARM (especially Thumb) has small immediate offset to
|
||||
// address the stack frame. So a large call frame can cause poor codegen
|
||||
// and may even makes it impossible to scavenge a register.
|
||||
if (CFSize >= ((1 << 8) - 1) * 4 / 2) // Half of imm8 * 4
|
||||
return false;
|
||||
|
||||
return !MF.getFrameInfo()->hasVarSizedObjects();
|
||||
}
|
||||
|
||||
/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
|
||||
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
|
||||
/// in a register using mov / mvn sequences or load the immediate from a
|
||||
/// constpool entry.
|
||||
static
|
||||
void emitThumbRegPlusImmInReg(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator &MBBI,
|
||||
unsigned DestReg, unsigned BaseReg,
|
||||
int NumBytes, bool CanChangeCC,
|
||||
const TargetInstrInfo &TII,
|
||||
const Thumb2RegisterInfo& MRI,
|
||||
DebugLoc dl) {
|
||||
bool isHigh = !isARMLowRegister(DestReg) ||
|
||||
(BaseReg != 0 && !isARMLowRegister(BaseReg));
|
||||
bool isSub = false;
|
||||
// Subtract doesn't have high register version. Load the negative value
|
||||
// if either base or dest register is a high register. Also, if do not
|
||||
// issue sub as part of the sequence if condition register is to be
|
||||
// preserved.
|
||||
if (NumBytes < 0 && !isHigh && CanChangeCC) {
|
||||
isSub = true;
|
||||
NumBytes = -NumBytes;
|
||||
}
|
||||
unsigned LdReg = DestReg;
|
||||
if (DestReg == ARM::SP) {
|
||||
assert(BaseReg == ARM::SP && "Unexpected!");
|
||||
LdReg = ARM::R3;
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
|
||||
.addReg(ARM::R3, RegState::Kill);
|
||||
}
|
||||
|
||||
if (NumBytes <= 255 && NumBytes >= 0)
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
|
||||
else if (NumBytes < 0 && NumBytes >= -255) {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg).addImm(NumBytes);
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), LdReg)
|
||||
.addReg(LdReg, RegState::Kill);
|
||||
} else
|
||||
MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, NumBytes);
|
||||
|
||||
// Emit add / sub.
|
||||
int Opc = (isSub) ? ARM::tSUBrr : (isHigh ? ARM::tADDhirr : ARM::tADDrr);
|
||||
const MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl,
|
||||
TII.get(Opc), DestReg);
|
||||
if (DestReg == ARM::SP || isSub)
|
||||
MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
|
||||
else
|
||||
MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
|
||||
if (DestReg == ARM::SP)
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
|
||||
.addReg(ARM::R12, RegState::Kill);
|
||||
}
|
||||
|
||||
/// calcNumMI - Returns the number of instructions required to materialize
|
||||
/// the specific add / sub r, c instruction.
|
||||
static unsigned calcNumMI(int Opc, int ExtraOpc, unsigned Bytes,
|
||||
unsigned NumBits, unsigned Scale) {
|
||||
unsigned NumMIs = 0;
|
||||
unsigned Chunk = ((1 << NumBits) - 1) * Scale;
|
||||
|
||||
if (Opc == ARM::tADDrSPi) {
|
||||
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
|
||||
Bytes -= ThisVal;
|
||||
NumMIs++;
|
||||
NumBits = 8;
|
||||
Scale = 1; // Followed by a number of tADDi8.
|
||||
Chunk = ((1 << NumBits) - 1) * Scale;
|
||||
}
|
||||
|
||||
NumMIs += Bytes / Chunk;
|
||||
if ((Bytes % Chunk) != 0)
|
||||
NumMIs++;
|
||||
if (ExtraOpc)
|
||||
NumMIs++;
|
||||
return NumMIs;
|
||||
}
|
||||
|
||||
/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
|
||||
/// a destreg = basereg + immediate in Thumb code.
|
||||
static
|
||||
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator &MBBI,
|
||||
unsigned DestReg, unsigned BaseReg,
|
||||
int NumBytes, const TargetInstrInfo &TII,
|
||||
const Thumb2RegisterInfo& MRI,
|
||||
DebugLoc dl) {
|
||||
bool isSub = NumBytes < 0;
|
||||
unsigned Bytes = (unsigned)NumBytes;
|
||||
if (isSub) Bytes = -NumBytes;
|
||||
bool isMul4 = (Bytes & 3) == 0;
|
||||
bool isTwoAddr = false;
|
||||
bool DstNotEqBase = false;
|
||||
unsigned NumBits = 1;
|
||||
unsigned Scale = 1;
|
||||
int Opc = 0;
|
||||
int ExtraOpc = 0;
|
||||
|
||||
if (DestReg == BaseReg && BaseReg == ARM::SP) {
|
||||
assert(isMul4 && "Thumb sp inc / dec size must be multiple of 4!");
|
||||
NumBits = 7;
|
||||
Scale = 4;
|
||||
Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
|
||||
isTwoAddr = true;
|
||||
} else if (!isSub && BaseReg == ARM::SP) {
|
||||
// r1 = add sp, 403
|
||||
// =>
|
||||
// r1 = add sp, 100 * 4
|
||||
// r1 = add r1, 3
|
||||
if (!isMul4) {
|
||||
Bytes &= ~3;
|
||||
ExtraOpc = ARM::tADDi3;
|
||||
}
|
||||
NumBits = 8;
|
||||
Scale = 4;
|
||||
Opc = ARM::tADDrSPi;
|
||||
} else {
|
||||
// sp = sub sp, c
|
||||
// r1 = sub sp, c
|
||||
// r8 = sub sp, c
|
||||
if (DestReg != BaseReg)
|
||||
DstNotEqBase = true;
|
||||
NumBits = 8;
|
||||
Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
|
||||
isTwoAddr = true;
|
||||
}
|
||||
|
||||
unsigned NumMIs = calcNumMI(Opc, ExtraOpc, Bytes, NumBits, Scale);
|
||||
unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;
|
||||
if (NumMIs > Threshold) {
|
||||
// This will expand into too many instructions. Load the immediate from a
|
||||
// constpool entry.
|
||||
emitThumbRegPlusImmInReg(MBB, MBBI, DestReg, BaseReg, NumBytes, true, TII,
|
||||
MRI, dl);
|
||||
return;
|
||||
}
|
||||
|
||||
if (DstNotEqBase) {
|
||||
if (isARMLowRegister(DestReg) && isARMLowRegister(BaseReg)) {
|
||||
// If both are low registers, emit DestReg = add BaseReg, max(Imm, 7)
|
||||
unsigned Chunk = (1 << 3) - 1;
|
||||
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
|
||||
Bytes -= ThisVal;
|
||||
BuildMI(MBB, MBBI, dl,TII.get(isSub ? ARM::tSUBi3 : ARM::tADDi3), DestReg)
|
||||
.addReg(BaseReg, RegState::Kill).addImm(ThisVal);
|
||||
} else {
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
|
||||
.addReg(BaseReg, RegState::Kill);
|
||||
}
|
||||
BaseReg = DestReg;
|
||||
}
|
||||
|
||||
unsigned Chunk = ((1 << NumBits) - 1) * Scale;
|
||||
while (Bytes) {
|
||||
unsigned ThisVal = (Bytes > Chunk) ? Chunk : Bytes;
|
||||
Bytes -= ThisVal;
|
||||
ThisVal /= Scale;
|
||||
// Build the new tADD / tSUB.
|
||||
if (isTwoAddr)
|
||||
BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
|
||||
.addReg(DestReg).addImm(ThisVal);
|
||||
else {
|
||||
bool isKill = BaseReg != ARM::SP;
|
||||
BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
|
||||
.addReg(BaseReg, getKillRegState(isKill)).addImm(ThisVal);
|
||||
BaseReg = DestReg;
|
||||
|
||||
if (Opc == ARM::tADDrSPi) {
|
||||
// r4 = add sp, imm
|
||||
// r4 = add r4, imm
|
||||
// ...
|
||||
NumBits = 8;
|
||||
Scale = 1;
|
||||
Chunk = ((1 << NumBits) - 1) * Scale;
|
||||
Opc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
|
||||
isTwoAddr = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (ExtraOpc)
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg)
|
||||
.addReg(DestReg, RegState::Kill)
|
||||
.addImm(((unsigned)NumBytes) & 3);
|
||||
}
|
||||
|
||||
static void emitSPUpdate(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator &MBBI,
|
||||
const TargetInstrInfo &TII, DebugLoc dl,
|
||||
const Thumb2RegisterInfo &MRI,
|
||||
int NumBytes) {
|
||||
emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, ARM::SP, NumBytes, TII,
|
||||
MRI, dl);
|
||||
}
|
||||
|
||||
void Thumb2RegisterInfo::
|
||||
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator I) const {
|
||||
if (!hasReservedCallFrame(MF)) {
|
||||
// If we have alloca, convert as follows:
|
||||
// ADJCALLSTACKDOWN -> sub, sp, sp, amount
|
||||
// ADJCALLSTACKUP -> add, sp, sp, amount
|
||||
MachineInstr *Old = I;
|
||||
DebugLoc dl = Old->getDebugLoc();
|
||||
unsigned Amount = Old->getOperand(0).getImm();
|
||||
if (Amount != 0) {
|
||||
// We need to keep the stack aligned properly. To do this, we round the
|
||||
// amount of space needed for the outgoing arguments up to the next
|
||||
// alignment boundary.
|
||||
unsigned Align = MF.getTarget().getFrameInfo()->getStackAlignment();
|
||||
Amount = (Amount+Align-1)/Align*Align;
|
||||
|
||||
// Replace the pseudo instruction with a new instruction...
|
||||
unsigned Opc = Old->getOpcode();
|
||||
if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
|
||||
emitSPUpdate(MBB, I, TII, dl, *this, -Amount);
|
||||
} else {
|
||||
assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
|
||||
emitSPUpdate(MBB, I, TII, dl, *this, Amount);
|
||||
}
|
||||
}
|
||||
}
|
||||
MBB.erase(I);
|
||||
}
|
||||
|
||||
/// emitThumbConstant - Emit a series of instructions to materialize a
|
||||
/// constant.
|
||||
static void emitThumbConstant(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator &MBBI,
|
||||
unsigned DestReg, int Imm,
|
||||
const TargetInstrInfo &TII,
|
||||
const Thumb2RegisterInfo& MRI,
|
||||
DebugLoc dl) {
|
||||
bool isSub = Imm < 0;
|
||||
if (isSub) Imm = -Imm;
|
||||
|
||||
int Chunk = (1 << 8) - 1;
|
||||
int ThisVal = (Imm > Chunk) ? Chunk : Imm;
|
||||
Imm -= ThisVal;
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), DestReg).addImm(ThisVal);
|
||||
if (Imm > 0)
|
||||
emitThumbRegPlusImmediate(MBB, MBBI, DestReg, DestReg, Imm, TII, MRI, dl);
|
||||
if (isSub)
|
||||
BuildMI(MBB, MBBI, dl, TII.get(ARM::tNEG), DestReg)
|
||||
.addReg(DestReg, RegState::Kill);
|
||||
}
|
||||
|
||||
void Thumb2RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
|
||||
int SPAdj, RegScavenger *RS) const{
|
||||
unsigned i = 0;
|
||||
MachineInstr &MI = *II;
|
||||
MachineBasicBlock &MBB = *MI.getParent();
|
||||
MachineFunction &MF = *MBB.getParent();
|
||||
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
|
||||
DebugLoc dl = MI.getDebugLoc();
|
||||
|
||||
while (!MI.getOperand(i).isFI()) {
|
||||
++i;
|
||||
assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
|
||||
}
|
||||
|
||||
unsigned FrameReg = ARM::SP;
|
||||
int FrameIndex = MI.getOperand(i).getIndex();
|
||||
int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
|
||||
MF.getFrameInfo()->getStackSize() + SPAdj;
|
||||
|
||||
if (AFI->isGPRCalleeSavedArea1Frame(FrameIndex))
|
||||
Offset -= AFI->getGPRCalleeSavedArea1Offset();
|
||||
else if (AFI->isGPRCalleeSavedArea2Frame(FrameIndex))
|
||||
Offset -= AFI->getGPRCalleeSavedArea2Offset();
|
||||
else if (hasFP(MF)) {
|
||||
assert(SPAdj == 0 && "Unexpected");
|
||||
// There is alloca()'s in this function, must reference off the frame
|
||||
// pointer instead.
|
||||
FrameReg = getFrameRegister(MF);
|
||||
Offset -= AFI->getFramePtrSpillOffset();
|
||||
}
|
||||
|
||||
unsigned Opcode = MI.getOpcode();
|
||||
const TargetInstrDesc &Desc = MI.getDesc();
|
||||
unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
|
||||
|
||||
if (Opcode == ARM::tADDrSPi) {
|
||||
Offset += MI.getOperand(i+1).getImm();
|
||||
|
||||
// Can't use tADDrSPi if it's based off the frame pointer.
|
||||
unsigned NumBits = 0;
|
||||
unsigned Scale = 1;
|
||||
if (FrameReg != ARM::SP) {
|
||||
Opcode = ARM::tADDi3;
|
||||
MI.setDesc(TII.get(ARM::tADDi3));
|
||||
NumBits = 3;
|
||||
} else {
|
||||
NumBits = 8;
|
||||
Scale = 4;
|
||||
assert((Offset & 3) == 0 &&
|
||||
"Thumb add/sub sp, #imm immediate must be multiple of 4!");
|
||||
}
|
||||
|
||||
if (Offset == 0) {
|
||||
// Turn it into a move.
|
||||
MI.setDesc(TII.get(ARM::tMOVhir2lor));
|
||||
MI.getOperand(i).ChangeToRegister(FrameReg, false);
|
||||
MI.RemoveOperand(i+1);
|
||||
return;
|
||||
}
|
||||
|
||||
// Common case: small offset, fits into instruction.
|
||||
unsigned Mask = (1 << NumBits) - 1;
|
||||
if (((Offset / Scale) & ~Mask) == 0) {
|
||||
// Replace the FrameIndex with sp / fp
|
||||
MI.getOperand(i).ChangeToRegister(FrameReg, false);
|
||||
MI.getOperand(i+1).ChangeToImmediate(Offset / Scale);
|
||||
return;
|
||||
}
|
||||
|
||||
unsigned DestReg = MI.getOperand(0).getReg();
|
||||
unsigned Bytes = (Offset > 0) ? Offset : -Offset;
|
||||
unsigned NumMIs = calcNumMI(Opcode, 0, Bytes, NumBits, Scale);
|
||||
// MI would expand into a large number of instructions. Don't try to
|
||||
// simplify the immediate.
|
||||
if (NumMIs > 2) {
|
||||
emitThumbRegPlusImmediate(MBB, II, DestReg, FrameReg, Offset, TII,
|
||||
*this, dl);
|
||||
MBB.erase(II);
|
||||
return;
|
||||
}
|
||||
|
||||
if (Offset > 0) {
|
||||
// Translate r0 = add sp, imm to
|
||||
// r0 = add sp, 255*4
|
||||
// r0 = add r0, (imm - 255*4)
|
||||
MI.getOperand(i).ChangeToRegister(FrameReg, false);
|
||||
MI.getOperand(i+1).ChangeToImmediate(Mask);
|
||||
Offset = (Offset - Mask * Scale);
|
||||
MachineBasicBlock::iterator NII = next(II);
|
||||
emitThumbRegPlusImmediate(MBB, NII, DestReg, DestReg, Offset, TII,
|
||||
*this, dl);
|
||||
} else {
|
||||
// Translate r0 = add sp, -imm to
|
||||
// r0 = -imm (this is then translated into a series of instructons)
|
||||
// r0 = add r0, sp
|
||||
emitThumbConstant(MBB, II, DestReg, Offset, TII, *this, dl);
|
||||
MI.setDesc(TII.get(ARM::tADDhirr));
|
||||
MI.getOperand(i).ChangeToRegister(DestReg, false, false, true);
|
||||
MI.getOperand(i+1).ChangeToRegister(FrameReg, false);
|
||||
}
|
||||
return;
|
||||
} else {
|
||||
unsigned ImmIdx = 0;
|
||||
int InstrOffs = 0;
|
||||
unsigned NumBits = 0;
|
||||
unsigned Scale = 1;
|
||||
switch (AddrMode) {
|
||||
case ARMII::AddrModeT1_s: {
|
||||
ImmIdx = i+1;
|
||||
InstrOffs = MI.getOperand(ImmIdx).getImm();
|
||||
NumBits = (FrameReg == ARM::SP) ? 8 : 5;
|
||||
Scale = 4;
|
||||
break;
|
||||
}
|
||||
default:
|
||||
LLVM_UNREACHABLE("Unsupported addressing mode!");
|
||||
}
|
||||
|
||||
Offset += InstrOffs * Scale;
|
||||
assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
|
||||
|
||||
// Common case: small offset, fits into instruction.
|
||||
MachineOperand &ImmOp = MI.getOperand(ImmIdx);
|
||||
int ImmedOffset = Offset / Scale;
|
||||
unsigned Mask = (1 << NumBits) - 1;
|
||||
if ((unsigned)Offset <= Mask * Scale) {
|
||||
// Replace the FrameIndex with sp
|
||||
MI.getOperand(i).ChangeToRegister(FrameReg, false);
|
||||
ImmOp.ChangeToImmediate(ImmedOffset);
|
||||
return;
|
||||
}
|
||||
|
||||
bool isThumSpillRestore = Opcode == ARM::tRestore || Opcode == ARM::tSpill;
|
||||
if (AddrMode == ARMII::AddrModeT1_s) {
|
||||
// Thumb tLDRspi, tSTRspi. These will change to instructions that use
|
||||
// a different base register.
|
||||
NumBits = 5;
|
||||
Mask = (1 << NumBits) - 1;
|
||||
}
|
||||
// If this is a thumb spill / restore, we will be using a constpool load to
|
||||
// materialize the offset.
|
||||
if (AddrMode == ARMII::AddrModeT1_s && isThumSpillRestore)
|
||||
ImmOp.ChangeToImmediate(0);
|
||||
else {
|
||||
// Otherwise, it didn't fit. Pull in what we can to simplify the immed.
|
||||
ImmedOffset = ImmedOffset & Mask;
|
||||
ImmOp.ChangeToImmediate(ImmedOffset);
|
||||
Offset &= ~(Mask*Scale);
|
||||
}
|
||||
}
|
||||
|
||||
// If we get here, the immediate doesn't fit into the instruction. We folded
|
||||
// as much as possible above, handle the rest, providing a register that is
|
||||
// SP+LargeImm.
|
||||
assert(Offset && "This code isn't needed if offset already handled!");
  if (Desc.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    unsigned TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tRestore) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
                                 Offset, false, TII, *this, dl);
      else {
        emitLoadConstPool(MBB, II, dl, TmpReg, Offset);
        UseRR = true;
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
                                *this, dl);
    MI.setDesc(TII.get(ARM::tLDR));
    MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode.
      MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
    else // tLDR has an extra register operand.
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else if (Desc.mayStore()) {
    // FIXME! This is horrific!!! We need register scavenging.
    // Our temporary workaround has marked r3 unavailable. Of course, r3 is
    // also an ABI register, so it's possible that it is the register being
    // stored here. If that's the case, we do the following:
    // r12 = r2
    // Use r2 to materialize sp + offset
    // str r3, r2
    // r2 = r12
    unsigned ValReg = MI.getOperand(0).getReg();
    unsigned TmpReg = ARM::R3;
    bool UseRR = false;
    if (ValReg == ARM::R3) {
      BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
        .addReg(ARM::R2, RegState::Kill);
      TmpReg = ARM::R2;
    }
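    // When r3 itself is the value being stored, r2 serves as the scratch
    // register instead; its original contents were parked in r12 above and
    // are restored after the store below.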
    if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
      BuildMI(MBB, II, dl, TII.get(ARM::tMOVlor2hir), ARM::R12)
        .addReg(ARM::R3, RegState::Kill);
    if (Opcode == ARM::tSpill) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, TmpReg, FrameReg,
                                 Offset, false, TII, *this, dl);
      else {
        emitLoadConstPool(MBB, II, dl, TmpReg, Offset);
        UseRR = true;
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, TmpReg, FrameReg, Offset, TII,
                                *this, dl);
    MI.setDesc(TII.get(ARM::tSTR));
    MI.getOperand(i).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR) // Use [reg, reg] addrmode.
      MI.addOperand(MachineOperand::CreateReg(FrameReg, false));
    else // tSTR has an extra register operand.
      MI.addOperand(MachineOperand::CreateReg(0, false));

    MachineBasicBlock::iterator NII = next(II);
    if (ValReg == ARM::R3)
      BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R2)
        .addReg(ARM::R12, RegState::Kill);
    if (TmpReg == ARM::R3 && AFI->isR3LiveIn())
      BuildMI(MBB, NII, dl, TII.get(ARM::tMOVhir2lor), ARM::R3)
        .addReg(ARM::R12, RegState::Kill);
  } else
    assert(false && "Unexpected opcode!");
}
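
// emitPrologue reserves the vararg register save area, records the
// callee-save spill area layout, sets up the frame pointer when one is
// required, and then allocates the rest of the stack.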
void Thumb2RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  unsigned NumBytes = MFI->getStackSize();
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  DebugLoc dl = (MBBI != MBB.end() ?
                 MBBI->getDebugLoc() : DebugLoc::getUnknownLoc());

  // Check if R3 is live in. It might have to be used as a scratch register.
  for (MachineRegisterInfo::livein_iterator I = MF.getRegInfo().livein_begin(),
       E = MF.getRegInfo().livein_end(); I != E; ++I) {
    if (I->first == ARM::R3) {
      AFI->setR3IsLiveIn(true);
      break;
    }
  }

  // Thumb add/sub sp, imm8 instructions implicitly multiply the offset by 4,
  // so keep the stack size a multiple of 4.
  NumBytes = (NumBytes + 3) & ~3;
  MFI->setStackSize(NumBytes);

  // Determine the size of each callee-save spill area and record which frame
  // index belongs to which area.
  unsigned GPRCS1Size = 0, GPRCS2Size = 0, DPRCSSize = 0;
  int FramePtrSpillFI = 0;

  if (VARegSaveSize)
    emitSPUpdate(MBB, MBBI, TII, dl, *this, -VARegSaveSize);

  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
    return;
  }
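
  // Three spill areas are tracked: GPR area 1 (r4-r7 and lr), GPR area 2
  // (r8-r11, kept separate only on Darwin), and the DPR area for the
  // double-precision VFP callee-saved registers.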
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    int FI = CSI[i].getFrameIdx();
    switch (Reg) {
    case ARM::R4:
    case ARM::R5:
    case ARM::R6:
    case ARM::R7:
    case ARM::LR:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      AFI->addGPRCalleeSavedArea1Frame(FI);
      GPRCS1Size += 4;
      break;
    case ARM::R8:
    case ARM::R9:
    case ARM::R10:
    case ARM::R11:
      if (Reg == FramePtr)
        FramePtrSpillFI = FI;
      if (STI.isTargetDarwin()) {
        AFI->addGPRCalleeSavedArea2Frame(FI);
        GPRCS2Size += 4;
      } else {
        AFI->addGPRCalleeSavedArea1Frame(FI);
        GPRCS1Size += 4;
      }
      break;
    default:
      AFI->addDPRCalleeSavedAreaFrame(FI);
      DPRCSSize += 8;
    }
  }
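
  // Skip past the callee-saved register push (tPUSH), if present, so the rest
  // of the prologue is inserted after it.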
  if (MBBI != MBB.end() && MBBI->getOpcode() == ARM::tPUSH) {
    ++MBBI;
    if (MBBI != MBB.end())
      dl = MBBI->getDebugLoc();
  }

  // Darwin ABI requires FP to point to the stack slot that contains the
  // previous FP.
  if (STI.isTargetDarwin() || hasFP(MF)) {
    MachineInstrBuilder MIB =
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tADDrSPi), FramePtr)
        .addFrameIndex(FramePtrSpillFI).addImm(0);
  }
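
  // Spill area offsets are measured from the bottom of the fully allocated
  // frame, so GPR area 1 ends up closest to the incoming SP, followed by
  // GPR area 2, the DPR area, and finally the locals and spill slots.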
  // Determine starting offsets of spill areas.
  unsigned DPRCSOffset = NumBytes - (GPRCS1Size + GPRCS2Size + DPRCSSize);
  unsigned GPRCS2Offset = DPRCSOffset + DPRCSSize;
  unsigned GPRCS1Offset = GPRCS2Offset + GPRCS2Size;
  AFI->setFramePtrSpillOffset(MFI->getObjectOffset(FramePtrSpillFI) + NumBytes);
  AFI->setGPRCalleeSavedArea1Offset(GPRCS1Offset);
  AFI->setGPRCalleeSavedArea2Offset(GPRCS2Offset);
  AFI->setDPRCalleeSavedAreaOffset(DPRCSOffset);

  NumBytes = DPRCSOffset;
  if (NumBytes) {
    // Allocate the remainder of the stack; insert the SP adjustment after all
    // the callee-save spills.
    emitSPUpdate(MBB, MBBI, TII, dl, *this, -NumBytes);
  }

  if (STI.isTargetELF() && hasFP(MF)) {
    MFI->setOffsetAdjustment(MFI->getOffsetAdjustment() -
                             AFI->getFramePtrSpillOffset());
  }

  AFI->setGPRCalleeSavedArea1Size(GPRCS1Size);
  AFI->setGPRCalleeSavedArea2Size(GPRCS2Size);
  AFI->setDPRCalleeSavedAreaSize(DPRCSSize);
}

static bool isCalleeSavedRegister(unsigned Reg, const unsigned *CSRegs) {
  for (unsigned i = 0; CSRegs[i]; ++i)
    if (Reg == CSRegs[i])
      return true;
  return false;
}

bool Thumb2RegisterInfo::
requiresRegisterScavenging(const MachineFunction &MF) const {
  // FIXME
  return false;
}

static bool isCSRestore(MachineInstr *MI, const unsigned *CSRegs) {
  return (MI->getOpcode() == ARM::tRestore &&
          MI->getOperand(1).isFI() &&
          isCalleeSavedRegister(MI->getOperand(0).getReg(), CSRegs));
}

void Thumb2RegisterInfo::emitEpilogue(MachineFunction &MF,
                                      MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  assert((MBBI->getOpcode() == ARM::tBX_RET ||
          MBBI->getOpcode() == ARM::tPOP_RET) &&
         "Can only insert epilog into returning blocks");
  DebugLoc dl = MBBI->getDebugLoc();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  unsigned VARegSaveSize = AFI->getVarArgsRegSaveSize();
  int NumBytes = (int)MFI->getStackSize();

  if (!AFI->hasStackFrame()) {
    if (NumBytes != 0)
      emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
  } else {
    // Unwind MBBI to point to first LDR / FLDD.
    const unsigned *CSRegs = getCalleeSavedRegs();
    if (MBBI != MBB.begin()) {
      do
        --MBBI;
      while (MBBI != MBB.begin() && isCSRestore(MBBI, CSRegs));
      if (!isCSRestore(MBBI, CSRegs))
        ++MBBI;
    }
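
    // MBBI now points at the first callee-save restore (or the return
    // instruction), so the stack adjustments below are inserted before the
    // restores.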
    // Move SP to start of FP callee save spill area.
    NumBytes -= (AFI->getGPRCalleeSavedArea1Size() +
                 AFI->getGPRCalleeSavedArea2Size() +
                 AFI->getDPRCalleeSavedAreaSize());

    if (hasFP(MF)) {
      NumBytes = AFI->getFramePtrSpillOffset() - NumBytes;
      // Reset SP based on frame pointer only if the stack frame extends beyond
      // frame pointer stack slot or target is ELF and the function has FP.
      if (NumBytes)
        emitThumbRegPlusImmediate(MBB, MBBI, ARM::SP, FramePtr, -NumBytes,
                                  TII, *this, dl);
      else
        BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVlor2hir), ARM::SP)
          .addReg(FramePtr);
    } else {
      if (MBBI->getOpcode() == ARM::tBX_RET &&
          &MBB.front() != MBBI &&
          prior(MBBI)->getOpcode() == ARM::tPOP) {
        MachineBasicBlock::iterator PMBBI = prior(MBBI);
        emitSPUpdate(MBB, PMBBI, TII, dl, *this, NumBytes);
      } else
        emitSPUpdate(MBB, MBBI, TII, dl, *this, NumBytes);
    }
  }

  if (VARegSaveSize) {
    // Epilogue for vararg functions: pop LR to R3 and branch off it.
    // FIXME: Verify this is still ok when R3 is no longer being reserved.
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tPOP)).addReg(ARM::R3);

    emitSPUpdate(MBB, MBBI, TII, dl, *this, VARegSaveSize);

    BuildMI(MBB, MBBI, dl, TII.get(ARM::tBX_RET_vararg)).addReg(ARM::R3);
    MBB.erase(MBBI);
  }
}

@ -36,23 +36,7 @@ public:
                         ARMCC::CondCodes Pred = ARMCC::AL,
                         unsigned PredReg = 0) const;

  /// Code Generation virtual methods...
  const TargetRegisterClass *
    getPhysicalRegisterRegClass(unsigned Reg, MVT VT = MVT::Other) const;

  bool requiresRegisterScavenging(const MachineFunction &MF) const;

  bool hasReservedCallFrame(MachineFunction &MF) const;

  void eliminateCallFramePseudoInstr(MachineFunction &MF,
                                     MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I) const;

  void eliminateFrameIndex(MachineBasicBlock::iterator II,
                           int SPAdj, RegScavenger *RS = NULL) const;

  void emitPrologue(MachineFunction &MF) const;
  void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const;
};
}

@ -1,4 +1,4 @@
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep smmul | count 1
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep smull | count 1
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep umull | count 1

define i32 @smulhi(i32 %x, i32 %y) {
@ -1,5 +1,3 @@
; XFAIL: *
; fixme
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mov | count 3
; RUN: llvm-as < %s | llc -march=thumb -mattr=+thumb2 | grep mvn | count 1