mirror of
https://github.com/RPCS3/llvm.git
synced 2024-12-16 08:29:43 +00:00
[PowerPC] Use true offset value in "memrix" machine operands
This is the second part of the change to always return "true" offset values from getPreIndexedAddressParts, tackling the case of "memrix" type operands.

This is about instructions like LD/STD that only have a 14-bit field to encode immediate offsets, which are implicitly extended by two zero bits by the machine, so that in effect we can access 16-bit offsets as long as they are a multiple of 4.

The PowerPC back end currently handles such instructions by carrying the 14-bit value (as it will get encoded into the actual machine instructions) in the machine operand fields for such instructions. This means that those values are in fact not the true offset, but rather the offset divided by 4 (and then truncated to an unsigned 14-bit value).

Like in the case fixed in r182012, this makes common code operations on such offset values not work as expected. Furthermore, there doesn't really appear to be any strong reason why we should encode machine operands this way.

This patch therefore changes the encoding of "memrix" type machine operands to simply contain the "true" offset value as a signed immediate value, while enforcing the rules that it must fit in a 16-bit signed value and must also be a multiple of 4. This change must be made simultaneously in all places that access machine operands of this type. However, just about all those changes make the code simpler; in many cases we can now just share the same code for memri and memrix operands.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@182032 91177308-0d34-0410-b5e6-96231b3b80d8
parent 2a5e8c328e
commit 347a5079e1
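The sketch below is an illustrative, standalone restatement of the rule described above, not code from this commit: under the new convention the "memrix" machine operand carries the true byte offset, and the encoder (compare the getMemRIXEncoding hunks further down) derives the 14-bit DS field by shifting right two bits after checking that the value is a signed 16-bit multiple of 4. The helper name encodeMemRIXOffset is invented for this example.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical helper (not an LLVM API): turn a "true" memrix offset into
// the 14-bit DS field that the machine instruction actually encodes.
static uint32_t encodeMemRIXOffset(int64_t Offset) {
  assert(Offset >= -32768 && Offset <= 32767 && "must fit in a signed 16-bit value");
  assert((Offset & 3) == 0 && "memrix offsets must be a multiple of 4");
  return (static_cast<uint32_t>(Offset) >> 2) & 0x3FFF; // keep the low 14 bits
}

int main() {
  // std 4, -8(3): the operand now holds -8; the encoded DS field is 0x3ffe.
  std::printf("0x%x\n", (unsigned)encodeMemRIXOffset(-8));
  // std 4, 16(3): the operand holds 16; the encoded DS field is 4.
  std::printf("0x%x\n", (unsigned)encodeMemRIXOffset(16));
  return 0;
}

Before this change the operand itself stored the already-shifted 14-bit value (offset divided by 4), which is what made generic code that reads these offsets misbehave.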
@@ -346,22 +346,6 @@ public:
       Inst.addOperand(MCOperand::CreateExpr(getExpr()));
   }
 
-  void addDispRIOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    if (Kind == Immediate)
-      Inst.addOperand(MCOperand::CreateImm(getImm()));
-    else
-      Inst.addOperand(MCOperand::CreateExpr(getExpr()));
-  }
-
-  void addDispRIXOperands(MCInst &Inst, unsigned N) const {
-    assert(N == 1 && "Invalid number of operands!");
-    if (Kind == Immediate)
-      Inst.addOperand(MCOperand::CreateImm(getImm() / 4));
-    else
-      Inst.addOperand(MCOperand::CreateExpr(getExpr()));
-  }
-
   StringRef getToken() const {
     assert(Kind == Token && "Invalid access!");
     return StringRef(Tok.Data, Tok.Length);
@@ -137,14 +137,6 @@ void PPCInstPrinter::printU16ImmOperand(const MCInst *MI, unsigned OpNo,
   O << (unsigned short)MI->getOperand(OpNo).getImm();
 }
 
-void PPCInstPrinter::printS16X4ImmOperand(const MCInst *MI, unsigned OpNo,
-                                          raw_ostream &O) {
-  if (MI->getOperand(OpNo).isImm())
-    O << (short)(MI->getOperand(OpNo).getImm()*4);
-  else
-    printOperand(MI, OpNo, O);
-}
-
 void PPCInstPrinter::printBranchOperand(const MCInst *MI, unsigned OpNo,
                                         raw_ostream &O) {
   if (!MI->getOperand(OpNo).isImm())
@@ -191,22 +183,6 @@ void PPCInstPrinter::printMemRegImm(const MCInst *MI, unsigned OpNo,
   O << ')';
 }
 
-void PPCInstPrinter::printMemRegImmShifted(const MCInst *MI, unsigned OpNo,
-                                           raw_ostream &O) {
-  if (MI->getOperand(OpNo).isImm())
-    printS16X4ImmOperand(MI, OpNo, O);
-  else
-    printSymbolLo(MI, OpNo, O);
-  O << '(';
-
-  if (MI->getOperand(OpNo+1).getReg() == PPC::R0)
-    O << "0";
-  else
-    printOperand(MI, OpNo+1, O);
-  O << ')';
-}
-
 void PPCInstPrinter::printMemRegReg(const MCInst *MI, unsigned OpNo,
                                     raw_ostream &O) {
   // When used as the base register, r0 reads constant zero rather than
@@ -50,14 +50,12 @@ public:
   void printU6ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printS16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printU16ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printS16X4ImmOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printBranchOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printAbsAddrOperand(const MCInst *MI, unsigned OpNo, raw_ostream &O);
 
   void printcrbitm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
 
   void printMemRegImm(const MCInst *MI, unsigned OpNo, raw_ostream &O);
-  void printMemRegImmShifted(const MCInst *MI, unsigned OpNo, raw_ostream &O);
   void printMemRegReg(const MCInst *MI, unsigned OpNo, raw_ostream &O);
 
   // FIXME: Remove
@@ -185,7 +185,7 @@ unsigned PPCMCCodeEmitter::getMemRIXEncoding(const MCInst &MI, unsigned OpNo,
 
   const MCOperand &MO = MI.getOperand(OpNo);
   if (MO.isImm())
-    return (getMachineOpValue(MI, MO, Fixups) & 0x3FFF) | RegBits;
+    return ((getMachineOpValue(MI, MO, Fixups) >> 2) & 0x3FFF) | RegBits;
 
   // Add a fixup for the displacement field.
   Fixups.push_back(MCFixup::Create(2, MO.getExpr(),
@@ -237,7 +237,7 @@ unsigned PPCCodeEmitter::getMemRIXEncoding(const MachineInstr &MI,
 
   const MachineOperand &MO = MI.getOperand(OpNo);
   if (MO.isImm())
-    return (getMachineOpValue(MI, MO) & 0x3FFF) | RegBits;
+    return ((getMachineOpValue(MI, MO) >> 2) & 0x3FFF) | RegBits;
 
   MCE.addRelocation(GetRelocation(MO, PPC::reloc_absolute_low_ix));
   return RegBits;
@@ -400,13 +400,13 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
     if (HasFP)
       BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
         .addReg(PPC::X31)
-        .addImm(FPOffset/4)
+        .addImm(FPOffset)
         .addReg(PPC::X1);
 
     if (MustSaveLR)
       BuildMI(MBB, MBBI, dl, TII.get(PPC::STD))
        .addReg(PPC::X0)
-        .addImm(LROffset / 4)
+        .addImm(LROffset)
        .addReg(PPC::X1);
 
     if (!MustSaveCRs.empty())
@@ -500,7 +500,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
   } else if (isInt<16>(NegFrameSize)) {
     BuildMI(MBB, MBBI, dl, TII.get(PPC::STDU), PPC::X1)
       .addReg(PPC::X1)
-      .addImm(NegFrameSize / 4)
+      .addImm(NegFrameSize)
       .addReg(PPC::X1);
   } else {
     BuildMI(MBB, MBBI, dl, TII.get(PPC::LIS8), PPC::X0)
@@ -741,7 +741,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
   if (isPPC64) {
     if (MustSaveLR)
       BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X0)
-        .addImm(LROffset/4).addReg(PPC::X1);
+        .addImm(LROffset).addReg(PPC::X1);
 
     if (!MustSaveCRs.empty())
       BuildMI(MBB, MBBI, dl, TII.get(PPC::LWZ8), PPC::X12)
@@ -749,7 +749,7 @@ void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
 
     if (HasFP)
       BuildMI(MBB, MBBI, dl, TII.get(PPC::LD), PPC::X31)
-        .addImm(FPOffset/4).addReg(PPC::X1);
+        .addImm(FPOffset).addReg(PPC::X1);
 
     if (!MustSaveCRs.empty())
       for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
@@ -116,7 +116,7 @@ namespace {
     /// a base register plus a signed 16-bit displacement [r+imm].
     bool SelectAddrImm(SDValue N, SDValue &Disp,
                        SDValue &Base) {
-      return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG);
+      return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, false);
     }
 
     /// SelectAddrImmOffs - Return true if the operand is valid for a preinc
@@ -145,11 +145,11 @@ namespace {
       return PPCLowering.SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
     }
 
-    /// SelectAddrImmShift - Returns true if the address N can be represented by
-    /// a base register plus a signed 14-bit displacement [r+imm*4]. Suitable
-    /// for use by STD and friends.
-    bool SelectAddrImmShift(SDValue N, SDValue &Disp, SDValue &Base) {
-      return PPCLowering.SelectAddressRegImmShift(N, Disp, Base, *CurDAG);
+    /// SelectAddrImmX4 - Returns true if the address N can be represented by
+    /// a base register plus a signed 16-bit displacement that is a multiple of 4.
+    /// Suitable for use by STD and friends.
+    bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
+      return PPCLowering.SelectAddressRegImm(N, Disp, Base, *CurDAG, true);
     }
 
     // Select an address into a single register.
@@ -1048,10 +1048,12 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
 
 /// Returns true if the address N can be represented by a base register plus
 /// a signed 16-bit displacement [r+imm], and if it is not better
-/// represented as reg+reg.
+/// represented as reg+reg. If Aligned is true, only accept displacements
+/// suitable for STD and friends, i.e. multiples of 4.
 bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
                                             SDValue &Base,
-                                            SelectionDAG &DAG) const {
+                                            SelectionDAG &DAG,
+                                            bool Aligned) const {
   // FIXME dl should come from parent load or store, not from address
   DebugLoc dl = N.getDebugLoc();
   // If this can be more profitably realized as r+r, fail.
@@ -1060,7 +1062,8 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
 
   if (N.getOpcode() == ISD::ADD) {
     short imm = 0;
-    if (isIntS16Immediate(N.getOperand(1), imm)) {
+    if (isIntS16Immediate(N.getOperand(1), imm) &&
+        (!Aligned || (imm & 3) == 0)) {
       Disp = DAG.getTargetConstant(imm, N.getValueType());
       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
@@ -1082,7 +1085,8 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
     }
   } else if (N.getOpcode() == ISD::OR) {
     short imm = 0;
-    if (isIntS16Immediate(N.getOperand(1), imm)) {
+    if (isIntS16Immediate(N.getOperand(1), imm) &&
+        (!Aligned || (imm & 3) == 0)) {
       // If this is an or of disjoint bitfields, we can codegen this as an add
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.
@@ -1103,7 +1107,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
     // If this address fits entirely in a 16-bit sext immediate field, codegen
     // this as "d, 0"
     short Imm;
-    if (isIntS16Immediate(CN, Imm)) {
+    if (isIntS16Immediate(CN, Imm) && (!Aligned || (Imm & 3) == 0)) {
       Disp = DAG.getTargetConstant(Imm, CN->getValueType(0));
       Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                              CN->getValueType(0));
@@ -1111,8 +1115,9 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
     }
 
     // Handle 32-bit sext immediates with LIS + addr mode.
-    if (CN->getValueType(0) == MVT::i32 ||
-        (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
+    if ((CN->getValueType(0) == MVT::i32 ||
+         (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
+        (!Aligned || (CN->getZExtValue() & 3) == 0)) {
       int Addr = (int)CN->getZExtValue();
 
       // Otherwise, break this down into an LIS + disp.
@@ -1160,91 +1165,6 @@ bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
   return true;
 }
 
-/// SelectAddressRegImmShift - Returns true if the address N can be
-/// represented by a base register plus a signed 14-bit displacement
-/// [r+imm*4]. Suitable for use by STD and friends.
-bool PPCTargetLowering::SelectAddressRegImmShift(SDValue N, SDValue &Disp,
-                                                 SDValue &Base,
-                                                 SelectionDAG &DAG) const {
-  // FIXME dl should come from the parent load or store, not the address
-  DebugLoc dl = N.getDebugLoc();
-  // If this can be more profitably realized as r+r, fail.
-  if (SelectAddressRegReg(N, Disp, Base, DAG))
-    return false;
-
-  if (N.getOpcode() == ISD::ADD) {
-    short imm = 0;
-    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
-      Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
-      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
-        Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
-      } else {
-        Base = N.getOperand(0);
-      }
-      return true; // [r+i]
-    } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
-      // Match LOAD (ADD (X, Lo(G))).
-      assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
-             && "Cannot handle constant offsets yet!");
-      Disp = N.getOperand(1).getOperand(0); // The global address.
-      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
-             Disp.getOpcode() == ISD::TargetConstantPool ||
-             Disp.getOpcode() == ISD::TargetJumpTable);
-      Base = N.getOperand(0);
-      return true; // [&g+r]
-    }
-  } else if (N.getOpcode() == ISD::OR) {
-    short imm = 0;
-    if (isIntS16Immediate(N.getOperand(1), imm) && (imm & 3) == 0) {
-      // If this is an or of disjoint bitfields, we can codegen this as an add
-      // (for better address arithmetic) if the LHS and RHS of the OR are
-      // provably disjoint.
-      APInt LHSKnownZero, LHSKnownOne;
-      DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
-      if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
-        // If all of the bits are known zero on the LHS or RHS, the add won't
-        // carry.
-        Base = N.getOperand(0);
-        Disp = DAG.getTargetConstant(((int)imm & 0xFFFF) >> 2, MVT::i32);
-        return true;
-      }
-    }
-  } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
-    // Loading from a constant address. Verify low two bits are clear.
-    if ((CN->getZExtValue() & 3) == 0) {
-      // If this address fits entirely in a 14-bit sext immediate field, codegen
-      // this as "d, 0"
-      short Imm;
-      if (isIntS16Immediate(CN, Imm)) {
-        Disp = DAG.getTargetConstant((unsigned short)Imm >> 2, getPointerTy());
-        Base = DAG.getRegister(PPCSubTarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
-                               CN->getValueType(0));
-        return true;
-      }
-
-      // Fold the low-part of 32-bit absolute addresses into addr mode.
-      if (CN->getValueType(0) == MVT::i32 ||
-          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) {
-        int Addr = (int)CN->getZExtValue();
-
-        // Otherwise, break this down into an LIS + disp.
-        Disp = DAG.getTargetConstant((short)Addr >> 2, MVT::i32);
-        Base = DAG.getTargetConstant((Addr-(signed short)Addr) >> 16, MVT::i32);
-        unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
-        Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base),0);
-        return true;
-      }
-    }
-  }
-
-  Disp = DAG.getTargetConstant(0, getPointerTy());
-  if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N))
-    Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
-  else
-    Base = N;
-  return true; // [r+0]
-}
-
 
 /// getPreIndexedAddressParts - returns true by value, base pointer and
 /// offset pointer and addressing mode by reference if the node's address
@@ -1298,18 +1218,16 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
     return true;
   }
 
-  // LDU/STU use reg+imm*4, others use reg+imm.
+  // LDU/STU can only handle immediates that are a multiple of 4.
   if (VT != MVT::i64) {
-    // reg + imm
-    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, false))
       return false;
   } else {
     // LDU/STU need an address with at least 4-byte alignment.
     if (Alignment < 4)
      return false;
 
-    // reg + imm * 4.
-    if (!SelectAddressRegImmShift(Ptr, Offset, Base, DAG))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, true))
      return false;
   }
 
@@ -6130,7 +6048,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
   if (PPCSubTarget.isPPC64() && PPCSubTarget.isSVR4ABI()) {
     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
             .addReg(PPC::X2)
-            .addImm(TOCOffset / 4)
+            .addImm(TOCOffset)
             .addReg(BufReg);
 
     MIB.setMemRefs(MMOBegin, MMOEnd);
@@ -6158,7 +6076,7 @@ PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
   if (PPCSubTarget.isPPC64()) {
     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
             .addReg(LabelReg)
-            .addImm(LabelOffset / 4)
+            .addImm(LabelOffset)
             .addReg(BufReg);
   } else {
     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
@@ -6231,7 +6149,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   // Reload IP
   if (PVT == MVT::i64) {
     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
-            .addImm(LabelOffset / 4)
+            .addImm(LabelOffset)
             .addReg(BufReg);
   } else {
     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
@@ -6243,7 +6161,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   // Reload SP
   if (PVT == MVT::i64) {
     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
-            .addImm(SPOffset / 4)
+            .addImm(SPOffset)
             .addReg(BufReg);
   } else {
     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
@@ -6258,7 +6176,7 @@ PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
   // Reload TOC
   if (PVT == MVT::i64 && PPCSubTarget.isSVR4ABI()) {
     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
-            .addImm(TOCOffset / 4)
+            .addImm(TOCOffset)
             .addReg(BufReg);
 
     MIB.setMemRefs(MMOBegin, MMOEnd);
@@ -366,21 +366,16 @@ namespace llvm {
 
     /// SelectAddressRegImm - Returns true if the address N can be represented
     /// by a base register plus a signed 16-bit displacement [r+imm], and if it
-    /// is not better represented as reg+reg.
+    /// is not better represented as reg+reg. If Aligned is true, only accept
+    /// displacements suitable for STD and friends, i.e. multiples of 4.
     bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
-                             SelectionDAG &DAG) const;
+                             SelectionDAG &DAG, bool Aligned) const;
 
     /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
     /// represented as an indexed [r+r] operation.
     bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                  SelectionDAG &DAG) const;
 
-    /// SelectAddressRegImmShift - Returns true if the address N can be
-    /// represented by a base register plus a signed 14-bit displacement
-    /// [r+imm*4]. Suitable for use by STD and friends.
-    bool SelectAddressRegImmShift(SDValue N, SDValue &Disp, SDValue &Base,
-                                  SelectionDAG &DAG) const;
-
     Sched::Preference getSchedulingPreference(SDNode *N) const;
 
     /// LowerOperation - Provide custom lowering hooks for some operations.
@@ -287,7 +287,7 @@ def imm16ShiftedSExt : PatLeaf<(imm), [{
 }], HI16>;
 
 // Some r+i load/store instructions (such as LD, STD, LDU, etc.) that require
-// restricted memrix (offset/4) constants are alignment sensitive. If these
+// restricted memrix (4-aligned) constants are alignment sensitive. If these
 // offsets are hidden behind TOC entries than the values of the lower-order
 // bits cannot be checked directly. As a result, we need to also incorporate
 // an alignment check into the relevant patterns.
@@ -492,12 +492,14 @@ def ptr_rc_idx : Operand<iPTR>, PointerLikeRegClass<0> {
 
 def PPCDispRIOperand : AsmOperandClass {
   let Name = "DispRI"; let PredicateMethod = "isS16Imm";
+  let RenderMethod = "addImmOperands";
 }
 def dispRI : Operand<iPTR> {
   let ParserMatchClass = PPCDispRIOperand;
 }
 def PPCDispRIXOperand : AsmOperandClass {
   let Name = "DispRIX"; let PredicateMethod = "isS16ImmX4";
+  let RenderMethod = "addImmOperands";
 }
 def dispRIX : Operand<iPTR> {
   let ParserMatchClass = PPCDispRIXOperand;
@@ -512,8 +514,8 @@ def memrr : Operand<iPTR> {
   let PrintMethod = "printMemRegReg";
   let MIOperandInfo = (ops ptr_rc_nor0:$ptrreg, ptr_rc_idx:$offreg);
 }
-def memrix : Operand<iPTR> { // memri where the imm is shifted 2 bits.
-  let PrintMethod = "printMemRegImmShifted";
+def memrix : Operand<iPTR> { // memri where the imm is 4-aligned.
+  let PrintMethod = "printMemRegImm";
   let MIOperandInfo = (ops dispRIX:$imm, ptr_rc_nor0:$reg);
   let EncoderMethod = "getMemRIXEncoding";
 }
@@ -534,7 +536,7 @@ def pred : Operand<OtherVT> {
 def iaddr : ComplexPattern<iPTR, 2, "SelectAddrImm", [], []>;
 def xaddr : ComplexPattern<iPTR, 2, "SelectAddrIdx", [], []>;
 def xoaddr : ComplexPattern<iPTR, 2, "SelectAddrIdxOnly",[], []>;
-def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmShift", [], []>; // "std"
+def ixaddr : ComplexPattern<iPTR, 2, "SelectAddrImmX4", [], []>; // "std"
 
 // The address in a single register. This is used with the SjLj
 // pseudo-instructions.
@@ -459,9 +459,8 @@ PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
   return false;
 }
 
-// Figure out if the offset in the instruction is shifted right two bits. This
-// is true for instructions like "STD", which the machine implicitly adds two
-// low zeros to.
+// Figure out if the offset in the instruction must be a multiple of 4.
+// This is true for instructions like "STD".
 static bool usesIXAddr(const MachineInstr &MI) {
   unsigned OpC = MI.getOpcode();
 
@@ -554,10 +553,7 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
 
   // Now add the frame object offset to the offset from r1.
   int Offset = MFI->getObjectOffset(FrameIndex);
-  if (!isIXAddr)
-    Offset += MI.getOperand(OffsetOperandNo).getImm();
-  else
-    Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
+  Offset += MI.getOperand(OffsetOperandNo).getImm();
 
   // If we're not using a Frame Pointer that has been set to the value of the
   // SP before having the stack size subtracted from it, then add the stack size
@@ -577,8 +573,6 @@ PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
   if (OpC == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
       (!noImmForm &&
        isInt<16>(Offset) && (!isIXAddr || (Offset & 3) == 0))) {
-    if (isIXAddr)
-      Offset >>= 2; // The actual encoded value has the low two bits zero.
     MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
     return;
   }
@@ -655,11 +649,7 @@ needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
   }
 
   unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
-
-  if (!usesIXAddr(*MI))
-    Offset += MI->getOperand(OffsetOperandNo).getImm();
-  else
-    Offset += MI->getOperand(OffsetOperandNo).getImm() << 2;
+  Offset += MI->getOperand(OffsetOperandNo).getImm();
 
   // It's the load/store FI references that cause issues, as it can be difficult
   // to materialize the offset if it won't fit in the literal field. Estimate
@@ -739,17 +729,7 @@ PPCRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
 
   MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
   unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
-
-  bool isIXAddr = usesIXAddr(MI);
-  if (!isIXAddr)
-    Offset += MI.getOperand(OffsetOperandNo).getImm();
-  else
-    Offset += MI.getOperand(OffsetOperandNo).getImm() << 2;
-
-  // Figure out if the offset in the instruction is shifted right two bits.
-  if (isIXAddr)
-    Offset >>= 2; // The actual encoded value has the low two bits zero.
-
+  Offset += MI.getOperand(OffsetOperandNo).getImm();
   MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
 }
 
@@ -17,3 +17,17 @@ entry:
 ; CHECK-NEXT: stb 4, 2(3)
 ; CHECK-NEXT: blr
+
+define i64* @test64(i64* %base, i64 %val) {
+entry:
+  %arrayidx = getelementptr inbounds i64* %base, i32 -1
+  store i64 %val, i64* %arrayidx, align 8
+  %arrayidx2 = getelementptr inbounds i64* %base, i32 1
+  store i64 %val, i64* %arrayidx2, align 8
+  ret i64* %arrayidx
+}
+; CHECK: @test64
+; CHECK: %entry
+; CHECK-NEXT: stdu 4, -8(3)
+; CHECK-NEXT: std 4, 16(3)
+; CHECK-NEXT: blr
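As a closing illustration (a minimal sketch with invented names such as FakeMemOp and foldFrameOffset, not the PPCRegisterInfo code in the diff above): once the operand stores the true offset, folding an extra frame-object displacement reduces to a plain addition plus range and alignment checks, with the shift-by-two deferred to the encoder.

#include <cstdint>
#include <cstdlib>

// Invented stand-ins for this sketch only.
struct FakeMemOp {
  int64_t Imm;              // the displacement, now always the true byte offset
  bool RequiresMultipleOf4; // true for DS-form users such as LD/STD/STDU
};

// Fold an extra byte offset into the operand; return false if the result
// could not be encoded directly and would have to go through a register.
static bool foldFrameOffset(FakeMemOp &Op, int64_t FrameObjectOffset) {
  int64_t Offset = Op.Imm + FrameObjectOffset;   // no "<< 2" fix-up anymore
  bool FitsIn16 = Offset >= -32768 && Offset <= 32767;
  if (!FitsIn16 || (Op.RequiresMultipleOf4 && (Offset & 3) != 0))
    return false;
  Op.Imm = Offset;                               // the encoder shifts it later
  return true;
}

int main() {
  FakeMemOp Op{16, true};
  return foldFrameOffset(Op, -24) ? EXIT_SUCCESS : EXIT_FAILURE;
}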