Temporarily revert r68552. This was causing a failure in the self-hosting LLVM
builds.

--- Reverse-merging (from foreign repository) r68552 into '.':
U    test/CodeGen/X86/tls8.ll
U    test/CodeGen/X86/tls10.ll
U    test/CodeGen/X86/tls2.ll
U    test/CodeGen/X86/tls6.ll
U    lib/Target/X86/X86Instr64bit.td
U    lib/Target/X86/X86InstrSSE.td
U    lib/Target/X86/X86InstrInfo.td
U    lib/Target/X86/X86RegisterInfo.cpp
U    lib/Target/X86/X86ISelLowering.cpp
U    lib/Target/X86/X86CodeEmitter.cpp
U    lib/Target/X86/X86FastISel.cpp
U    lib/Target/X86/X86InstrInfo.h
U    lib/Target/X86/X86ISelDAGToDAG.cpp
U    lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
U    lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
U    lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.h
U    lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h
U    lib/Target/X86/X86ISelLowering.h
U    lib/Target/X86/X86InstrInfo.cpp
U    lib/Target/X86/X86InstrBuilder.h
U    lib/Target/X86/X86RegisterInfo.td

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@68560 91177308-0d34-0410-b5e6-96231b3b80d8
parent caf6129aba
commit 044b5344c4
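The hunks below consistently shrink the canonical x86 memory reference from five MachineOperands back to four, dropping the segment-register operand that r68552 had appended. As orientation, a minimal sketch of the operand layout (the index names are ours; only the order and the X86AddrNumOperands values are taken from the X86InstrInfo.h hunk below):

  // Hypothetical illustration of an x86 memory reference's operand layout,
  // as checked by isMem()/isLeaMem() in X86InstrInfo.h.
  enum X86MemOperandIndex {
    MemBase  = 0,  // base register (or frame index)
    MemScale = 1,  // scale immediate: 1, 2, 4, or 8
    MemIndex = 2,  // index register
    MemDisp  = 3   // displacement: imm, global, CP index, or jump table entry
    // r68552 had appended MemSegment = 4 (a segment register); this revert
    // drops it, restoring X86AddrNumOperands to 4.
  };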
lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.cpp
@@ -566,8 +566,9 @@ void X86ATTAsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op) {
  }
}

void X86ATTAsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
                                            const char *Modifier){
void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
                                         const char *Modifier){
  assert(isMem(MI, Op) && "Invalid memory reference!");
  MachineOperand BaseReg = MI->getOperand(Op);
  MachineOperand IndexReg = MI->getOperand(Op+2);
  const MachineOperand &DispSpec = MI->getOperand(Op+3);
@@ -610,17 +611,6 @@ void X86ATTAsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
  }
}

void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
                                         const char *Modifier){
  assert(isMem(MI, Op) && "Invalid memory reference!");
  MachineOperand Segment = MI->getOperand(Op+4);
  if (Segment.getReg()) {
    printOperand(MI, Op+4, Modifier);
    O << ':';
  }
  printLeaMemReference(MI, Op, Modifier);
}

void X86ATTAsmPrinter::printPICJumpTableSetLabel(unsigned uid,
                                                 const MachineBasicBlock *MBB) const {
  if (!TAI->getSetDirective())
lib/Target/X86/AsmPrinter/X86ATTAsmPrinter.h
@@ -93,14 +93,8 @@ class VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
  void printf128mem(const MachineInstr *MI, unsigned OpNo) {
    printMemReference(MI, OpNo);
  }
  void printlea32mem(const MachineInstr *MI, unsigned OpNo) {
    printLeaMemReference(MI, OpNo);
  }
  void printlea64mem(const MachineInstr *MI, unsigned OpNo) {
    printLeaMemReference(MI, OpNo);
  }
  void printlea64_32mem(const MachineInstr *MI, unsigned OpNo) {
    printLeaMemReference(MI, OpNo, "subreg64");
    printMemReference(MI, OpNo, "subreg64");
  }

  bool printAsmMRegister(const MachineOperand &MO, const char Mode);
@@ -113,8 +107,6 @@ class VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
  void printSSECC(const MachineInstr *MI, unsigned Op);
  void printMemReference(const MachineInstr *MI, unsigned Op,
                         const char *Modifier=NULL);
  void printLeaMemReference(const MachineInstr *MI, unsigned Op,
                            const char *Modifier=NULL);
  void printPICJumpTableSetLabel(unsigned uid,
                                 const MachineBasicBlock *MBB) const;
  void printPICJumpTableSetLabel(unsigned uid, unsigned uid2,
lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.cpp
@@ -271,9 +271,10 @@ void X86IntelAsmPrinter::printOp(const MachineOperand &MO,
  }
}

void X86IntelAsmPrinter::printLeaMemReference(const MachineInstr *MI,
                                              unsigned Op,
                                              const char *Modifier) {
void X86IntelAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
                                           const char *Modifier) {
  assert(isMem(MI, Op) && "Invalid memory reference!");

  const MachineOperand &BaseReg = MI->getOperand(Op);
  int ScaleVal = MI->getOperand(Op+1).getImm();
  const MachineOperand &IndexReg = MI->getOperand(Op+2);
@@ -316,17 +317,6 @@ void X86IntelAsmPrinter::printLeaMemReference(const MachineInstr *MI,
  O << "]";
}

void X86IntelAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
                                           const char *Modifier) {
  assert(isMem(MI, Op) && "Invalid memory reference!");
  MachineOperand Segment = MI->getOperand(Op+4);
  if (Segment.getReg()) {
    printOperand(MI, Op+4, Modifier);
    O << ':';
  }
  printLeaMemReference(MI, Op, Modifier);
}

void X86IntelAsmPrinter::printPICJumpTableSetLabel(unsigned uid,
                                                   const MachineBasicBlock *MBB) const {
  if (!TAI->getSetDirective())
lib/Target/X86/AsmPrinter/X86IntelAsmPrinter.h
@@ -88,17 +88,9 @@ struct VISIBILITY_HIDDEN X86IntelAsmPrinter : public AsmPrinter {
    O << "XMMWORD PTR ";
    printMemReference(MI, OpNo);
  }
  void printlea32mem(const MachineInstr *MI, unsigned OpNo) {
    O << "DWORD PTR ";
    printLeaMemReference(MI, OpNo);
  }
  void printlea64mem(const MachineInstr *MI, unsigned OpNo) {
    O << "QWORD PTR ";
    printLeaMemReference(MI, OpNo);
  }
  void printlea64_32mem(const MachineInstr *MI, unsigned OpNo) {
    O << "QWORD PTR ";
    printLeaMemReference(MI, OpNo, "subreg64");
    printMemReference(MI, OpNo, "subreg64");
  }

  bool printAsmMRegister(const MachineOperand &MO, const char Mode);
@@ -111,8 +103,6 @@ struct VISIBILITY_HIDDEN X86IntelAsmPrinter : public AsmPrinter {
  void printSSECC(const MachineInstr *MI, unsigned Op);
  void printMemReference(const MachineInstr *MI, unsigned Op,
                         const char *Modifier=NULL);
  void printLeaMemReference(const MachineInstr *MI, unsigned Op,
                            const char *Modifier=NULL);
  void printPICJumpTableSetLabel(unsigned uid,
                                 const MachineBasicBlock *MBB) const;
  void printPICJumpTableSetLabel(unsigned uid, unsigned uid2,
lib/Target/X86/X86CodeEmitter.cpp
@@ -533,6 +533,23 @@ void Emitter::emitInstruction(const MachineInstr &MI,
    case X86::DWARF_LOC:
    case X86::FP_REG_KILL:
      break;
    case X86::TLS_tp: {
      MCE.emitByte(BaseOpcode);
      unsigned RegOpcodeField = getX86RegNum(MI.getOperand(0).getReg());
      MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
      emitConstant(0, 4);
      break;
    }
    case X86::TLS_gs_ri: {
      MCE.emitByte(BaseOpcode);
      unsigned RegOpcodeField = getX86RegNum(MI.getOperand(0).getReg());
      MCE.emitByte(ModRMByte(0, RegOpcodeField, 5));
      GlobalValue* GV = MI.getOperand(1).getGlobal();
      unsigned rt = Is64BitMode ? X86::reloc_pcrel_word
        : (IsPIC ? X86::reloc_picrel_word : X86::reloc_absolute_word);
      emitGlobalAddress(GV, rt);
      break;
    }
    case X86::MOVPC32r: {
      // This emits the "call" portion of this pseudo instruction.
      MCE.emitByte(BaseOpcode);
@@ -644,21 +661,13 @@ void Emitter::emitInstruction(const MachineInstr &MI,
    break;

  case X86II::MRMSrcMem: {
    // FIXME: Maybe lea should have its own form?
    int AddrOperands;
    if (Opcode == X86::LEA64r || Opcode == X86::LEA64_32r ||
        Opcode == X86::LEA16r || Opcode == X86::LEA32r)
      AddrOperands = X86AddrNumOperands - 1; // No segment register
    else
      AddrOperands = X86AddrNumOperands;

    intptr_t PCAdj = (CurOp + AddrOperands + 1 != NumOps) ?
    intptr_t PCAdj = (CurOp + X86AddrNumOperands + 1 != NumOps) ?
      X86InstrInfo::sizeOfImm(Desc) : 0;

    MCE.emitByte(BaseOpcode);
    emitMemModRMByte(MI, CurOp+1, getX86RegNum(MI.getOperand(CurOp).getReg()),
                     PCAdj);
    CurOp += AddrOperands + 1;
    CurOp += X86AddrNumOperands + 1;
    if (CurOp != NumOps)
      emitConstant(MI.getOperand(CurOp++).getImm(), X86InstrInfo::sizeOfImm(Desc));
    break;
lib/Target/X86/X86FastISel.cpp
@@ -1490,7 +1490,7 @@ unsigned X86FastISel::TargetMaterializeConstant(Constant *C) {
    else
      Opc = X86::LEA64r;
    unsigned ResultReg = createResultReg(RC);
    addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
    addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
    return ResultReg;
  }
  return 0;
@@ -1535,7 +1535,7 @@ unsigned X86FastISel::TargetMaterializeAlloca(AllocaInst *C) {
  unsigned Opc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  TargetRegisterClass* RC = TLI.getRegClassFor(TLI.getPointerTy());
  unsigned ResultReg = createResultReg(RC);
  addLeaAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  addFullAddress(BuildMI(MBB, DL, TII.get(Opc), ResultReg), AM);
  return ResultReg;
}
lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -69,7 +69,6 @@ namespace {
    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
@@ -78,7 +77,7 @@ namespace {

    X86ISelAddressMode()
      : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0) {
        GV(0), CP(0), ES(0), JT(-1), Align(0) {
    }

    bool hasSymbolicDisplacement() const {
@@ -160,25 +159,20 @@ namespace {
    SDNode *Select(SDValue N);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);

    bool MatchSegmentBaseAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchLoad(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM,
                      unsigned Depth = 0);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
                    SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectLEAAddr(SDValue Op, SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp);
    bool SelectScalarSSELoad(SDValue Op, SDValue Pred,
                             SDValue N, SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &InChain, SDValue &OutChain);
    bool TryFoldLoad(SDValue P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);
                     SDValue &Index, SDValue &Disp);
    void PreprocessForRMW();
    void PreprocessForFPConvert();

@@ -192,7 +186,7 @@ namespace {

    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
                                   SDValue &Disp) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
@@ -211,11 +205,6 @@ namespace {
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
@@ -737,33 +726,6 @@ void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}


bool X86DAGToDAGISel::MatchSegmentBaseAddress(SDValue N,
                                              X86ISelAddressMode &AM) {
  assert(N.getOpcode() == X86ISD::SegmentBaseAddress);
  SDValue Segment = N.getOperand(0);

  if (AM.Segment.getNode() == 0) {
    AM.Segment = Segment;
    return false;
  }

  return true;
}

bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf

  SDValue Address = N.getOperand(1);
  if (Address.getOpcode() == X86ISD::SegmentBaseAddress &&
      !MatchSegmentBaseAddress (Address, AM))
    return false;

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done. This just pattern matches for the
/// addressing mode.
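The MatchLoad comment above relies on a documented property of the GNU TLS ABI: on IA-32 the word at %gs:0 (and at %fs:0 on x86-64) holds the address of the thread block itself. A standalone illustration of that fact, assuming an IA-32 Linux target and GCC-style inline assembly (not code from this tree):

  // Illustration only: one segment-relative load from %gs:0 yields the
  // thread pointer, per Drepper's TLS paper cited in MatchLoad above.
  static inline void *ReadIA32ThreadPointer() {
    void *TP;
    __asm__("movl %%gs:0, %0" : "=r"(TP));
    return TP;
  }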
@@ -799,11 +761,6 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
    break;
  }

  case X86ISD::SegmentBaseAddress:
    if (!MatchSegmentBaseAddress(N, AM))
      return false;
    break;

  case X86ISD::Wrapper: {
    DOUT << "Wrapper: 64bit " << is64Bit;
    DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
@@ -850,11 +807,6 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
    break;
  }

  case ISD::LOAD:
    if (!MatchLoad(N, AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase
        && AM.Base.Reg.getNode() == 0) {
@@ -1082,7 +1034,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
/// match by reference.
bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
                                 SDValue &Scale, SDValue &Index,
                                 SDValue &Disp, SDValue &Segment) {
                                 SDValue &Disp) {
  X86ISelAddressMode AM;
  bool Done = false;
  if (AvoidDupAddrCompute && !N.hasOneUse()) {
@@ -1117,7 +1069,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
  if (!AM.IndexReg.getNode())
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}

@@ -1127,8 +1079,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
                                          SDValue N, SDValue &Base,
                                          SDValue &Scale, SDValue &Index,
                                          SDValue &Disp, SDValue &Segment,
                                          SDValue &InChain,
                                          SDValue &Disp, SDValue &InChain,
                                          SDValue &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
@@ -1137,7 +1088,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
        N.hasOneUse() &&
        IsLegalAndProfitableToFold(N.getNode(), Pred.getNode(), Op.getNode())) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
@@ -1154,7 +1105,7 @@ bool X86DAGToDAGISel::SelectScalarSSELoad(SDValue Op, SDValue Pred,
      N.getOperand(0).getOperand(0).hasOneUse()) {
    // Okay, this is a zero extending load. Fold it.
    LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
    if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
      return false;
    OutChain = LD->getChain();
    InChain = SDValue(LD, 1);
@@ -1173,11 +1124,6 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
  if (MatchAddress(N, AM))
    return false;

  //Is it better to set AM.Segment before calling MatchAddress to
  //prevent it from adding a segment?
  if (AM.Segment.getNode())
    return false;

  MVT VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
@@ -1216,8 +1162,7 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,
    Complexity++;

  if (Complexity > 2) {
    SDValue Segment;
    getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
@@ -1225,12 +1170,11 @@ bool X86DAGToDAGISel::SelectLEAAddr(SDValue Op, SDValue N,

bool X86DAGToDAGISel::TryFoldLoad(SDValue P, SDValue N,
                                  SDValue &Base, SDValue &Scale,
                                  SDValue &Index, SDValue &Disp,
                                  SDValue &Segment) {
                                  SDValue &Index, SDValue &Disp) {
  if (ISD::isNON_EXTLoad(N.getNode()) &&
      N.hasOneUse() &&
      IsLegalAndProfitableToFold(N.getNode(), P.getNode(), P.getNode()))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp, Segment);
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}

@@ -1286,11 +1230,11 @@ SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
  SDValue In1 = Node->getOperand(1);
  SDValue In2L = Node->getOperand(2);
  SDValue In2H = Node->getOperand(3);
  SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
  SDValue Tmp0, Tmp1, Tmp2, Tmp3;
  if (!SelectAddr(In1, In1, Tmp0, Tmp1, Tmp2, Tmp3))
    return NULL;
  SDValue LSI = Node->getOperand(4); // MemOperand
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, LSI, Chain};
  const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, In2L, In2H, LSI, Chain };
  return CurDAG->getTargetNode(Opc, Node->getDebugLoc(),
                               MVT::i32, MVT::i32, MVT::Other, Ops,
                               array_lengthof(Ops));
@@ -1372,11 +1316,11 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
    case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    // multiplty is commmutative
    if (!foldedLoad) {
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
      foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
      if (foldedLoad)
        std::swap(N0, N1);
    }
@@ -1385,8 +1329,7 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
                                 N0, SDValue()).getValue(1);

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
@@ -1495,17 +1438,17 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
      break;
    }

    SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
    SDValue Tmp0, Tmp1, Tmp2, Tmp3;
    bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
    bool signBitIsZero = CurDAG->SignBitIsZero(N0);

    SDValue InFlag;
    if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
      // Special case for div8, just use a move with zero extension to AX to
      // clear the upper 8 bits (AH).
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
      SDValue Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
      if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
        SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
        Move =
          SDValue(CurDAG->getTargetNode(X86::MOVZX16rm8, dl, MVT::i16,
                                        MVT::Other, Ops,
@@ -1537,8 +1480,7 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
    }

    if (foldedLoad) {
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
                        InFlag };
      SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
      SDNode *CNode =
        CurDAG->getTargetNode(MOpc, dl, MVT::Other, MVT::Flag, Ops,
                              array_lengthof(Ops));
@@ -1707,13 +1649,13 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
                             std::vector<SDValue> &OutOps) {
  SDValue Op0, Op1, Op2, Op3, Op4;
  SDValue Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o': // offsetable ??
  case 'v': // not offsetable ??
  default: return true;
  case 'm': // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3, Op4))
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }
@@ -1722,7 +1664,6 @@ SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  OutOps.push_back(Op4);
  return false;
}
lib/Target/X86/X86ISelLowering.cpp
@@ -4834,13 +4834,8 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const MVT PtrVT, TLSModel::Model model) {
  DebugLoc dl = GA->getDebugLoc();
  // Get the Thread Pointer
  SDValue Base = DAG.getNode(X86ISD::SegmentBaseAddress,
                             DebugLoc::getUnknownLoc(), PtrVT,
                             DAG.getRegister(X86::GS, MVT::i32));

  SDValue ThreadPointer = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Base,
                                      NULL, 0);

  SDValue ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER,
                                      DebugLoc::getUnknownLoc(), PtrVT);
  // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
  // exec)
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(),
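For reference, the comment above names the two IA-32 TLS access sequences; spelled out below as the standard models define them (an illustrative transcription, not code from this tree; the tls*.ll tests at the end of this commit check an equivalent leal form for local exec):

  // Local exec: the offset of x from the thread pointer is a link-time constant.
  //   movl %gs:0, %eax          ; thread pointer
  //   addl $x@ntpoff, %eax      ; eax = &x
  // Initial exec: the offset is fetched from the GOT at run time.
  //   movl %gs:0, %eax          ; thread pointer
  //   addl x@indntpoff, %eax    ; eax = &x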
@@ -7154,7 +7149,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::SegmentBaseAddress: return "X86ISD::SegmentBaseAddress";
  case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
@@ -7478,7 +7473,7 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
  unsigned t2 = F->getRegInfo().createVirtualRegister(RC);
  MIB = BuildMI(thisMBB, dl, TII->get(LoadOpc), t2);
  // add 4 to displacement.
  for (int i=0; i <= lastAddrIndx-2; ++i)
  for (int i=0; i <= lastAddrIndx-1; ++i)
    (*MIB).addOperand(*argOpers[i]);
  MachineOperand newOp3 = *(argOpers[3]);
  if (newOp3.isImm())
@@ -7486,7 +7481,6 @@ X86TargetLowering::EmitAtomicBit6432WithCustomInserter(MachineInstr *bInstr,
  else
    newOp3.setOffset(newOp3.getOffset()+4);
  (*MIB).addOperand(newOp3);
  (*MIB).addOperand(*argOpers[lastAddrIndx]);

  // t3/4 are defined later, at the bottom of the loop
  unsigned t3 = F->getRegInfo().createVirtualRegister(RC);
lib/Target/X86/X86ISelLowering.h
@@ -188,11 +188,8 @@ namespace llvm {
      /// in order to obtain suitable precision.
      FRSQRT, FRCP,

      // TLSADDR - Thread Local Storage.
      TLSADDR,

      // SegmentBaseAddress - The address segment:0
      SegmentBaseAddress,
      // TLSADDR, THREAD_POINTER - Thread Local Storage.
      TLSADDR, THREAD_POINTER,

      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,
lib/Target/X86/X86Instr64bit.td
@@ -23,7 +23,7 @@ def i64i32imm : Operand<i64>;
def i64i8imm : Operand<i64>;

def lea64mem : Operand<i64> {
  let PrintMethod = "printlea64mem";
  let PrintMethod = "printi64mem";
  let MIOperandInfo = (ops GR64, i8imm, GR64, i32imm);
}
lib/Target/X86/X86InstrBuilder.h
@@ -66,15 +66,6 @@ inline const MachineInstrBuilder &addDirectMem(const MachineInstrBuilder &MIB,
  return MIB.addReg(Reg).addImm(1).addReg(0).addImm(0);
}

inline const MachineInstrBuilder &addLeaOffset(const MachineInstrBuilder &MIB,
                                               int Offset) {
  return MIB.addImm(1).addReg(0).addImm(Offset);
}

inline const MachineInstrBuilder &addOffset(const MachineInstrBuilder &MIB,
                                            int Offset) {
  return addLeaOffset(MIB, Offset).addReg(0);
}

/// addRegOffset - This function is used to add a memory reference of the form
/// [Reg + Offset], i.e., one with no scale or index, but with a
@@ -83,13 +74,8 @@ inline const MachineInstrBuilder &addOffset(const MachineInstrBuilder &MIB,
inline const MachineInstrBuilder &addRegOffset(const MachineInstrBuilder &MIB,
                                               unsigned Reg, bool isKill,
                                               int Offset) {
  return addOffset(MIB.addReg(Reg, false, false, isKill), Offset);
}

inline const MachineInstrBuilder &addLeaRegOffset(const MachineInstrBuilder &MIB,
                                                  unsigned Reg, bool isKill,
                                                  int Offset) {
  return addLeaOffset(MIB.addReg(Reg, false, false, isKill), Offset);
  return MIB.addReg(Reg, false, false, isKill)
            .addImm(1).addReg(0).addImm(Offset);
}

/// addRegReg - This function is used to add a memory reference of the form:
@@ -101,8 +87,8 @@ inline const MachineInstrBuilder &addRegReg(const MachineInstrBuilder &MIB,
            .addReg(Reg2, false, false, isKill2).addImm(0);
}

inline const MachineInstrBuilder &addLeaAddress(const MachineInstrBuilder &MIB,
                                                const X86AddressMode &AM) {
inline const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
                                                 const X86AddressMode &AM) {
  assert (AM.Scale == 1 || AM.Scale == 2 || AM.Scale == 4 || AM.Scale == 8);

  if (AM.BaseType == X86AddressMode::RegBase)
@@ -118,11 +104,6 @@ inline const MachineInstrBuilder &addLeaAddress(const MachineInstrBuilder &MIB,
  return MIB.addImm(AM.Disp);
}

inline const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
                                                 const X86AddressMode &AM) {
  return addLeaAddress(MIB, AM).addReg(0);
}

/// addFrameReference - This function is used to add a reference to the base of
/// an abstract object on the stack frame of the current function. This
/// reference has base register as the FrameIndex offset until it is resolved.
@@ -144,7 +125,7 @@ addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset = 0) {
                          MFI.getObjectOffset(FI) + Offset,
                          MFI.getObjectSize(FI),
                          MFI.getObjectAlignment(FI));
  return addOffset(MIB.addFrameIndex(FI), Offset)
  return MIB.addFrameIndex(FI).addImm(1).addReg(0).addImm(Offset)
            .addMemOperand(MMO);
}
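The doc comment on addRegOffset above describes the [Reg + Offset] form these helpers compose; a usage sketch under the post-revert four-operand layout (the LEA32r/EBP choice is ours, mirroring how convertToThreeAddress below calls it):

  // Hypothetical: reference [EBP - 8] as Base=EBP, Scale=1, Index=0, Disp=-8.
  //   addRegOffset(BuildMI(MF, DL, TII.get(X86::LEA32r), DestReg),
  //                X86::EBP, /*isKill=*/false, -8)
  // expands to: MIB.addReg(X86::EBP).addImm(1).addReg(0).addImm(-8)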
lib/Target/X86/X86InstrInfo.cpp
@@ -1138,9 +1138,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
    assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
    unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
      : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
    NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                            .addReg(Dest, true, false, false, isDead),
                            Src, isKill, 1);
    NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                         .addReg(Dest, true, false, false, isDead),
                         Src, isKill, 1);
    break;
  }
  case X86::INC16r:
@@ -1157,9 +1157,9 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
    assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
    unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
      : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
    NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                            .addReg(Dest, true, false, false, isDead),
                            Src, isKill, -1);
    NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                         .addReg(Dest, true, false, false, isDead),
                         Src, isKill, -1);
    break;
  }
  case X86::DEC16r:
@@ -1200,18 +1200,18 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
  case X86::ADD64ri8:
    assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImm())
      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
                              .addReg(Dest, true, false, false, isDead),
                              Src, isKill, MI->getOperand(2).getImm());
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, MI->getOperand(2).getImm());
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
    assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImm()) {
      unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
      NewMI = addLeaRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                              .addReg(Dest, true, false, false, isDead),
                              Src, isKill, MI->getOperand(2).getImm());
      NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
                           .addReg(Dest, true, false, false, isDead),
                           Src, isKill, MI->getOperand(2).getImm());
    }
    break;
  case X86::ADD16ri:
@@ -1959,7 +1959,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4) // FrameIndex only
    addOffset(MIB, 0);
    MIB.addImm(1).addReg(0).addImm(0);

  // Loop over the rest of the ri operands, converting them over.
  unsigned NumOps = MI->getDesc().getNumOperands()-2;
@@ -1990,7 +1990,7 @@ static MachineInstr *FuseInst(MachineFunction &MF,
    for (unsigned i = 0; i != NumAddrOps; ++i)
      MIB.addOperand(MOs[i]);
    if (NumAddrOps < 4) // FrameIndex only
      addOffset(MIB, 0);
      MIB.addImm(1).addReg(0).addImm(0);
  } else {
    MIB.addOperand(MO);
  }
@@ -2008,7 +2008,7 @@ static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
  for (unsigned i = 0; i != NumAddrOps; ++i)
    MIB.addOperand(MOs[i]);
  if (NumAddrOps < 4) // FrameIndex only
    addOffset(MIB, 0);
    MIB.addImm(1).addReg(0).addImm(0);
  return MIB.addImm(0);
}

@@ -2164,7 +2164,7 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
  } else if (Ops.size() != 1)
    return NULL;

  SmallVector<MachineOperand,X86AddrNumOperands> MOs;
  SmallVector<MachineOperand,4> MOs;
  if (LoadMI->getOpcode() == X86::V_SET0 ||
      LoadMI->getOpcode() == X86::V_SETALLONES) {
    // Folding a V_SET0 or V_SETALLONES as a load, to ease register pressure.
@@ -2193,7 +2193,6 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
    MOs.push_back(MachineOperand::CreateImm(1));
    MOs.push_back(MachineOperand::CreateReg(0, false));
    MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
    MOs.push_back(MachineOperand::CreateReg(0, false));
  } else {
    // Folding a normal load. Just copy the load's address operands.
    unsigned NumOps = LoadMI->getDesc().getNumOperands();
@@ -2883,6 +2882,11 @@ static unsigned GetInstSizeWithDesc(const MachineInstr &MI,
      FinalSize += sizeConstant(X86InstrInfo::sizeOfImm(Desc));
      break;
    }
    case X86::TLS_tp:
    case X86::TLS_gs_ri:
      FinalSize += 2;
      FinalSize += sizeGlobalAddress(false);
      break;
    }
    CurOp = NumOps;
    break;
lib/Target/X86/X86InstrInfo.h
@@ -243,7 +243,7 @@ namespace X86II {
  };
}

const int X86AddrNumOperands = 5;
const int X86AddrNumOperands = 4;

inline static bool isScale(const MachineOperand &MO) {
  return MO.isImm() &&
@@ -251,7 +251,7 @@ inline static bool isScale(const MachineOperand &MO) {
          MO.getImm() == 4 || MO.getImm() == 8);
}

inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+4 <= MI->getNumOperands() &&
    MI->getOperand(Op ).isReg() && isScale(MI->getOperand(Op+1)) &&
@@ -262,13 +262,6 @@ inline static bool isLeaMem(const MachineInstr *MI, unsigned Op) {
    MI->getOperand(Op+3).isJTI());
}

inline static bool isMem(const MachineInstr *MI, unsigned Op) {
  if (MI->getOperand(Op).isFI()) return true;
  return Op+5 <= MI->getNumOperands() &&
    MI->getOperand(Op+4).isReg() &&
    isLeaMem(MI, Op);
}

class X86InstrInfo : public TargetInstrInfoImpl {
  X86TargetMachine &TM;
  const X86RegisterInfo RI;
lib/Target/X86/X86InstrInfo.td
@@ -65,7 +65,7 @@ def SDTX86Wrapper : SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>, SDTCisPtrTy<0>]>;

def SDT_X86TLSADDR : SDTypeProfile<1, 1, [SDTCisPtrTy<0>, SDTCisInt<1>]>;

def SDT_X86SegmentBaseAddress : SDTypeProfile<1, 1, [SDTCisPtrTy<0>]>;
def SDT_X86TLSTP : SDTypeProfile<1, 0, [SDTCisPtrTy<0>]>;

def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>;

@@ -142,8 +142,7 @@ def X86WrapperRIP : SDNode<"X86ISD::WrapperRIP", SDTX86Wrapper>;

def X86tlsaddr : SDNode<"X86ISD::TLSADDR", SDT_X86TLSADDR,
                        [SDNPHasChain, SDNPOptInFlag, SDNPOutFlag]>;
def X86SegmentBaseAddress : SDNode<"X86ISD::SegmentBaseAddress",
                                   SDT_X86SegmentBaseAddress, []>;
def X86TLStp : SDNode<"X86ISD::THREAD_POINTER", SDT_X86TLSTP, []>;

def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                      [SDNPHasChain]>;
@@ -168,7 +167,7 @@ def X86mul_imm : SDNode<"X86ISD::MUL_IMM", SDTIntBinOp>;
//
class X86MemOperand<string printMethod> : Operand<iPTR> {
  let PrintMethod = printMethod;
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

def i8mem : X86MemOperand<"printi8mem">;
@@ -182,7 +181,7 @@ def f80mem : X86MemOperand<"printf80mem">;
def f128mem : X86MemOperand<"printf128mem">;

def lea32mem : Operand<i32> {
  let PrintMethod = "printlea32mem";
  let PrintMethod = "printi32mem";
  let MIOperandInfo = (ops GR32, i8imm, GR32, i32imm);
}

@@ -208,7 +207,7 @@ def brtarget : Operand<OtherVT>;
//

// Define X86 specific addressing mode.
def addr : ComplexPattern<iPTR, 5, "SelectAddr", [], []>;
def addr : ComplexPattern<iPTR, 4, "SelectAddr", [], []>;
def lea32addr : ComplexPattern<i32, 4, "SelectLEAAddr",
                               [add, mul, shl, or, frameindex], []>;

@@ -2923,11 +2922,101 @@ def MOV32_mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32_:$src),
// Thread Local Storage Instructions
//

// FIXME: there is duplication with the non-TLS case.
// There is a suggestion on how to fix this at
// http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20090309/075212.html

let Uses = [EBX] in
def TLS_addr32 : I<0, Pseudo, (outs GR32:$dst), (ins i32imm:$sym),
                   "leal\t${sym:mem}(,%ebx,1), $dst",
                   [(set GR32:$dst, (X86tlsaddr tglobaltlsaddr:$sym))]>;

let AddedComplexity = 10 in
def TLS_gs_rr : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
                  "movl\t%gs:($src), $dst",
                  [(set GR32:$dst, (load (add X86TLStp, GR32:$src)))]>;

let AddedComplexity = 15 in
def TLS_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                  "movl\t%gs:${src:mem}, $dst",
                  [(set GR32:$dst,
                    (load (add X86TLStp, (X86Wrapper tglobaltlsaddr:$src))))]>,
                SegGS;

let AddedComplexity = 15 in
def TLS16_gs_ri : I<0x8B, Pseudo, (outs GR16:$dst), (ins i32imm:$src),
                    "movw\t%gs:${src:mem}, $dst",
                    [(set GR16:$dst,
                      (load (add X86TLStp,
                                 (X86Wrapper tglobaltlsaddr:$src))))]>,
                  SegGS;

let AddedComplexity = 15 in
def TLS8_gs_ri : I<0x8B, Pseudo, (outs GR8:$dst), (ins i32imm:$src),
                   "movb\t%gs:${src:mem}, $dst",
                   [(set GR8:$dst,
                     (load (add X86TLStp,
                                (X86Wrapper tglobaltlsaddr:$src))))]>,
                 SegGS;

let AddedComplexity = 15 in
def TLS_ext16_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                        "movzwl\t%gs:${src:mem}, $dst",
                        [(set GR32:$dst,
                          (extloadi32i16
                            (add X86TLStp,
                                 (X86Wrapper tglobaltlsaddr:$src))))]>,
                      SegGS;

let AddedComplexity = 15 in
def TLS_sext16_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                         "movswl\t%gs:${src:mem}, $dst",
                         [(set GR32:$dst,
                           (sextloadi32i16
                             (add X86TLStp,
                                  (X86Wrapper tglobaltlsaddr:$src))))]>,
                       SegGS;

let AddedComplexity = 15 in
def TLS_zext16_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                         "movzwl\t%gs:${src:mem}, $dst",
                         [(set GR32:$dst,
                           (zextloadi32i16
                             (add X86TLStp,
                                  (X86Wrapper tglobaltlsaddr:$src))))]>,
                       SegGS;

let AddedComplexity = 15 in
def TLS_ext8_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                       "movzbl\t%gs:${src:mem}, $dst",
                       [(set GR32:$dst,
                         (extloadi32i8
                           (add X86TLStp,
                                (X86Wrapper tglobaltlsaddr:$src))))]>,
                     SegGS;

let AddedComplexity = 15 in
def TLS_sext8_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                        "movsbl\t%gs:${src:mem}, $dst",
                        [(set GR32:$dst,
                          (sextloadi32i8
                            (add X86TLStp,
                                 (X86Wrapper tglobaltlsaddr:$src))))]>,
                      SegGS;

let AddedComplexity = 15 in
def TLS_zext8_gs_ri : I<0x8B, Pseudo, (outs GR32:$dst), (ins i32imm:$src),
                        "movzbl\t%gs:${src:mem}, $dst",
                        [(set GR32:$dst,
                          (zextloadi32i8
                            (add X86TLStp,
                                 (X86Wrapper tglobaltlsaddr:$src))))]>,
                      SegGS;

def TLS_tp : I<0x8B, Pseudo, (outs GR32:$dst), (ins),
               "movl\t%gs:0, $dst",
               [(set GR32:$dst, X86TLStp)]>, SegGS;

let AddedComplexity = 5 in
def GS_MOV32rm : I<0x8B, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
                   "movl\t%gs:$src, $dst",
lib/Target/X86/X86InstrSSE.td
@@ -76,18 +76,18 @@ def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>;
// These are 'extloads' from a scalar to the low element of a vector, zeroing
// the top elements. These are used for the SSE 'ss' and 'sd' instruction
// forms.
def sse_load_f32 : ComplexPattern<v4f32, 5, "SelectScalarSSELoad", [],
def sse_load_f32 : ComplexPattern<v4f32, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;
def sse_load_f64 : ComplexPattern<v2f64, 5, "SelectScalarSSELoad", [],
def sse_load_f64 : ComplexPattern<v2f64, 4, "SelectScalarSSELoad", [],
                                  [SDNPHasChain, SDNPMayLoad]>;

def ssmem : Operand<v4f32> {
  let PrintMethod = "printf32mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}
def sdmem : Operand<v2f64> {
  let PrintMethod = "printf64mem";
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm, i8imm);
  let MIOperandInfo = (ops ptr_rc, i8imm, ptr_rc, i32imm);
}

//===----------------------------------------------------------------------===//
lib/Target/X86/X86RegisterInfo.cpp
@@ -971,8 +971,8 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI = addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                                         FramePtr, false, -CSSize);
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                                      FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
lib/Target/X86/X86RegisterInfo.td
@@ -168,14 +168,6 @@ let Namespace = "X86" in {

  // Status flags register
  def EFLAGS : Register<"flags">;

  // Segment registers
  def CS : Register<"cs">;
  def DS : Register<"ds">;
  def SS : Register<"ss">;
  def ES : Register<"es">;
  def FS : Register<"fs">;
  def GS : Register<"gs">;
}
test/CodeGen/X86/tls2.ll, tls6.ll, tls8.ll, tls10.ll (one hunk in each)
@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | llc -march=x86 -mtriple=i386-linux-gnu > %t
; RUN: grep {movl \$i@NTPOFF, %eax} %t
; RUN: grep {addl %gs:0, %eax} %t
; RUN: grep {movl %gs:0, %eax} %t
; RUN: grep {leal i@NTPOFF(%eax), %eax} %t

@i = external hidden thread_local global i32

@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | llc -march=x86 -mtriple=i386-linux-gnu > %t
; RUN: grep {movl \$i@NTPOFF, %eax} %t
; RUN: grep {addl %gs:0, %eax} %t
; RUN: grep {movl %gs:0, %eax} %t
; RUN: grep {leal i@NTPOFF(%eax), %eax} %t

@i = thread_local global i32 15

@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | llc -march=x86 -mtriple=i386-linux-gnu > %t
; RUN: grep {movl \$i@NTPOFF, %eax} %t
; RUN: grep {addl %gs:0, %eax} %t
; RUN: grep {movl %gs:0, %eax} %t
; RUN: grep {leal i@NTPOFF(%eax), %eax} %t

@i = internal thread_local global i32 15

@@ -1,6 +1,6 @@
; RUN: llvm-as < %s | llc -march=x86 -mtriple=i386-linux-gnu > %t
; RUN: grep {movl \$i@NTPOFF, %eax} %t
; RUN: grep {addl %gs:0, %eax} %t
; RUN: grep {movl %gs:0, %eax} %t
; RUN: grep {leal i@NTPOFF(%eax), %eax} %t

@i = hidden thread_local global i32 15