Remove the x86 MOV{32,64}{rr,rm,mr}_TC instructions.
The reg-reg copies were no longer being generated, since copyPhysReg copies physical registers only. The loads and stores are not necessary either: the TC constraint is imposed by the TAILJMP and TCRETURN instructions, so there should be no need for constrained loads and stores.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@116314 91177308-0d34-0410-b5e6-96231b3b80d8
parent 0de6ab3c43
commit d0eeeeb558
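As background for the diff below, here is a minimal sketch, not the actual X86InstrInfo::copyPhysReg, of why the _TC register-to-register copies had become dead code: copyPhysReg sees only physical registers, so it picks a copy opcode by register width alone, and a register allocated from GR32_TC or GR64_TC is already covered by the plain GR32/GR64 checks. The helper name pickGPRCopyOpcode and the include line are made up for this illustration.

// Sketch only, assuming the X86 backend's generated register-class and
// opcode enums are in scope (as they are inside lib/Target/X86).
#include "X86InstrInfo.h" // hypothetical include for this standalone sketch

static unsigned pickGPRCopyOpcode(unsigned DestReg, unsigned SrcReg) {
  // Every physical register in GR64_TC is also a member of GR64 (and
  // likewise GR32_TC is contained in GR32), so the width-based checks
  // below already cover them; no MOV64rr_TC/MOV32rr_TC opcode is needed.
  if (X86::GR64RegClass.contains(DestReg, SrcReg))
    return X86::MOV64rr;
  if (X86::GR32RegClass.contains(DestReg, SrcReg))
    return X86::MOV32rr;
  if (X86::GR16RegClass.contains(DestReg, SrcReg))
    return X86::MOV16rr;
  if (X86::GR8RegClass.contains(DestReg, SrcReg))
    return X86::MOV8rr;
  return 0; // not a general-purpose register copy
}

The constrained loads and stores go away for the complementary reason stated in the message: the tail-call register constraint lives on the TAILJMP/TCRETURN instruction definitions, so getLoadStoreRegOpcode can fold the GR64_TC and GR32_TC classes into the ordinary MOV64rm/MOV64mr and MOV32rm/MOV32mr cases, as the hunks below do.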
@@ -285,7 +285,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
     { X86::MOV16rr, X86::MOV16mr, 0, 0 },
     { X86::MOV32ri, X86::MOV32mi, 0, 0 },
     { X86::MOV32rr, X86::MOV32mr, 0, 0 },
-    { X86::MOV32rr_TC, X86::MOV32mr_TC, 0, 0 },
     { X86::MOV64ri32, X86::MOV64mi32, 0, 0 },
     { X86::MOV64rr, X86::MOV64mr, 0, 0 },
     { X86::MOV8ri, X86::MOV8mi, 0, 0 },
@@ -400,7 +399,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
     { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
     { X86::MOV16rr, X86::MOV16rm, 0 },
     { X86::MOV32rr, X86::MOV32rm, 0 },
-    { X86::MOV32rr_TC, X86::MOV32rm_TC, 0 },
     { X86::MOV64rr, X86::MOV64rm, 0 },
     { X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
     { X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
@@ -774,9 +772,7 @@ static bool isFrameLoadOpcode(int Opcode) {
   case X86::MOV8rm:
   case X86::MOV16rm:
   case X86::MOV32rm:
-  case X86::MOV32rm_TC:
   case X86::MOV64rm:
-  case X86::MOV64rm_TC:
   case X86::LD_Fp64m:
   case X86::MOVSSrm:
   case X86::MOVSDrm:
@@ -797,9 +793,7 @@ static bool isFrameStoreOpcode(int Opcode) {
   case X86::MOV8mr:
   case X86::MOV16mr:
   case X86::MOV32mr:
-  case X86::MOV32mr_TC:
   case X86::MOV64mr:
-  case X86::MOV64mr_TC:
   case X86::ST_FpP64m:
   case X86::MOVSSmr:
   case X86::MOVSDmr:
@@ -2026,13 +2020,22 @@ static unsigned getLoadStoreRegOpcode(unsigned Reg,
   default:
     llvm_unreachable("Unknown regclass");
   case X86::GR64RegClassID:
+  case X86::GR64_ABCDRegClassID:
+  case X86::GR64_NOREXRegClassID:
+  case X86::GR64_NOREX_NOSPRegClassID:
   case X86::GR64_NOSPRegClassID:
+  case X86::GR64_TCRegClassID:
     return load ? X86::MOV64rm : X86::MOV64mr;
   case X86::GR32RegClassID:
-  case X86::GR32_NOSPRegClassID:
+  case X86::GR32_ABCDRegClassID:
   case X86::GR32_ADRegClassID:
+  case X86::GR32_NOREXRegClassID:
+  case X86::GR32_NOSPRegClassID:
+  case X86::GR32_TCRegClassID:
     return load ? X86::MOV32rm : X86::MOV32mr;
   case X86::GR16RegClassID:
+  case X86::GR16_ABCDRegClassID:
+  case X86::GR16_NOREXRegClassID:
     return load ? X86::MOV16rm : X86::MOV16mr;
   case X86::GR8RegClassID:
     // Copying to or from a physical H register on x86-64 requires a NOREX
@@ -2042,32 +2045,14 @@
       return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
     else
       return load ? X86::MOV8rm : X86::MOV8mr;
-  case X86::GR64_ABCDRegClassID:
-    return load ? X86::MOV64rm : X86::MOV64mr;
-  case X86::GR32_ABCDRegClassID:
-    return load ? X86::MOV32rm : X86::MOV32mr;
-  case X86::GR16_ABCDRegClassID:
-    return load ? X86::MOV16rm : X86::MOV16mr;
   case X86::GR8_ABCD_LRegClassID:
+  case X86::GR8_NOREXRegClassID:
     return load ? X86::MOV8rm :X86::MOV8mr;
   case X86::GR8_ABCD_HRegClassID:
     if (TM.getSubtarget<X86Subtarget>().is64Bit())
       return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
     else
       return load ? X86::MOV8rm : X86::MOV8mr;
-  case X86::GR64_NOREXRegClassID:
-  case X86::GR64_NOREX_NOSPRegClassID:
-    return load ? X86::MOV64rm : X86::MOV64mr;
-  case X86::GR32_NOREXRegClassID:
-    return load ? X86::MOV32rm : X86::MOV32mr;
-  case X86::GR16_NOREXRegClassID:
-    return load ? X86::MOV16rm : X86::MOV16mr;
-  case X86::GR8_NOREXRegClassID:
-    return load ? X86::MOV8rm : X86::MOV8mr;
-  case X86::GR64_TCRegClassID:
-    return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
-  case X86::GR32_TCRegClassID:
-    return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
   case X86::RFP80RegClassID:
     return load ? X86::LD_Fp80m : X86::ST_FpP80m;
   case X86::RFP64RegClassID:
@@ -865,34 +865,6 @@ def MOV64mr : RI<0x89, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
                  "mov{q}\t{$src, $dst|$dst, $src}",
                  [(store GR64:$src, addr:$dst)]>;
 
-/// Versions of MOV32rr, MOV32rm, and MOV32mr for i32mem_TC and GR32_TC.
-let isCodeGenOnly = 1 in {
-let neverHasSideEffects = 1 in {
-def MOV32rr_TC : I<0x89, MRMDestReg, (outs GR32_TC:$dst), (ins GR32_TC:$src),
-                   "mov{l}\t{$src, $dst|$dst, $src}", []>;
-def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src),
-                    "mov{q}\t{$src, $dst|$dst, $src}", []>;
-}
-
-let mayLoad = 1, canFoldAsLoad = 1, isReMaterializable = 1 in {
-def MOV32rm_TC : I<0x8B, MRMSrcMem, (outs GR32_TC:$dst), (ins i32mem_TC:$src),
-                   "mov{l}\t{$src, $dst|$dst, $src}",
-                   []>;
-def MOV64rm_TC : RI<0x8B, MRMSrcMem, (outs GR64_TC:$dst), (ins i64mem_TC:$src),
-                    "mov{q}\t{$src, $dst|$dst, $src}",
-                    []>;
-}
-
-let mayStore = 1 in {
-def MOV32mr_TC : I<0x89, MRMDestMem, (outs), (ins i32mem_TC:$dst, GR32_TC:$src),
-                   "mov{l}\t{$src, $dst|$dst, $src}",
-                   []>;
-def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src),
-                    "mov{q}\t{$src, $dst|$dst, $src}",
-                    []>;
-}
-} // isCodeGenOnly
-
 // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so
 // that they can be used for copying and storing h registers, which can't be
 // encoded when a REX prefix is present.
@@ -251,8 +251,7 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
     // movq loads are handled with a special relocation form which allows the
     // linker to eliminate some loads for GOT references which end up in the
     // same linkage unit.
-    if (MI.getOpcode() == X86::MOV64rm ||
-        MI.getOpcode() == X86::MOV64rm_TC)
+    if (MI.getOpcode() == X86::MOV64rm)
       FixupKind = X86::reloc_riprel_4byte_movq_load;
 
     // rip-relative addressing is actually relative to the *next* instruction.