Short-term workaround for frame-related weirdness on win64.

Some other minor win64 fixes as well.

Patch by Michael Beck!

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@80370 91177308-0d34-0410-b5e6-96231b3b80d8
Committed by Anton Korobeynikov on 2009-08-28 16:06:41 +00:00
Commit: 6f9bb6f31b (parent: 1cb2de3188)

3 changed files with 6 additions and 4 deletions


@@ -2035,6 +2035,7 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
   if (MI != MBB.end()) DL = MI->getDebugLoc();
   bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+  bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
   unsigned SlotSize = is64Bit ? 8 : 4;
   MachineFunction &MF = *MBB.getParent();
@@ -2051,7 +2052,7 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
     if (Reg == FPReg)
       // X86RegisterInfo::emitPrologue will handle spilling of frame register.
       continue;
-    if (RegClass != &X86::VR128RegClass) {
+    if (RegClass != &X86::VR128RegClass && !isWin64) {
       CalleeFrameSize += SlotSize;
       BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
     } else {
@@ -2075,6 +2076,7 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
   MachineFunction &MF = *MBB.getParent();
   unsigned FPReg = RI.getFrameRegister(MF);
   bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
+  bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
   unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
   for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
     unsigned Reg = CSI[i].getReg();
@@ -2082,7 +2084,7 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
       // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
       continue;
     const TargetRegisterClass *RegClass = CSI[i].getRegClass();
-    if (RegClass != &X86::VR128RegClass) {
+    if (RegClass != &X86::VR128RegClass && !isWin64) {
       BuildMI(MBB, MI, DL, get(Opc), Reg);
     } else {
       loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RegClass);
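
The effect of the two "&& !isWin64" changes above is that on Win64 callee-saved GPRs no longer get PUSH/POP'd: they now fall into the else branch and are spilled and reloaded through frame-index slots, the path XMM registers were already using. A minimal standalone sketch of that decision follows; the predicate name and boolean parameters are hypothetical and only illustrate the condition, they are not part of the patch.

    #include <cassert>

    // Illustration only: which callee-saved registers still use PUSH/POP
    // after this patch, versus a spill to a stack slot
    // (storeRegToStackSlot / loadRegFromStackSlot in the hunks above).
    static bool spillsViaStackSlot(bool isVR128, bool isWin64) {
      return isVR128 || isWin64;
    }

    int main() {
      assert(!spillsViaStackSlot(false, false)); // non-Win64 GPR: PUSH/POP as before
      assert( spillsViaStackSlot(true,  false)); // XMM register: stack slot as before
      assert( spillsViaStackSlot(false, true));  // Win64 GPR: stack slot (the workaround)
      return 0;
    }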


@@ -23,7 +23,7 @@
 using namespace llvm;

 // Determine the platform we're running on
-#if defined (__x86_64__) || defined (_M_AMD64)
+#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
 # define X86_64_JIT
 #elif defined(__i386__) || defined(i386) || defined(_M_IX86)
 # define X86_32_JIT
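
_M_X64 is the architecture macro Microsoft documents for x64 targets; MSVC defines both _M_X64 and _M_AMD64, while GCC and Clang define __x86_64__. Accepting any of the three makes the JIT's platform detection more robust across Win64 toolchains. The snippet below is a self-contained sketch of the same check; the main() driver is only for illustration.

    #include <cstdio>

    // Same preprocessor test as in the hunk above, in isolation.
    #if defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
    # define X86_64_JIT
    #elif defined(__i386__) || defined(i386) || defined(_M_IX86)
    # define X86_32_JIT
    #endif

    int main() {
    #if defined(X86_64_JIT)
      std::puts("building for 64-bit x86");
    #elif defined(X86_32_JIT)
      std::puts("building for 32-bit x86");
    #else
      std::puts("not an x86 host");
    #endif
      return 0;
    }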


@@ -160,7 +160,7 @@ unsigned X86Subtarget::getSpecialAddressLatency() const {
 /// specified arguments. If we can't run cpuid on the host, return true.
 bool X86::GetCpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
                           unsigned *rECX, unsigned *rEDX) {
-#if defined(__x86_64__) || defined(_M_AMD64)
+#if defined(__x86_64__) || defined(_M_AMD64) || defined (_M_X64)
 #if defined(__GNUC__)
   // gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually.
   asm ("movq\t%%rbx, %%rsi\n\t"