diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 747938f3f9f..022634df87c 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -223,10 +223,6 @@ class MachineFrameInfo {
   /// Whether the "realign-stack" option is on.
   bool RealignOption;
 
-  /// True if the function includes inline assembly that adjusts the stack
-  /// pointer.
-  bool HasInlineAsmWithSPAdjust;
-
   const TargetFrameLowering *getFrameLowering() const;
 public:
     explicit MachineFrameInfo(const TargetMachine &TM, bool RealignOpt)
@@ -455,10 +451,6 @@ public:
   bool hasCalls() const { return HasCalls; }
   void setHasCalls(bool V) { HasCalls = V; }
 
-  /// Returns true if the function contains any stack-adjusting inline assembly.
-  bool hasInlineAsmWithSPAdjust() const { return HasInlineAsmWithSPAdjust; }
-  void setHasInlineAsmWithSPAdjust(bool B) { HasInlineAsmWithSPAdjust = B; }
-
   /// getMaxCallFrameSize - Return the maximum size of a call frame that must be
   /// allocated for an outgoing function call.  This is only available if
   /// CallFrameSetup/Destroy pseudo instructions are used by the target, and
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 43b370cccff..c886e256e04 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -131,8 +131,8 @@ class MachineFunction {
   /// about the control flow of such functions.
   bool ExposesReturnsTwice;
 
-  /// True if the function includes any inline assembly.
-  bool HasInlineAsm;
+  /// True if the function includes MS-style inline assembly.
+  bool HasMSInlineAsm;
 
   MachineFunction(const MachineFunction &) LLVM_DELETED_FUNCTION;
   void operator=(const MachineFunction&) LLVM_DELETED_FUNCTION;
@@ -218,14 +218,15 @@ public:
     ExposesReturnsTwice = B;
   }
 
-  /// Returns true if the function contains any inline assembly.
-  bool hasInlineAsm() const {
-    return HasInlineAsm;
+  /// Returns true if the function contains any MS-style inline assembly.
+  bool hasMSInlineAsm() const {
+    return HasMSInlineAsm;
   }
 
-  /// Set a flag that indicates that the function contains inline assembly.
-  void setHasInlineAsm(bool B) {
-    HasInlineAsm = B;
+  /// Set a flag that indicates that the function contains MS-style inline
+  /// assembly.
+  void setHasMSInlineAsm(bool B) {
+    HasMSInlineAsm = B;
   }
 
   /// getInfo - Keep track of various per-function pieces of information for
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index b3cbd918386..34b7df59acd 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -851,20 +851,12 @@ void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
   SDValue Res = DAG.getTargetConstant(Flag, MVT::i32);
   Ops.push_back(Res);
 
-  unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
   for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
     unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
     MVT RegisterVT = RegVTs[Value];
     for (unsigned i = 0; i != NumRegs; ++i) {
       assert(Reg < Regs.size() && "Mismatch in # registers expected");
-      unsigned TheReg = Regs[Reg++];
-      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
-
-      // Notice if we clobbered the stack pointer.  Yes, inline asm can do this.
-      if (TheReg == SP && Code == InlineAsm::Kind_Clobber) {
-        MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
-        MFI->setHasInlineAsmWithSPAdjust(true);
-      }
+      Ops.push_back(DAG.getRegister(Regs[Reg++], RegisterVT));
     }
   }
 }
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 506b7380515..6c335d96c73 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -428,9 +428,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
 
   SDB->init(GFI, *AA, LibInfo);
 
-  MF->setHasInlineAsm(false);
-  MF->getFrameInfo()->setHasInlineAsmWithSPAdjust(false);
-
+  MF->setHasMSInlineAsm(false);
   SelectAllBasicBlocks(Fn);
 
   // If the first basic block in the function has live ins that need to be
@@ -514,7 +512,7 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
   for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
        ++I) {
 
-    if (MFI->hasCalls() && MF->hasInlineAsm())
+    if (MFI->hasCalls() && MF->hasMSInlineAsm())
       break;
 
     const MachineBasicBlock *MBB = I;
@@ -525,8 +523,8 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
           II->isStackAligningInlineAsm()) {
         MFI->setHasCalls(true);
       }
-      if (II->isInlineAsm()) {
-        MF->setHasInlineAsm(true);
+      if (II->isMSInlineAsm()) {
+        MF->setHasMSInlineAsm(true);
       }
     }
   }
diff --git a/lib/MC/MCParser/AsmParser.cpp b/lib/MC/MCParser/AsmParser.cpp
index a336cb97c66..fe3969a0a78 100644
--- a/lib/MC/MCParser/AsmParser.cpp
+++ b/lib/MC/MCParser/AsmParser.cpp
@@ -4208,11 +4208,6 @@ bool AsmParser::parseMSInlineAsm(
         AsmStrRewrites.push_back(AsmRewrite(AOK_Input, Start, SymName.size()));
       }
     }
-
-    // Consider implicit defs to be clobbers.  Think of cpuid and push.
-    const uint16_t *ImpDefs = Desc.getImplicitDefs();
-    for (unsigned I = 0, E = Desc.getNumImplicitDefs(); I != E; ++I)
-      ClobberRegs.push_back(ImpDefs[I]);
   }
 
   // Set the number of Outputs and Inputs.
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 0d76534711d..a06ba9d750a 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -50,7 +50,7 @@ bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
   return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
           RegInfo->needsStackRealignment(MF) ||
           MFI->hasVarSizedObjects() ||
-          MFI->isFrameAddressTaken() || MFI->hasInlineAsmWithSPAdjust() ||
+          MFI->isFrameAddressTaken() || MF.hasMSInlineAsm() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
           MMI.callsUnwindInit() || MMI.callsEHReturn());
 }
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index d3d05cd83a4..dbda556b1b5 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -347,12 +347,6 @@ BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
         "Stack realignment in presence of dynamic allocas is not supported with"
         "this calling convention.");
 
-    // FIXME: Do a proper analysis of the inline asm to see if it actually
-    // conflicts with the base register we chose.
-    if (MF.hasInlineAsm())
-      report_fatal_error("Stack realignment in presence of dynamic stack "
-                         "adjustments is not supported with inline assembly.");
-
     for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
          I.isValid(); ++I)
       Reserved.set(*I);
@@ -409,15 +403,18 @@ bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
   if (!EnableBasePointer)
     return false;
 
-  // When we need stack realignment, we can't address the stack from the frame
-  // pointer.  When we have dynamic allocas or stack-adjusting inline asm, we
-  // can't address variables from the stack pointer.  MS inline asm can
-  // reference locals while also adjusting the stack pointer.  When we can't
-  // use both the SP and the FP, we need a separate base pointer register.
-  bool CantUseFP = needsStackRealignment(MF);
-  bool CantUseSP =
-      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
-  return CantUseFP && CantUseSP;
+  // When we need stack realignment and there are dynamic allocas, we can't
+  // reference off of the stack pointer, so we reserve a base pointer.
+  //
+  // This is also true if the function contains MS-style inline assembly. We
+  // do this because if any stack changes occur in the inline assembly, e.g.,
+  // "pusha", then any C local variable or C argument references in the
+  // inline assembly will be wrong because the SP is not properly tracked.
+  if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
+      MF.hasMSInlineAsm())
+    return true;
+
+  return false;
 }
 
 bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
diff --git a/test/CodeGen/X86/inline-asm-stack-realign.ll b/test/CodeGen/X86/inline-asm-stack-realign.ll
deleted file mode 100644
index 5cf06277633..00000000000
--- a/test/CodeGen/X86/inline-asm-stack-realign.ll
+++ /dev/null
@@ -1,16 +0,0 @@
-; RUN: not llc -march x86 < %s 2>&1 | FileCheck %s
-
-; We don't currently support realigning the stack and adjusting the stack
-; pointer in inline asm.  This commonly happens in MS inline assembly using
-; push and pop.
-
-; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
-
-define i32 @foo() {
-entry:
-  %r = alloca i32, align 16
-  store i32 -1, i32* %r, align 16
-  call void asm sideeffect inteldialect "push esi\0A\09xor esi, esi\0A\09mov dword ptr $0, esi\0A\09pop esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
-  %0 = load i32* %r, align 16
-  ret i32 %0
-}
diff --git a/test/CodeGen/X86/inline-asm-stack-realign2.ll b/test/CodeGen/X86/inline-asm-stack-realign2.ll
deleted file mode 100644
index fa18258172b..00000000000
--- a/test/CodeGen/X86/inline-asm-stack-realign2.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: not llc -march x86 < %s 2>&1 | FileCheck %s
-
-; We don't currently support realigning the stack and adjusting the stack
-; pointer in inline asm.  This can even happen in GNU asm.
-
-; CHECK: Stack realignment in presence of dynamic stack adjustments is not supported with inline assembly
-
-define i32 @foo() {
-entry:
-  %r = alloca i32, align 16
-  store i32 -1, i32* %r, align 16
-  call void asm sideeffect "push %esi\0A\09xor %esi, %esi\0A\09mov %esi, $0\0A\09pop %esi", "=*m,~{flags},~{esi},~{esp},~{dirflag},~{fpsr},~{flags}"(i32* %r)
-  %0 = load i32* %r, align 16
-  ret i32 %0
-}
diff --git a/test/CodeGen/X86/ms-inline-asm.ll b/test/CodeGen/X86/ms-inline-asm.ll
index 779abfeeeaf..5e7ba37b39c 100644
--- a/test/CodeGen/X86/ms-inline-asm.ll
+++ b/test/CodeGen/X86/ms-inline-asm.ll
@@ -5,6 +5,7 @@ entry:
   %0 = tail call i32 asm sideeffect inteldialect "mov eax, $1\0A\09mov $0, eax", "=r,r,~{eax},~{dirflag},~{fpsr},~{flags}"(i32 1) nounwind
   ret i32 %0
 ; CHECK: t1
+; CHECK: movl %esp, %ebp
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
 ; CHECK: mov eax, ecx
@@ -18,6 +19,7 @@ entry:
   call void asm sideeffect inteldialect "mov eax, $$1", "~{eax},~{dirflag},~{fpsr},~{flags}"() nounwind
   ret void
 ; CHECK: t2
+; CHECK: movl %esp, %ebp
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
 ; CHECK: mov eax, 1
@@ -32,6 +34,7 @@ entry:
   call void asm sideeffect inteldialect "mov eax, DWORD PTR [$0]", "*m,~{eax},~{dirflag},~{fpsr},~{flags}"(i32* %V.addr) nounwind
   ret void
 ; CHECK: t3
+; CHECK: movl %esp, %ebp
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
 ; CHECK: mov eax, DWORD PTR {{[[esp]}}
@@ -53,6 +56,7 @@ entry:
   %0 = load i32* %b1, align 4
   ret i32 %0
 ; CHECK: t18
+; CHECK: movl %esp, %ebp
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
 ; CHECK: lea ebx, foo
@@ -72,6 +76,7 @@ entry:
   call void asm sideeffect inteldialect "call $0", "r,~{dirflag},~{fpsr},~{flags}"(void ()* @t19_helper) nounwind
   ret void
 ; CHECK-LABEL: t19:
+; CHECK: movl %esp, %ebp
 ; CHECK: movl ${{_?}}t19_helper, %eax
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
@@ -90,6 +95,7 @@ entry:
   %0 = load i32** %res, align 4
   ret i32* %0
 ; CHECK-LABEL: t30:
+; CHECK: movl %esp, %ebp
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
 ; CHECK: lea edi, dword ptr [{{_?}}results]
@@ -97,31 +103,8 @@ entry:
 ; CHECK: {{## InlineAsm End|#NO_APP}}
 ; CHECK: {{## InlineAsm Start|#APP}}
 ; CHECK: .intel_syntax
-; CHECK: mov dword ptr [esp], edi
+; CHECK: mov dword ptr [esi], edi
 ; CHECK: .att_syntax
 ; CHECK: {{## InlineAsm End|#NO_APP}}
-; CHECK: movl (%esp), %eax
-}
-
-; Stack realignment plus MS inline asm that does *not* adjust the stack is no
-; longer an error.
-
-define i32 @t31() {
-entry:
-  %val = alloca i32, align 16
-  store i32 -1, i32* %val, align 16
-  call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %val) #1
-  %sp = load i32* %val, align 16
-  ret i32 %sp
-; CHECK-LABEL: t31:
-; CHECK: pushl %ebp
-; CHECK: movl %esp, %ebp
-; CHECK: andl $-16, %esp
-; CHECK: {{## InlineAsm Start|#APP}}
-; CHECK: .intel_syntax
-; CHECK: mov dword ptr [esp], esp
-; CHECK: .att_syntax
-; CHECK: {{## InlineAsm End|#NO_APP}}
-; CHECK: movl (%esp), %eax
-; CHECK: ret
+; CHECK: movl (%esi), %eax
 }
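
Note (illustration, not part of the patch): with the hasBasePointer() change above, a function containing MS-style inline assembly now reserves a base register instead of hitting the removed report_fatal_error. Below is a minimal sketch of the kind of IR this affects; the function name and RUN line are hypothetical, the asm body is lifted from the deleted t31 test, and the expectation that the local is addressed through ESI follows from the updated [esi] checks in ms-inline-asm.ll.

; RUN: llc -march=x86 < %s
define i32 @capture_sp_ms_asm() nounwind {
entry:
  %slot = alloca i32, align 4
  ; MS-style (inteldialect) asm marks the function via setHasMSInlineAsm(), so
  ; X86RegisterInfo::hasBasePointer() should return true and %slot should be
  ; addressed off the base pointer (ESI on x86-32) rather than ESP.
  call void asm sideeffect inteldialect "mov dword ptr $0, esp", "=*m,~{dirflag},~{fpsr},~{flags}"(i32* %slot)
  %0 = load i32* %slot, align 4
  ret i32 %0
}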