[x86/SLH] Fix a bug where we would harden tail calls twice -- once as
a call, and then again as a return.

Also added a comment to try to better explain why we do what we're doing
when hardening the (non-call) returns.

llvm-svn: 337673
This commit is contained in:
Chandler Carruth 2018-07-23 07:56:15 +00:00
parent b66f2d8df8
commit 1d926fb9f4
2 changed files with 5 additions and 7 deletions

View File

@ -525,7 +525,11 @@ bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(
continue;
MachineInstr &MI = MBB.back();
if (!MI.isReturn())
// We only care about returns that are not also calls. For calls, that
// happen to also be returns (tail calls) we will have already handled
// them as calls.
if (!MI.isReturn() || MI.isCall())
continue;
hardenReturnInstr(MI);

View File

@ -37,9 +37,6 @@ define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind {
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *(%rdi) # TAILCALL
@ -77,9 +74,6 @@ define i32 @test_indirect_tail_call_global() nounwind {
; X64-NEXT: movq %rsp, %rax
; X64-NEXT: movq $-1, %rcx
; X64-NEXT: sarq $63, %rax
; X64-NEXT: movq %rax, %rcx
; X64-NEXT: shlq $47, %rcx
; X64-NEXT: orq %rcx, %rsp
; X64-NEXT: shlq $47, %rax
; X64-NEXT: orq %rax, %rsp
; X64-NEXT: jmpq *{{.*}}(%rip) # TAILCALL