mirror of
https://github.com/RPCSX/llvm.git
synced 2024-11-26 21:20:37 +00:00
c7b902e7fe
The current Intel Atom microarchitecture has a feature whereby when a function returns early then it is slightly faster to execute a sequence of NOP instructions to wait until the return address is ready, as opposed to simply stalling on the ret instruction until the return address is ready. When compiling for X86 Atom only, this patch will run a pass, called "X86PadShortFunction" which will add NOP instructions where less than four cycles elapse between function entry and return. It includes tests. This patch has been updated to address Nadav's review comments - Optimize only at >= O1 and don't do optimization if -Os is set - Stores MachineBasicBlock* instead of BBNum - Uses DenseMap instead of std::map - Fixes placement of braces Patch by Andy Zhang. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@171879 91177308-0d34-0410-b5e6-96231b3b80d8
40 lines
806 B
LLVM
; RUN: llc < %s -mtriple=x86_64-apple-darwin11 -mcpu=core2 -mattr=+mmx,+sse2 | FileCheck %s

; rdar://6602459
; Checks x86-64 codegen for MMX-sized vector values (<1 x i64>, <2 x i32>):
; they should travel through GPRs / XMM registers, not MMX registers.

; Global sink for the <1 x i64> call result in @t1 (defined elsewhere).
@g_v1di = external global <1 x i64>
; A <1 x i64> returned from a call comes back in %rax (x86-64 ABI); storing it
; to the global must be a plain GPR movq, with no MMX register traffic.
define void @t1() nounwind {
entry:
  %call = call <1 x i64> @return_v1di()   ; result returned in %rax
  store <1 x i64> %call, <1 x i64>* @g_v1di
  ret void
; CHECK: t1:
; CHECK: callq
; CHECK-NEXT: movq _g_v1di
; CHECK-NEXT: movq %rax,
}
|
|
|
|
declare <1 x i64> @return_v1di()
|
|
|
|
; A constant <1 x i64> return should be materialized with a simple 32-bit
; immediate move into the return register — no MMX instructions.
define <1 x i64> @t2() nounwind {
  ret <1 x i64> <i64 1>
; CHECK: t2:
; CHECK: movl $1
; CHECK-NEXT: ret
}
; A constant <2 x i32> return is built with an immediate move plus a movd into
; %xmm0 (the SSE return register), avoiding MMX.
define <2 x i32> @t3() nounwind {
  ret <2 x i32> <i32 1, i32 0>
; CHECK: t3:
; CHECK: movl $1
; CHECK: movd {{.*}}, %xmm0
}
; Same constant bits as @t4's <2 x i32> payload, returned as a double: expect
; the identical immediate-move + movd-to-%xmm0 sequence.
define double @t4() nounwind {
  ret double bitcast (<2 x i32> <i32 1, i32 0> to double)
; CHECK: t4:
; CHECK: movl $1
; CHECK: movd {{.*}}, %xmm0
}