[X86][SSE] Let 64-bit targets use the fast 2i32-2f32 UINT_TO_FP conversion as well as 32-bit

The 2i32-2i64 legalization means that we can use the slightly quicker 'double bits + fptrunc' approach for the same results.

llvm-svn: 277271
Simon Pilgrim 2016-07-30 14:06:59 +00:00
parent 01a2cb55f1
commit cf49fa3251
2 changed files with 11 additions and 13 deletions
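
For reference on the 'double bits + fptrunc' trick mentioned above, a minimal standalone C++ sketch of the scalar equivalent follows. The function name and the spot-check values are illustrative only and not part of the commit; the commit itself merely enables the existing custom lowering on 64-bit targets as well.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Scalar form of the "double bits" trick: OR the 32-bit value into the low
// mantissa bits of 2^52, which yields the double 2^52 + x exactly; subtracting
// 2^52 then leaves (double)x, and a final fptrunc gives the f32 result.
static float uint32_to_float_via_double_bits(uint32_t x) {
  uint64_t bits = 0x4330000000000000ULL | x;  // bit pattern of 2^52, with x in the mantissa
  double d;
  std::memcpy(&d, &bits, sizeof(d));          // bitcast i64 -> f64
  d -= 4503599627370496.0;                    // subtract 2^52, leaving (double)x exactly
  return static_cast<float>(d);               // fptrunc f64 -> f32
}

int main() {
  // Spot-check a few values across the 0 - 0xFFFFFFFF range against the
  // direct u32 -> f32 conversion.
  const uint32_t tests[] = {0u, 1u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t x : tests) {
    float fast = uint32_to_float_via_double_bits(x);
    assert(fast == static_cast<float>(x));
    std::printf("%u -> %a\n", x, fast);
  }
  return 0;
}

Since every u32 fits exactly in a double's 52-bit mantissa, the subtraction is exact and the final truncation rounds once, matching a direct u32-to-float conversion across the full 0 - 0xFFFFFFFF range.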


@@ -834,10 +834,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
 setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
-// As there is no 64-bit GPR available, we need build a special custom
-// sequence to convert from v2i32 to v2f32.
-if (!Subtarget.is64Bit())
-  setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
+// Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
+setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
 setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
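
A rough SSE2 intrinsics sketch of the two-lane form this custom lowering produces is shown below; it mirrors the new X64 sequence checked in the test hunk that follows. The function name and lane layout are assumptions for illustration, and the sketch zero-extends with punpckldq where the actual codegen uses pxor + pblendw (same effect here).

#include <cstdint>
#include <emmintrin.h>  // SSE2

// Two-lane form of the same trick, roughly matching the new X64 checks in the
// test diff below (which use pxor/pblendw/por/subpd/cvtpd2ps). This sketch
// assumes the two u32 values sit in lanes 0 and 1.
static __m128 uitofp_2u32_to_2f32(__m128i v) {
  const __m128i exp_bits = _mm_set1_epi64x(0x4330000000000000LL);  // bit pattern of 2^52
  const __m128d magic = _mm_set1_pd(4503599627370496.0);           // 2^52
  __m128i wide = _mm_unpacklo_epi32(v, _mm_setzero_si128());  // zero-extend to 2 x u64
  __m128d d = _mm_castsi128_pd(_mm_or_si128(wide, exp_bits)); // doubles holding 2^52 + x
  d = _mm_sub_pd(d, magic);                                   // leaves (double)x in each lane
  return _mm_cvtpd_ps(d);                                     // round both lanes to f32
}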


@@ -2,8 +2,8 @@
 ; RUN: llc < %s -mtriple=i686-linux-pc -mcpu=corei7 | FileCheck %s --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-linux-pc -mcpu=corei7 | FileCheck %s --check-prefix=X64
-; FIXME: As discussed on PR14760, we currently have a difference in uitofp <2 x i32> codegen between
-; buildvector and legalization on 32-bit targets:
+; uitofp <2 x i32> codegen from buildvector or legalization is different but gives the same results
+; across the full 0 - 0xFFFFFFFF u32 range.
 define <2 x float> @uitofp_2i32_buildvector(i32 %x, i32 %y, <2 x float> %v) {
 ; X32-LABEL: uitofp_2i32_buildvector:
@@ -52,13 +52,12 @@ define <2 x float> @uitofp_2i32_legalized(<2 x i32> %in, <2 x float> %v) {
 ;
 ; X64-LABEL: uitofp_2i32_legalized:
 ; X64: # BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; X64-NEXT: movdqa {{.*#+}} xmm2 = [1258291200,1258291200,1258291200,1258291200]
-; X64-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; X64-NEXT: psrld $16, %xmm0
-; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
-; X64-NEXT: addps {{.*}}(%rip), %xmm0
-; X64-NEXT: addps %xmm2, %xmm0
+; X64-NEXT: pxor %xmm2, %xmm2
+; X64-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
+; X64-NEXT: movdqa {{.*#+}} xmm0 = [4.503600e+15,4.503600e+15]
+; X64-NEXT: por %xmm0, %xmm2
+; X64-NEXT: subpd %xmm0, %xmm2
+; X64-NEXT: cvtpd2ps %xmm2, %xmm0
 ; X64-NEXT: mulps %xmm1, %xmm0
 ; X64-NEXT: retq
 %t1 = uitofp <2 x i32> %in to <2 x float>