mirror of https://github.com/RPCS3/llvm-mirror.git
synced 2024-12-11 21:45:16 +00:00
64ad084e97
We currently generate BUILD_VECTOR as a tree of UNPCKL shuffles of the same type, e.g. for v4f32:

Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
        unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
Step 2: unpcklps X, Y ==> <3, 2, 1, 0>

The issue is that, because we do not place sequential vector elements together early enough, we fail to recognise many combinable patterns - consecutive scalar loads, extractions, etc. Instead, this patch unpacks progressively larger sequential vector elements together, e.g. for v4f32:

Step 1: unpcklps 0, 2 ==> X: <?, ?, 1, 0>
        unpcklps 1, 3 ==> Y: <?, ?, 3, 2>
Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>

This does mean that we now create UNPCKL shuffles of different value types, but the relevant combines that benefit from this are quite capable of handling the additional BITCASTs that are now included in the shuffle tree.

Differential Revision: https://reviews.llvm.org/D33864

llvm-svn: 304688
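To make the pattern concrete, a v4f32 BUILD_VECTOR of this shape typically arises from an insertelement chain such as the following (a minimal hand-written sketch for illustration; @build4 is a hypothetical function, not part of the committed test):

define <4 x float> @build4(float %a, float %b, float %c, float %d) nounwind {
; Four scalars are knitted into a single vector; the backend lowers this
; BUILD_VECTOR through the unpack tree described above.
  %v0 = insertelement <4 x float> undef, float %a, i32 0
  %v1 = insertelement <4 x float> %v0, float %b, i32 1
  %v2 = insertelement <4 x float> %v1, float %c, i32 2
  %v3 = insertelement <4 x float> %v2, float %d, i32 3
  ret <4 x float> %v3
}

Under the old scheme all three merges were unpcklps; with this patch the final merge becomes an unpcklpd joining the <1, 0> and <3, 2> pairs - the same unpcklps/unpcklpd tail that the @qux CHECK lines below now expect.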
117 lines
4.6 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s

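; v4i32 srem has no SSE instruction, so each element is extracted to a GPR,
; divided with idivl (cltd sign-extends %eax into %edx; the remainder comes
; back in %edx), and the four remainders are rebuilt into a vector via the
; progressive punpckldq/punpcklqdq unpack tree.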
define <4 x i32> @foo(<4 x i32> %t, <4 x i32> %u) nounwind {
; CHECK-LABEL: foo:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm3, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; CHECK-NEXT: movd %xmm3, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm3
; CHECK-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: movd %xmm1, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: cltd
; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; CHECK-NEXT: movdqa %xmm2, %xmm0
; CHECK-NEXT: retq
  %m = srem <4 x i32> %t, %u
  ret <4 x i32> %m
}

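; The unsigned case is the same except that %edx is zeroed with xorl before
; each divl; the unsigned remainder is likewise returned in %edx.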
define <4 x i32> @bar(<4 x i32> %t, <4 x i32> %u) nounwind {
; CHECK-LABEL: bar:
; CHECK: # BB#0:
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,1,2,3]
; CHECK-NEXT: movd %xmm2, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,0,1]
; CHECK-NEXT: movd %xmm3, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; CHECK-NEXT: movd %xmm3, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm3
; CHECK-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: movd %xmm1, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
; CHECK-NEXT: movd %xmm0, %ecx
; CHECK-NEXT: xorl %edx, %edx
; CHECK-NEXT: divl %ecx
; CHECK-NEXT: movd %edx, %xmm0
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; CHECK-NEXT: movdqa %xmm2, %xmm0
; CHECK-NEXT: retq
  %m = urem <4 x i32> %t, %u
  ret <4 x i32> %m
}

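; frem has no hardware instruction at all, so each element becomes a libcall
; to fmodf; note the tail: unpcklps builds the <1, 0> and <3, 2> pairs and
; unpcklpd joins them, matching the progressive unpack scheme described in
; the commit message.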
define <4 x float> @qux(<4 x float> %t, <4 x float> %u) nounwind {
; CHECK-LABEL: qux:
; CHECK: # BB#0:
; CHECK-NEXT: subq $72, %rsp
; CHECK-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: unpcklps (%rsp), %xmm0 # 16-byte Folded Reload
; CHECK-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; CHECK-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm0 # 16-byte Reload
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,1,2,3]
; CHECK-NEXT: callq fmodf
; CHECK-NEXT: movaps {{[0-9]+}}(%rsp), %xmm1 # 16-byte Reload
; CHECK-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; CHECK-NEXT: unpcklpd (%rsp), %xmm1 # 16-byte Folded Reload
; CHECK-NEXT: # xmm1 = xmm1[0],mem[0]
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: addq $72, %rsp
; CHECK-NEXT: retq
  %m = frem <4 x float> %t, %u
  ret <4 x float> %m
}