[X86] Regenerated shift combine tests.

Added x86_64 tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@281341 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Simon Pilgrim 2016-09-13 14:41:39 +00:00
parent befcbd18bd
commit 7724253460

View File

@ -1,15 +1,24 @@
; RUN: llc -march=x86 < %s | FileCheck %s
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=i686-unknown < %s | FileCheck %s --check-prefix=X32
; RUN: llc -mtriple=x86_64-unknown < %s | FileCheck %s --check-prefix=X64
@array = weak global [4 x i32] zeroinitializer
define i32 @test_lshr_and(i32 %x) {
; CHECK-LABEL: test_lshr_and:
; CHECK-NOT: shrl
; CHECK: andl $12,
; CHECK: movl {{.*}}array{{.*}},
; CHECK: ret
entry:
; X32-LABEL: test_lshr_and:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: andl $12, %eax
; X32-NEXT: movl array(%eax), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_lshr_and:
; X64: # BB#0:
; X64-NEXT: # kill: %EDI<def> %EDI<kill> %RDI<def>
; X64-NEXT: shrl $2, %edi
; X64-NEXT: andl $3, %edi
; X64-NEXT: movl array(,%rdi,4), %eax
; X64-NEXT: retq
%tmp2 = lshr i32 %x, 2
%tmp3 = and i32 %tmp2, 3
%tmp4 = getelementptr [4 x i32], [4 x i32]* @array, i32 0, i32 %tmp3
@ -18,9 +27,21 @@ entry:
}
define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact1:
; CHECK: sarl %
; X32-LABEL: test_exact1:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact1:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
@ -28,9 +49,21 @@ define i32* @test_exact1(i32 %a, i32 %b, i32* %x) {
}
define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact2:
; CHECK: sarl %
; X32-LABEL: test_exact2:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: sarl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact2:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $3, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
@ -38,9 +71,20 @@ define i32* @test_exact2(i32 %a, i32 %b, i32* %x) {
}
define i32* @test_exact3(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact3:
; CHECK-NOT: sarl
; X32-LABEL: test_exact3:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact3:
; X64: # BB#0:
; X64-NEXT: subl %edi, %esi
; X64-NEXT: sarl $2, %esi
; X64-NEXT: movslq %esi, %rax
; X64-NEXT: leaq (%rdx,%rax,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = ashr exact i32 %sub, 2
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
@ -48,9 +92,21 @@ define i32* @test_exact3(i32 %a, i32 %b, i32* %x) {
}
define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact4:
; CHECK: shrl %
; X32-LABEL: test_exact4:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact4:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
@ -58,9 +114,21 @@ define i32* @test_exact4(i32 %a, i32 %b, i32* %x) {
}
define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact5:
; CHECK: shrl %
; X32-LABEL: test_exact5:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: shrl %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact5:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: shrl $3, %esi
; X64-NEXT: leaq (%rdx,%rsi,4), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 3
%gep = getelementptr inbounds i32, i32* %x, i32 %shr
@ -68,9 +136,19 @@ define i32* @test_exact5(i32 %a, i32 %b, i32* %x) {
}
define i32* @test_exact6(i32 %a, i32 %b, i32* %x) {
; CHECK-LABEL: test_exact6:
; CHECK-NOT: shrl
; X32-LABEL: test_exact6:
; X32: # BB#0:
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-NEXT: subl {{[0-9]+}}(%esp), %eax
; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
; X32-NEXT: retl
;
; X64-LABEL: test_exact6:
; X64: # BB#0:
; X64-NEXT: # kill: %ESI<def> %ESI<kill> %RSI<def>
; X64-NEXT: subl %edi, %esi
; X64-NEXT: leaq (%rsi,%rdx), %rax
; X64-NEXT: retq
%sub = sub i32 %b, %a
%shr = lshr exact i32 %sub, 2
%gep = getelementptr inbounds i32, i32* %x, i32 %shr