[x86] add tests for potential andn optimization; NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@300617 91177308-0d34-0410-b5e6-96231b3b80d8
Sanjay Patel 2017-04-18 22:36:59 +00:00
parent 129271c86a
commit 2fdc237817

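The fold these tests anticipate rests on the identity (x ^ C) & C == ~x & C for any constant C: in each bit where C is set, xor with 1 acts as not; in each bit where C is clear, the and zeroes the result either way. ~x & C is the canonical and-not pattern that the x86 backend already matches to a single andn-style instruction (vpandn/vandnps), so the xor+and pair in each test could collapse to one logic op against the same splat-1 constant; hedged IR sketches of the rewritten form follow each new test below.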

@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512
define <4 x double> @andpd256(<4 x double> %y, <4 x double> %x) nounwind uwtable readnone ssp {
; CHECK-LABEL: andpd256:
@@ -271,3 +271,41 @@ entry:
ret <2 x i64> %x
}
define <4 x i32> @and_xor_splat1_v4i32(<4 x i32> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i32:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [1,1,1,1]
; AVX-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: and_xor_splat1_v4i32:
; AVX512: # BB#0:
; AVX512-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX512-NEXT: vxorps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX512-NEXT: retq
%xor = xor <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
%and = and <4 x i32> %xor, <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %and
}
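; Sketch (not part of this commit) of the form the potential combine could
; produce for the test above, using (x ^ 1) & 1 == ~x & 1. The and-not form
; selects to a single vpandn/vandnps plus the constant load; the function
; name is illustrative only:
define <4 x i32> @and_xor_splat1_v4i32_sketch(<4 x i32> %x) nounwind {
  %not = xor <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  %and = and <4 x i32> %not, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}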
define <4 x i64> @and_xor_splat1_v4i64(<4 x i64> %x) nounwind {
; AVX-LABEL: and_xor_splat1_v4i64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [1,1,1,1]
; AVX-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
; AVX512-LABEL: and_xor_splat1_v4i64:
; AVX512: # BB#0:
; AVX512-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
; AVX512-NEXT: vxorps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX512-NEXT: retq
%xor = xor <4 x i64> %x, <i64 1, i64 1, i64 1, i64 1>
%and = and <4 x i64> %xor, <i64 1, i64 1, i64 1, i64 1>
ret <4 x i64> %and
}
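; The same rewrite applies to the 256-bit case; under plain AVX (no 256-bit
; integer logic ops until AVX2) the and-not would stay in the float domain as
; vandnps. Sketch only, not part of this commit:
define <4 x i64> @and_xor_splat1_v4i64_sketch(<4 x i64> %x) nounwind {
  %not = xor <4 x i64> %x, <i64 -1, i64 -1, i64 -1, i64 -1>
  %and = and <4 x i64> %not, <i64 1, i64 1, i64 1, i64 1>
  ret <4 x i64> %and
}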