[X86][AVX] Regenerate tests with constant broadcast comments

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308131 91177308-0d34-0410-b5e6-96231b3b80d8
commit 0ad8dddb9f
parent a4a73ad242
Author: Simon Pilgrim
Date:   2017-07-16 11:43:16 +00:00
4 changed files with 15 additions and 15 deletions
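The regenerated checks pin down the actual broadcast constant, which llc now prints as an end-of-line assembly comment next to each constant-pool load. A minimal sketch of the correspondence (the .LCPI0_0 constant-pool label is illustrative, not taken from this commit):

  ; CHECK-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]

matches llc output such as

  vbroadcastss .LCPI0_0(%rip), %xmm1 # xmm1 = [1,1,1,1]

The {{.*#+}} block is a FileCheck regex that consumes the operands up to the trailing '#' comment marker, so the check now verifies the decoded constant, whereas the old {{.*}}(%rip) pattern only verified that some RIP-relative load was broadcast.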


@@ -19,7 +19,7 @@ define i32 @sext_inc(i1 zeroext %x) nounwind {
 define <4 x i32> @sext_inc_vec(<4 x i1> %x) nounwind {
 ; CHECK-LABEL: sext_inc_vec:
 ; CHECK: # BB#0:
-; CHECK-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
 ; CHECK-NEXT: vandnps %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT: retq
 %ext = sext <4 x i1> %x to <4 x i32>
@@ -31,7 +31,7 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) nounwind {
 ; CHECK-LABEL: cmpgt_sext_inc_vec:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT: retq
 %cmp = icmp sgt <4 x i32> %x, %y
@@ -56,7 +56,7 @@ define <4 x i64> @cmpgt_sext_inc_vec256(<4 x i64> %x, <4 x i64> %y) nounwind {
 ; CHECK-LABEL: cmpgt_sext_inc_vec256:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
-; CHECK-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; CHECK-NEXT: vpbroadcastq {{.*#+}} ymm1 = [1,1,1,1]
 ; CHECK-NEXT: vpandn %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT: retq
 %cmp = icmp sgt <4 x i64> %x, %y
@@ -91,7 +91,7 @@ define <4 x i32> @bool_logic_and_math_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32>
 ; CHECK-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
 ; CHECK-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
-; CHECK-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; CHECK-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; CHECK-NEXT: vpandn %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT: retq
 %cmp1 = icmp ne <4 x i32> %a, %b
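A note on what the new constants verify in this file: for a boolean vector b sign-extended to 0 or -1, sext(b) + 1 equals (~sext(b)) & 1, since -1 + 1 = 0 = ~(-1) & 1 and 0 + 1 = 1 = ~0 & 1. That is why each add of 1 above lowers to vandnps/vpandn against a broadcast of [1,1,1,1], and the regenerated checks now confirm that exact splat rather than an anonymous RIP-relative load.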


@@ -105,7 +105,7 @@ define <4 x i32> @mul_v4i32c(<4 x i32> %i) nounwind {
 ;
 ; AVX-LABEL: mul_v4i32c:
 ; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [117,117,117,117]
 ; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 entry:
@@ -523,7 +523,7 @@ define <8 x i32> @mul_v8i32c(<8 x i32> %i) nounwind {
 ;
 ; AVX-LABEL: mul_v8i32c:
 ; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} ymm1 = [117,117,117,117,117,117,117,117]
 ; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
 ; AVX-NEXT: retq
 entry:
@@ -551,7 +551,7 @@ define <4 x i64> @mul_v4i64c(<4 x i64> %i) nounwind {
 ;
 ; AVX-LABEL: mul_v4i64c:
 ; AVX: # BB#0: # %entry
-; AVX-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; AVX-NEXT: vpbroadcastq {{.*#+}} ymm1 = [117,117,117,117]
 ; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm2
 ; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
 ; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
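A note on the mul_v4i64c sequence: AVX2 has no 64-bit vector multiply, so multiplying by the splatted constant c = 117 uses the identity x*c = lo32(x)*c + ((hi32(x)*c) << 32) (mod 2^64), valid because c fits in 32 bits. The first vpmuludq computes lo32(x)*c, vpsrlq $32 extracts hi32(x), and the second vpmuludq computes hi32(x)*c; the shift-and-add tail of the sequence falls outside this hunk's context.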


@@ -463,7 +463,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; AVX2-LABEL: gt_v4i32:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -476,7 +476,7 @@ define <4 x i32> @gt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; AVX512-LABEL: gt_v4i32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
@@ -782,7 +782,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; AVX2-LABEL: lt_v4i32:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm0
 ; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
@@ -795,7 +795,7 @@ define <4 x i32> @lt_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
 ;
 ; AVX512-LABEL: lt_v4i32:
 ; AVX512: # BB#0:
-; AVX512-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX512-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX512-NEXT: vpxor %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT: vpxor %xmm2, %xmm1, %xmm1
 ; AVX512-NEXT: vpcmpgtd %xmm0, %xmm1, %xmm0
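A note on the 2147483648 splat now visible in these checks: 2147483648 = 0x80000000 is the i32 sign bit. x86 lacks an unsigned vector compare, so both operands are XORed with the sign bit, which maps unsigned order onto signed order (0 becomes INT_MIN, 4294967295 becomes INT_MAX), after which the signed vpcmpgtd yields the unsigned greater-than / less-than result.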


@@ -58,8 +58,8 @@ define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
 ; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
 ; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
 ; AVX2-NEXT: movq (%rdi,%rsi,8), %rax
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm1
-; AVX2-NEXT: vbroadcastsd {{.*}}(%rip), %ymm2
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [-0.5,-0.5,-0.5,-0.5]
+; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm2 = [0.5,0.5,0.5,0.5]
 ; AVX2-NEXT: vblendvpd %ymm0, %ymm1, %ymm2, %ymm0
 ; AVX2-NEXT: vmovupd %ymm0, (%rax)
 ; AVX2-NEXT: vzeroupper
@@ -108,7 +108,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
 ;
 ; AVX2-LABEL: test3:
 ; AVX2: ## BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm3
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm3 = [1431655766,1431655766,1431655766,1431655766]
 ; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
 ; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
 ; AVX2-NEXT: vpmuldq %xmm4, %xmm5, %xmm4
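A note on the 1431655766 splat: 1431655766 = 0x55555556 = ceil(2^32 / 3), the magic multiplier for signed division by 3. vpmuldq forms the full 64-bit products, the vpshufd/vpblendd shuffles collect their high 32 bits, and the vpsrld $31 / vpaddd pair in the next hunk adds the sign correction, yielding x/3; the multiply by [3,3,3,3] and vpsubd that follow then recover the remainder x - 3*(x/3).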
@@ -117,7 +117,7 @@ define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17,
 ; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3]
 ; AVX2-NEXT: vpsrld $31, %xmm3, %xmm4
 ; AVX2-NEXT: vpaddd %xmm4, %xmm3, %xmm3
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm4
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [3,3,3,3]
 ; AVX2-NEXT: vpmulld %xmm4, %xmm3, %xmm3
 ; AVX2-NEXT: vpsubd %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
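Checks like these are produced mechanically rather than written by hand. After a codegen change, a test is typically regenerated with LLVM's update script, run from the source tree with a built llc on PATH (the test path below is illustrative):

  python utils/update_llc_test_checks.py test/CodeGen/X86/some-test.ll

The script reruns the test's RUN lines and rewrites every CHECK block, which is how the constant broadcast comments above were picked up.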