From 71e142fc8a436fa6e44d74c21d1ea288a4bd8f22 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sun, 16 Jul 2017 11:36:11 +0000
Subject: [PATCH] [X86][AVX] Regenerate combine tests with constant broadcast comments

llvm-svn: 308128
---
 test/CodeGen/X86/combine-shl.ll  | 12 ++++++------
 test/CodeGen/X86/combine-srl.ll  |  8 ++++----
 test/CodeGen/X86/combine-udiv.ll |  2 +-
 test/CodeGen/X86/combine-urem.ll |  8 ++++----
 4 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/test/CodeGen/X86/combine-shl.ll b/test/CodeGen/X86/combine-shl.ll
index 3dbff2680c2..a6491a0a869 100644
--- a/test/CodeGen/X86/combine-shl.ll
+++ b/test/CodeGen/X86/combine-shl.ll
@@ -392,7 +392,7 @@ define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_shl_gt_lshr0:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
 ; AVX-NEXT: vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
@@ -437,7 +437,7 @@ define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_shl_le_lshr0:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
 ; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
@@ -481,7 +481,7 @@ define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_shl_ashr0:
 ; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = ashr <4 x i32> %x,
@@ -515,7 +515,7 @@ define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
 ; AVX-LABEL: combine_vec_shl_add0:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpslld $2, %xmm0, %xmm0
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = add <4 x i32> %x,
@@ -550,7 +550,7 @@ define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_shl_or0:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [5,5,5,5]
 ; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpslld $2, %xmm0, %xmm0
 ; AVX-NEXT: retq
@@ -585,7 +585,7 @@ define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_shl_mul0:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
 ; AVX-NEXT: vpmulld %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = mul <4 x i32> %x,
diff --git a/test/CodeGen/X86/combine-srl.ll b/test/CodeGen/X86/combine-srl.ll
index 21564cdd735..473fae19f4f 100644
--- a/test/CodeGen/X86/combine-srl.ll
+++ b/test/CodeGen/X86/combine-srl.ll
@@ -91,7 +91,7 @@ define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_lshr_known_zero1:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [15,15,15,15]
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
@@ -326,7 +326,7 @@ define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_lshr_shl_mask0:
 ; AVX: # BB#0:
-; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX-NEXT: vbroadcastss {{.*#+}} xmm1 = [1073741823,1073741823,1073741823,1073741823]
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = shl <4 x i32> %x,
@@ -376,10 +376,10 @@ define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
 ;
 ; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
 ; AVX: # BB#0:
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [16,16,16,16]
 ; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
-; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
+; AVX-NEXT: vpbroadcastd {{.*#+}} xmm1 = [1,1,1,1]
 ; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
   %1 = and <4 x i32> %x,
diff --git a/test/CodeGen/X86/combine-udiv.ll b/test/CodeGen/X86/combine-udiv.ll
index e1e84992940..b6ae2fa6d15 100644
--- a/test/CodeGen/X86/combine-udiv.ll
+++ b/test/CodeGen/X86/combine-udiv.ll
@@ -166,7 +166,7 @@ define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ;
 ; AVX2-LABEL: combine_vec_udiv_by_shl_pow2a:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2,2,2,2]
 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
diff --git a/test/CodeGen/X86/combine-urem.ll b/test/CodeGen/X86/combine-urem.ll
index 91da268a8d7..4c7716bbaeb 100644
--- a/test/CodeGen/X86/combine-urem.ll
+++ b/test/CodeGen/X86/combine-urem.ll
@@ -43,7 +43,7 @@ define <4 x i32> @combine_vec_urem_by_pow2a(<4 x i32> %x) {
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2a:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm1 = [3,3,3,3]
 ; AVX2-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT: retq
   %1 = urem <4 x i32> %x,
@@ -87,7 +87,7 @@ define <4 x i32> @combine_vec_urem_by_pow2c(<4 x i32> %x, <4 x i32> %y) {
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2c:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [1,1,1,1]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -146,7 +146,7 @@ define <4 x i32> @combine_vec_urem_by_pow2d(<4 x i32> %x, <4 x i32> %y) {
 ;
 ; AVX2-LABEL: combine_vec_urem_by_pow2d:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
 ; AVX2-NEXT: vpsrlvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
@@ -183,7 +183,7 @@ define <4 x i32> @combine_vec_urem_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
 ;
 ; AVX2-LABEL: combine_vec_urem_by_shl_pow2a:
 ; AVX2: # BB#0:
-; AVX2-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [4,4,4,4]
 ; AVX2-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
 ; AVX2-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
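
Note (not part of the patch): the regenerated CHECK lines print each broadcast as a plain decimal constant instead of the opaque {{.*}}(%rip) memory operand, so the masks being tested are visible in the test itself. The following is a minimal Python sketch, assuming nothing beyond the constants quoted in the updated CHECK lines above, that decodes those unsigned 32-bit values into hex and signed form for readability:

    # Minimal sketch (illustrative only): decode the unsigned 32-bit broadcast
    # constants quoted in the regenerated CHECK lines into hex and signed form.
    def decode_u32(value: int) -> str:
        signed = value - (1 << 32) if value >= (1 << 31) else value
        return f"{value} = {value:#010x} (signed {signed})"

    # Constants taken from the updated check lines above.
    for c in (4294967264, 1073741816, 1073741823, 2147483648, 20, 16, 15, 5, 4, 3, 2, 1):
        print(decode_u32(c))
    # For example, 4294967264 = 0xffffffe0 (signed -32), a mask clearing the low
    # five bits, and 1073741823 = 0x3fffffff, a mask keeping the low 30 bits.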