From 53b6da24288717fd816a67714dac2d40a6451a1e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Sat, 11 Feb 2017 12:29:56 +0000
Subject: [PATCH] [X86][SSE] Regenerate float comparison commutation tests.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294840 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/commute-fcmp.ll | 38 +++-----------------------------
 1 file changed, 3 insertions(+), 35 deletions(-)

diff --git a/test/CodeGen/X86/commute-fcmp.ll b/test/CodeGen/X86/commute-fcmp.ll
index 4274d1feaa3..f05fb805b41 100644
--- a/test/CodeGen/X86/commute-fcmp.ll
+++ b/test/CodeGen/X86/commute-fcmp.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX
 ;
 ; Float Comparisons
 ;
@@ -17,7 +17,6 @@ define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp oeq <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -34,7 +33,6 @@ define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpneqps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp une <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -51,7 +49,6 @@ define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpordps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp ord <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -68,7 +65,6 @@ define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpunordps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp uno <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -92,7 +88,6 @@ define <4 x i32> @commute_cmpps_ueq(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX-NEXT: vcmpunordps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vorps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp ueq <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -116,7 +111,6 @@ define <4 x i32> @commute_cmpps_one(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX-NEXT: vcmpordps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp one <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -136,7 +130,6 @@ define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm1
 ; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp olt <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -156,7 +149,6 @@ define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm1
 ; AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp ole <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
@@ -174,7 +166,6 @@ define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp oeq <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -192,7 +183,6 @@ define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpneqps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp une <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -210,7 +200,6 @@ define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpordps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp ord <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -228,7 +217,6 @@ define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpunordps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp uno <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -257,7 +245,6 @@ define <8 x i32> @commute_cmpps_ueq_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX-NEXT: vcmpunordps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: vorps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp ueq <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -286,7 +273,6 @@ define <8 x i32> @commute_cmpps_one_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX-NEXT: vcmpordps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: vandps %ymm2, %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp one <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -309,7 +295,6 @@ define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm1
 ; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp olt <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -332,7 +317,6 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm1
 ; AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp ole <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -354,7 +338,6 @@ define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp oeq <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -371,7 +354,6 @@ define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpneqpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp une <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -388,7 +370,6 @@ define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpordpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp ord <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -412,7 +393,6 @@ define <2 x i64> @commute_cmppd_ueq(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX-NEXT: vcmpunordpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vorpd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp ueq <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -436,7 +416,6 @@ define <2 x i64> @commute_cmppd_one(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX-NEXT: vcmpordpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vandpd %xmm2, %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp one <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -453,7 +432,6 @@ define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpunordpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp uno <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -473,7 +451,6 @@ define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX-NEXT: vmovapd (%rdi), %xmm1
 ; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp olt <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -493,7 +470,6 @@ define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
 ; AVX-NEXT: vmovapd (%rdi), %xmm1
 ; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: retq
-;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp ole <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
@@ -511,7 +487,6 @@ define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp oeq <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -529,7 +504,6 @@ define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpneqpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp une <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -547,7 +521,6 @@ define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpordpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp ord <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -565,7 +538,6 @@ define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX: # BB#0:
 ; AVX-NEXT: vcmpunordpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp uno <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -594,7 +566,6 @@ define <4 x i64> @commute_cmppd_ueq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX-NEXT: vcmpunordpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: vorpd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp ueq <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -623,7 +594,6 @@ define <4 x i64> @commute_cmppd_one_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX-NEXT: vcmpordpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: vandpd %ymm2, %ymm0, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp one <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -646,7 +616,6 @@ define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX-NEXT: vmovapd (%rdi), %ymm1
 ; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp olt <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
@@ -669,7 +638,6 @@ define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
 ; AVX-NEXT: vmovapd (%rdi), %ymm1
 ; AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
 ; AVX-NEXT: retq
-;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp ole <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>