From eaf92ee067e24da9bc9e2fea6e932704511dee11 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 10 Aug 2018 18:58:56 +0000
Subject: [PATCH] AMDGPU: Combine and of seto/setuo and fp_class

An ordered (seto) or unordered (setuo) compare of a value with itself
is just a non-NaN (or NaN) test. When such a compare is ANDed with an
fp_class test of the same value, the compare is redundant: clear the
NaN (or non-NaN) test bits from the class mask and drop the compare.

llvm-svn: 339462
---
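Note (illustration only, not part of the patch): SIInstrFlags defines
S_NAN as bit 0 and Q_NAN as bit 1 of the class mask, so OrdMask is 3.
With the mask 35 (S_NAN | Q_NAN | negative zero) used by the new
llvm.amdgcn.class.ll tests below, the combine gives:

  %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35)
  %ord   = fcmp ord float %a, %a     ; true iff %a is not a NaN
  %and   = and i1 %ord, %class
  ; -> call i1 @llvm.amdgcn.class.f32(float %a, i32 32)  ; 35 & ~3

  %uno   = fcmp uno float %a, %a     ; true iff %a is a NaN
  %and2  = and i1 %uno, %class
  ; -> call i1 @llvm.amdgcn.class.f32(float %a, i32 3)   ; 35 & 3
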
 lib/Target/AMDGPU/SIISelLowering.cpp     | 23 +++++++++++
 test/CodeGen/AMDGPU/fp-classify.ll       | 50 +++++++++++++++++++++---
 test/CodeGen/AMDGPU/llvm.amdgcn.class.ll | 37 ++++++++++++++++++
 3 files changed, 104 insertions(+), 6 deletions(-)

diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index cb74cfb5241..ca846f998fd 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6429,6 +6429,29 @@ SDValue SITargetLowering::performAndCombine(SDNode *N,
     }
   }
 
+  if (RHS.getOpcode() == ISD::SETCC && LHS.getOpcode() == AMDGPUISD::FP_CLASS)
+    std::swap(LHS, RHS);
+
+  if (LHS.getOpcode() == ISD::SETCC && RHS.getOpcode() == AMDGPUISD::FP_CLASS &&
+      RHS.hasOneUse()) {
+    ISD::CondCode LCC = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
+    // and (fcmp seto), (fp_class x, mask) -> fp_class x, mask & ~(p_nan | n_nan)
+    // and (fcmp setuo), (fp_class x, mask) -> fp_class x, mask & (p_nan | n_nan)
+    const ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(RHS.getOperand(1));
+    if ((LCC == ISD::SETO || LCC == ISD::SETUO) && Mask &&
+        (RHS.getOperand(0) == LHS.getOperand(0) &&
+         LHS.getOperand(0) == LHS.getOperand(1))) {
+      const unsigned OrdMask = SIInstrFlags::S_NAN | SIInstrFlags::Q_NAN;
+      unsigned NewMask = LCC == ISD::SETO ?
+        Mask->getZExtValue() & ~OrdMask :
+        Mask->getZExtValue() & OrdMask;
+
+      SDLoc DL(N);
+      return DAG.getNode(AMDGPUISD::FP_CLASS, DL, MVT::i1, RHS.getOperand(0),
+                         DAG.getConstant(NewMask, DL, MVT::i32));
+    }
+  }
+
   if (VT == MVT::i32 &&
       (RHS.getOpcode() == ISD::SIGN_EXTEND || LHS.getOpcode() == ISD::SIGN_EXTEND)) {
     // and x, (sext cc from i1) => select cc, x, 0
diff --git a/test/CodeGen/AMDGPU/fp-classify.ll b/test/CodeGen/AMDGPU/fp-classify.ll
index 1476b7fa520..d4416543187 100644
--- a/test/CodeGen/AMDGPU/fp-classify.ll
+++ b/test/CodeGen/AMDGPU/fp-classify.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI %s
 
 declare float @llvm.fabs.f32(float) #1
 declare double @llvm.fabs.f64(double) #1
@@ -124,14 +124,11 @@ define amdgpu_kernel void @test_isfinite_not_pattern_3(i32 addrspace(1)* nocaptu
   ret void
 }
 
-; Wrong unordered compare
 ; GCN-LABEL: {{^}}test_isfinite_pattern_4:
 ; GCN-DAG: s_load_dword [[X:s[0-9]+]]
 ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
-; GCN-DAG: v_cmp_o_f32_e64 [[ORD:s\[[0-9]+:[0-9]+\]]], [[X]], [[X]]
 ; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
-; GCN: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[ORD]], vcc
-; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[AND]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
 define amdgpu_kernel void @test_isfinite_pattern_4(i32 addrspace(1)* nocapture %out, float %x) #0 {
   %ord = fcmp ord float %x, 0.000000e+00
   %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
@@ -142,5 +139,46 @@ define amdgpu_kernel void @test_isfinite_pattern_4(i32 addrspace(1)* nocapture %
   ret void
 }
 
+; GCN-LABEL: {{^}}test_isfinite_pattern_4_commute_and:
+; GCN-DAG: s_load_dword [[X:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
+; GCN-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, vcc
+define amdgpu_kernel void @test_isfinite_pattern_4_commute_and(i32 addrspace(1)* nocapture %out, float %x) #0 {
+  %ord = fcmp ord float %x, 0.000000e+00
+  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
+  %and = and i1 %ninf, %ord
+  %ext = zext i1 %and to i32
+  store i32 %ext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_not_isfinite_pattern_4_wrong_ord_test:
+; GCN-DAG: s_load_dword [[X:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[Y:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x14|0x50}}
+
+; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x1f8
+; GCN-DAG: v_mov_b32_e32 [[VY:v[0-9]+]], [[Y]]
+
+; SI-DAG: v_cmp_o_f32_e32 vcc, [[X]], [[VY]]
+; SI-DAG: v_cmp_class_f32_e64 [[CLASS:s\[[0-9]+:[0-9]+\]]], [[X]], [[K]]
+; SI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], vcc, [[CLASS]]
+
+; VI-DAG: v_cmp_o_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[X]], [[VY]]
+; VI-DAG: v_cmp_class_f32_e32 vcc, [[X]], [[K]]
+; VI: s_and_b64 [[AND:s\[[0-9]+:[0-9]+\]]], [[CMP]], vcc
+
+; GCN: v_cndmask_b32_e64 v{{[0-9]+}}, 0, 1, [[AND]]
+define amdgpu_kernel void @test_not_isfinite_pattern_4_wrong_ord_test(i32 addrspace(1)* nocapture %out, float %x, [8 x i32], float %y) #0 {
+  %ord = fcmp ord float %x, %y
+  %x.fabs = tail call float @llvm.fabs.f32(float %x) #1
+  %ninf = fcmp one float %x.fabs, 0x7FF0000000000000
+  %and = and i1 %ord, %ninf
+  %ext = zext i1 %and to i32
+  store i32 %ext, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }
diff --git a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
index ed4e6d39656..5b4d57bd8a2 100644
--- a/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
+++ b/test/CodeGen/AMDGPU/llvm.amdgcn.class.ll
@@ -507,5 +507,42 @@ define amdgpu_kernel void @test_class_undef_f32(i32 addrspace(1)* %out, float %a
   ret void
 }
 
+; SI-LABEL: {{^}}test_fold_and_ord:
+; SI: s_waitcnt
+; SI-NEXT: v_cmp_class_f32_e64 s[6:7], v0, 32{{$}}
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; SI-NEXT: s_setpc_b64
+define i1 @test_fold_and_ord(float %a) {
+  %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
+  %ord = fcmp ord float %a, %a
+  %and = and i1 %ord, %class
+  ret i1 %and
+}
+
+; SI-LABEL: {{^}}test_fold_and_unord:
+; SI: s_waitcnt
+; SI-NEXT: v_cmp_class_f32_e64 s[6:7], v0, 3{{$}}
+; SI-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[6:7]
+; SI-NEXT: s_setpc_b64
+define i1 @test_fold_and_unord(float %a) {
+  %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
+  %ord = fcmp uno float %a, %a
+  %and = and i1 %ord, %class
+  ret i1 %and
+}
+
+; SI-LABEL: {{^}}test_fold_and_ord_multi_use:
+; SI: v_cmp_class
+; SI-NOT: v_cmp_class
+; SI: v_cmp_o
+; SI: s_and_b64
+define i1 @test_fold_and_ord_multi_use(float %a) {
+  %class = call i1 @llvm.amdgcn.class.f32(float %a, i32 35) #1
+  store volatile i1 %class, i1 addrspace(1)* undef
+  %ord = fcmp ord float %a, %a
+  %and = and i1 %ord, %class
+  ret i1 %and
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }