From 7fc6882374f805ed780f07001e317c8b0dc7711a Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 14 Jan 2019 14:16:24 +0000
Subject: [PATCH] [DAGCombiner] Add add/sub saturation undef handling

Match ConstantFolding.cpp:
(add_sat x, undef) -> -1
(sub_sat x, undef) -> 0

llvm-svn: 351070
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp  |  8 ++++++++
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  6 ++++++
 llvm/test/CodeGen/X86/combine-add-ssat.ll      | 18 +++++-------------
 llvm/test/CodeGen/X86/combine-add-usat.ll      | 12 ++++--------
 llvm/test/CodeGen/X86/combine-sub-ssat.ll      | 10 ++--------
 llvm/test/CodeGen/X86/combine-sub-usat.ll      |  6 ++----
 6 files changed, 27 insertions(+), 33 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9a40774cff8e..25f2fc66b73c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2194,6 +2194,10 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
       return N1;
   }
 
+  // fold (add_sat x, undef) -> -1
+  if (N0.isUndef() || N1.isUndef())
+    return DAG.getAllOnesConstant(DL, VT);
+
   if (DAG.isConstantIntBuildVectorOrConstantInt(N0)) {
     // canonicalize constant to RHS
     if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
@@ -2792,6 +2796,10 @@ SDValue DAGCombiner::visitSUBSAT(SDNode *N) {
       return N0;
   }
 
+  // fold (sub_sat x, undef) -> 0
+  if (N0.isUndef() || N1.isUndef())
+    return DAG.getConstant(0, DL, VT);
+
   // TODO Constant Folding
 
   // fold (sub_sat x, 0) -> x
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index a0ee80c0dcde..dc5b5e3123fd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -5180,6 +5180,8 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
     case ISD::SDIV:
     case ISD::UREM:
     case ISD::SREM:
+    case ISD::SSUBSAT:
+    case ISD::USUBSAT:
       return getConstant(0, DL, VT); // fold op(undef, arg2) -> 0
     }
   }
@@ -5203,8 +5205,12 @@ SDValue SelectionDAG::getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
       return getUNDEF(VT); // fold op(arg1, undef) -> undef
     case ISD::MUL:
     case ISD::AND:
+    case ISD::SSUBSAT:
+    case ISD::USUBSAT:
       return getConstant(0, DL, VT); // fold op(arg1, undef) -> 0
     case ISD::OR:
+    case ISD::SADDSAT:
+    case ISD::UADDSAT:
       return getAllOnesConstant(DL, VT);
     }
   }
diff --git a/llvm/test/CodeGen/X86/combine-add-ssat.ll b/llvm/test/CodeGen/X86/combine-add-ssat.ll
index bd06ae9de646..c26125305588 100644
--- a/llvm/test/CodeGen/X86/combine-add-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-ssat.ll
@@ -15,13 +15,7 @@ declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
 define i32 @combine_undef_i32(i32 %a0) {
 ; CHECK-LABEL: combine_undef_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    addl %eax, %ecx
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK-NEXT:    addl %eax, %edi
-; CHECK-NEXT:    cmovnol %edi, %eax
+; CHECK-NEXT:    movl $-1, %eax
 ; CHECK-NEXT:    retq
   %res = call i32 @llvm.sadd.sat.i32(i32 %a0, i32 undef)
   ret i32 %res
@@ -30,12 +24,12 @@ define i32 @combine_undef_i32(i32 %a0) {
 define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
 ; SSE-LABEL: combine_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    paddsw %xmm0, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpaddsw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
   ret <8 x i16> %res
@@ -68,14 +62,12 @@ define <8 x i16> @combine_constfold_v8i16() {
 define <8 x i16> @combine_constfold_undef_v8i16() {
 ; SSE-LABEL: combine_constfold_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 =
-; SSE-NEXT:    paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_constfold_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 =
-; AVX-NEXT:    vpaddsw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65534,0,65280,32768,0]
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> , <8 x i16> )
   ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-add-usat.ll b/llvm/test/CodeGen/X86/combine-add-usat.ll
index 260d6ba00e5f..7565f0624b1a 100644
--- a/llvm/test/CodeGen/X86/combine-add-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-usat.ll
@@ -15,9 +15,7 @@ declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
 define i32 @combine_undef_i32(i32 %a0) {
 ; CHECK-LABEL: combine_undef_i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addl %eax, %edi
 ; CHECK-NEXT:    movl $-1, %eax
-; CHECK-NEXT:    cmovael %edi, %eax
 ; CHECK-NEXT:    retq
   %res = call i32 @llvm.uadd.sat.i32(i32 %a0, i32 undef)
   ret i32 %res
@@ -26,12 +24,12 @@ define i32 @combine_undef_i32(i32 %a0) {
 define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
 ; SSE-LABEL: combine_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    paddusw %xmm0, %xmm0
+; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpaddusw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
   ret <8 x i16> %res
@@ -64,14 +62,12 @@ define <8 x i16> @combine_constfold_v8i16() {
 define <8 x i16> @combine_constfold_undef_v8i16() {
 ; SSE-LABEL: combine_constfold_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    movdqa {{.*#+}} xmm0 =
-; SSE-NEXT:    paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_constfold_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vmovdqa {{.*#+}} xmm0 =
-; AVX-NEXT:    vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,65535,2,65535]
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> , <8 x i16> )
   ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-sub-ssat.ll b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
index 423efd2b1817..eb146df5eb78 100644
--- a/llvm/test/CodeGen/X86/combine-sub-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-ssat.ll
@@ -16,12 +16,6 @@ define i32 @combine_undef_i32(i32 %a0) {
 ; CHECK-LABEL: combine_undef_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    movl %edi, %ecx
-; CHECK-NEXT:    subl %eax, %ecx
-; CHECK-NEXT:    setns %al
-; CHECK-NEXT:    addl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK-NEXT:    subl %eax, %edi
-; CHECK-NEXT:    cmovnol %edi, %eax
 ; CHECK-NEXT:    retq
   %res = call i32 @llvm.ssub.sat.i32(i32 %a0, i32 undef)
   ret i32 %res
@@ -30,12 +24,12 @@ define i32 @combine_undef_i32(i32 %a0) {
 define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
 ; SSE-LABEL: combine_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psubsw %xmm0, %xmm0
+; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsubsw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
   ret <8 x i16> %res
diff --git a/llvm/test/CodeGen/X86/combine-sub-usat.ll b/llvm/test/CodeGen/X86/combine-sub-usat.ll
index f421249babda..03c6e8840a8f 100644
--- a/llvm/test/CodeGen/X86/combine-sub-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-sub-usat.ll
@@ -16,8 +16,6 @@ define i32 @combine_undef_i32(i32 %a0) {
 ; CHECK-LABEL: combine_undef_i32:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    xorl %eax, %eax
-; CHECK-NEXT:    subl %eax, %edi
-; CHECK-NEXT:    cmovael %edi, %eax
 ; CHECK-NEXT:    retq
   %res = call i32 @llvm.usub.sat.i32(i32 %a0, i32 undef)
   ret i32 %res
@@ -26,12 +24,12 @@ define i32 @combine_undef_i32(i32 %a0) {
 define <8 x i16> @combine_undef_v8i16(<8 x i16> %a0) {
 ; SSE-LABEL: combine_undef_v8i16:
 ; SSE:       # %bb.0:
-; SSE-NEXT:    psubusw %xmm0, %xmm0
+; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: combine_undef_v8i16:
 ; AVX:       # %bb.0:
-; AVX-NEXT:    vpsubusw %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
   %res = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> %a0)
   ret <8 x i16> %res
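
Note (not part of the patch): a minimal standalone C++ sketch of why the folds above are sound. Because an undef operand may be assumed to take any convenient value, for every x there is a choice that drives a saturating add to the all-ones value and a saturating sub to zero. The helpers sadd_sat16/ssub_sat16 are hypothetical models of the ISD node semantics for i16, not LLVM APIs.

// Sketch: models 16-bit signed saturating add/sub and checks that, for every
// x, some choice of the "undef" operand yields -1 (all-ones) for add_sat and
// 0 for sub_sat, matching the DAGCombiner folds.
#include <cassert>
#include <cstdint>

static int16_t sadd_sat16(int16_t a, int16_t b) {
  int32_t r = int32_t(a) + int32_t(b); // widen so the raw sum cannot overflow
  if (r > INT16_MAX) return INT16_MAX; // clamp to signed max
  if (r < INT16_MIN) return INT16_MIN; // clamp to signed min
  return static_cast<int16_t>(r);
}

static int16_t ssub_sat16(int16_t a, int16_t b) {
  int32_t r = int32_t(a) - int32_t(b);
  if (r > INT16_MAX) return INT16_MAX;
  if (r < INT16_MIN) return INT16_MIN;
  return static_cast<int16_t>(r);
}

int main() {
  for (int x = INT16_MIN; x <= INT16_MAX; ++x) {
    int16_t X = static_cast<int16_t>(x);
    // (add_sat x, undef) -> -1: choose undef == -1 - x; the sum never
    // saturates and is exactly -1, i.e. the all-ones bit pattern.
    assert(sadd_sat16(X, static_cast<int16_t>(-1 - x)) == -1);
    // (sub_sat x, undef) -> 0: choose undef == x, so x - x == 0.
    assert(ssub_sat16(X, X) == 0);
  }
  return 0;
}

The same choices cover the unsigned forms (uadd_sat(x, UINT16_MAX) saturates to all-ones, usub_sat(x, x) is 0), which is why SelectionDAG::getNode handles SADDSAT/UADDSAT and SSUBSAT/USUBSAT together in the undef-operand switches.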