[DAGCombiner][X86][AArch64] Generalize A-(A&B)->A&(~B) fold (PR44448)

The fold 'A - (A & (B - 1))' -> 'A & (0 - B)'
added in 8dab0a4a7d691f2704f1079538e0ef29548db159
is too specific. It should/can just be 'A - (A & B)' -> 'A & (~B)'

Even if we don't manage to fold `~` into B,
we have likely formed an `ANDN` node.
Also, this way there are fewer similar-but-duplicate folds.

Name: X - (X & Y)  ->  X & (~Y)
%o = and i32 %X, %Y
%r = sub i32 %X, %o
  =>
%n = xor i32 %Y, -1
%r = and i32 %X, %n

https://rise4fun.com/Alive/kOUl

See
  https://bugs.llvm.org/show_bug.cgi?id=44448
  https://reviews.llvm.org/D71499
This commit is contained in:
Roman Lebedev 2020-01-03 16:14:29 +03:00
parent e5e0775c51
commit 24c9de9750
5 changed files with 138 additions and 103 deletions

View File

@ -3104,31 +3104,20 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
DAG.getNode(ISD::SUB, DL, VT, N1.getOperand(1),
N1.getOperand(0)));
// A - (A & (B - 1)) -> A & (~(B - 1)) -> A & (0 - B)
if (N1.getOpcode() == ISD::AND && N1.hasOneUse()) {
// A - (A & B) -> A & (~B)
if (N1.getOpcode() == ISD::AND) {
SDValue A = N1.getOperand(0);
SDValue BDec = N1.getOperand(1);
SDValue B = N1.getOperand(1);
if (A != N0)
std::swap(A, BDec);
if (A == N0 && BDec.getOpcode() == ISD::ADD &&
isAllOnesOrAllOnesSplat(BDec->getOperand(1))) {
SDValue B = BDec.getOperand(0);
SDValue NegB =
DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), B);
return DAG.getNode(ISD::AND, DL, VT, A, NegB);
std::swap(A, B);
if (A == N0 &&
(N1.hasOneUse() || isConstantOrConstantVector(B, /*NoOpaques=*/true))) {
SDValue InvB =
DAG.getNode(ISD::XOR, DL, VT, B, DAG.getAllOnesConstant(DL, VT));
return DAG.getNode(ISD::AND, DL, VT, A, InvB);
}
}
// A - (A & C) -> A & (~C)
if (N1.getOpcode() == ISD::AND && N1.getOperand(0) == N0 &&
isConstantOrConstantVector(N1.getOperand(1), /*NoOpaques=*/true)) {
SDValue InvC =
DAG.FoldConstantArithmetic(ISD::XOR, DL, VT, N1.getOperand(1).getNode(),
DAG.getAllOnesConstant(DL, VT).getNode());
assert(InvC && "Constant folding failed");
return DAG.getNode(ISD::AND, DL, VT, N0, InvC);
}
// fold (X - (-Y * Z)) -> (X + (Y * Z))
if (N1.getOpcode() == ISD::MUL && N1.hasOneUse()) {
if (N1.getOperand(0).getOpcode() == ISD::SUB &&

View File

@ -127,8 +127,7 @@ define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind {
; CHECK-LABEL: n8_not_lowbit_mask:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w1, #1 // =1
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: sub w0, w0, w8
; CHECK-NEXT: bic w0, w0, w8
; CHECK-NEXT: ret
%mask = add i32 %alignment, 1 ; not -1
%bias = and i32 %ptr, %mask

View File

@ -17,8 +17,7 @@
define i32 @t0_32(i32 %ptr, i32 %mask) nounwind {
; CHECK-LABEL: t0_32:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, w1
; CHECK-NEXT: sub w0, w0, w8
; CHECK-NEXT: bic w0, w0, w1
; CHECK-NEXT: ret
%bias = and i32 %ptr, %mask
%r = sub i32 %ptr, %bias
@ -27,8 +26,7 @@ define i32 @t0_32(i32 %ptr, i32 %mask) nounwind {
define i64 @t1_64(i64 %ptr, i64 %mask) nounwind {
; CHECK-LABEL: t1_64:
; CHECK: // %bb.0:
; CHECK-NEXT: and x8, x0, x1
; CHECK-NEXT: sub x0, x0, x8
; CHECK-NEXT: bic x0, x0, x1
; CHECK-NEXT: ret
%bias = and i64 %ptr, %mask
%r = sub i64 %ptr, %bias
@ -38,8 +36,7 @@ define i64 @t1_64(i64 %ptr, i64 %mask) nounwind {
define i32 @t2_commutative(i32 %ptr, i32 %mask) nounwind {
; CHECK-LABEL: t2_commutative:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w1, w0
; CHECK-NEXT: sub w0, w0, w8
; CHECK-NEXT: bic w0, w0, w1
; CHECK-NEXT: ret
%bias = and i32 %mask, %ptr ; swapped
%r = sub i32 %ptr, %bias
@ -87,8 +84,7 @@ define i32 @n5_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %mask) nounw
define i32 @n6_not_lowbit_mask(i32 %ptr, i32 %mask) nounwind {
; CHECK-LABEL: n6_not_lowbit_mask:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, w1
; CHECK-NEXT: sub w0, w0, w8
; CHECK-NEXT: bic w0, w0, w1
; CHECK-NEXT: ret
%bias = and i32 %ptr, %mask
%r = sub i32 %ptr, %bias

View File

@ -214,22 +214,34 @@ define i32 @n7_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %alignment)
}
define i32 @n8_not_lowbit_mask(i32 %ptr, i32 %alignment) nounwind {
; X86-LABEL: n8_not_lowbit_mask:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: incl %ecx
; X86-NEXT: andl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: retl
; NOBMI-X86-LABEL: n8_not_lowbit_mask:
; NOBMI-X86: # %bb.0:
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: incl %eax
; NOBMI-X86-NEXT: notl %eax
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: retl
;
; X64-LABEL: n8_not_lowbit_mask:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: incl %esi
; X64-NEXT: andl %edi, %esi
; X64-NEXT: subl %esi, %eax
; X64-NEXT: retq
; BMI-X86-LABEL: n8_not_lowbit_mask:
; BMI-X86: # %bb.0:
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT: incl %eax
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT: retl
;
; NOBMI-X64-LABEL: n8_not_lowbit_mask:
; NOBMI-X64: # %bb.0:
; NOBMI-X64-NEXT: movl %esi, %eax
; NOBMI-X64-NEXT: incl %eax
; NOBMI-X64-NEXT: notl %eax
; NOBMI-X64-NEXT: andl %edi, %eax
; NOBMI-X64-NEXT: retq
;
; BMI-X64-LABEL: n8_not_lowbit_mask:
; BMI-X64: # %bb.0:
; BMI-X64-NEXT: incl %esi
; BMI-X64-NEXT: andnl %edi, %esi, %eax
; BMI-X64-NEXT: retq
%mask = add i32 %alignment, 1 ; not -1
%bias = and i32 %ptr, %mask
%r = sub i32 %ptr, %bias

View File

@ -18,65 +18,94 @@
; The basic positive tests
define i32 @t0_32(i32 %ptr, i32 %mask) nounwind {
; X86-LABEL: t0_32:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: retl
; NOBMI-X86-LABEL: t0_32:
; NOBMI-X86: # %bb.0:
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: notl %eax
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: retl
;
; X64-LABEL: t0_32:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl %edi, %esi
; X64-NEXT: subl %esi, %eax
; X64-NEXT: retq
; BMI-X86-LABEL: t0_32:
; BMI-X86: # %bb.0:
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT: retl
;
; NOBMI-X64-LABEL: t0_32:
; NOBMI-X64: # %bb.0:
; NOBMI-X64-NEXT: movl %esi, %eax
; NOBMI-X64-NEXT: notl %eax
; NOBMI-X64-NEXT: andl %edi, %eax
; NOBMI-X64-NEXT: retq
;
; BMI-X64-LABEL: t0_32:
; BMI-X64: # %bb.0:
; BMI-X64-NEXT: andnl %edi, %esi, %eax
; BMI-X64-NEXT: retq
%bias = and i32 %ptr, %mask
%r = sub i32 %ptr, %bias
ret i32 %r
}
define i64 @t1_64(i64 %ptr, i64 %mask) nounwind {
; X86-LABEL: t1_64:
; X86: # %bb.0:
; X86-NEXT: pushl %esi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl %edx, %ecx
; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
; X86-NEXT: andl %eax, %esi
; X86-NEXT: subl %esi, %eax
; X86-NEXT: sbbl %ecx, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
; NOBMI-X86-LABEL: t1_64:
; NOBMI-X86: # %bb.0:
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; NOBMI-X86-NEXT: notl %eax
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: notl %edx
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %edx
; NOBMI-X86-NEXT: retl
;
; X64-LABEL: t1_64:
; X64: # %bb.0:
; X64-NEXT: movq %rdi, %rax
; X64-NEXT: andq %rdi, %rsi
; X64-NEXT: subq %rsi, %rax
; X64-NEXT: retq
; BMI-X86-LABEL: t1_64:
; BMI-X86: # %bb.0:
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %ecx, %edx
; BMI-X86-NEXT: retl
;
; NOBMI-X64-LABEL: t1_64:
; NOBMI-X64: # %bb.0:
; NOBMI-X64-NEXT: movq %rsi, %rax
; NOBMI-X64-NEXT: notq %rax
; NOBMI-X64-NEXT: andq %rdi, %rax
; NOBMI-X64-NEXT: retq
;
; BMI-X64-LABEL: t1_64:
; BMI-X64: # %bb.0:
; BMI-X64-NEXT: andnq %rdi, %rsi, %rax
; BMI-X64-NEXT: retq
%bias = and i64 %ptr, %mask
%r = sub i64 %ptr, %bias
ret i64 %r
}
define i32 @t2_commutative(i32 %ptr, i32 %mask) nounwind {
; X86-LABEL: t2_commutative:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: retl
; NOBMI-X86-LABEL: t2_commutative:
; NOBMI-X86: # %bb.0:
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: notl %eax
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: retl
;
; X64-LABEL: t2_commutative:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl %edi, %esi
; X64-NEXT: subl %esi, %eax
; X64-NEXT: retq
; BMI-X86-LABEL: t2_commutative:
; BMI-X86: # %bb.0:
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT: retl
;
; NOBMI-X64-LABEL: t2_commutative:
; NOBMI-X64: # %bb.0:
; NOBMI-X64-NEXT: movl %esi, %eax
; NOBMI-X64-NEXT: notl %eax
; NOBMI-X64-NEXT: andl %edi, %eax
; NOBMI-X64-NEXT: retq
;
; BMI-X64-LABEL: t2_commutative:
; BMI-X64: # %bb.0:
; BMI-X64-NEXT: andnl %edi, %esi, %eax
; BMI-X64-NEXT: retq
%bias = and i32 %mask, %ptr ; swapped
%r = sub i32 %ptr, %bias
ret i32 %r
@ -150,20 +179,30 @@ define i32 @n5_different_ptrs_commutative(i32 %ptr0, i32 %ptr1, i32 %mask) nounw
}
define i32 @n6_not_lowbit_mask(i32 %ptr, i32 %mask) nounwind {
; X86-LABEL: n6_not_lowbit_mask:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: andl %eax, %ecx
; X86-NEXT: subl %ecx, %eax
; X86-NEXT: retl
; NOBMI-X86-LABEL: n6_not_lowbit_mask:
; NOBMI-X86: # %bb.0:
; NOBMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: notl %eax
; NOBMI-X86-NEXT: andl {{[0-9]+}}(%esp), %eax
; NOBMI-X86-NEXT: retl
;
; X64-LABEL: n6_not_lowbit_mask:
; X64: # %bb.0:
; X64-NEXT: movl %edi, %eax
; X64-NEXT: andl %edi, %esi
; X64-NEXT: subl %esi, %eax
; X64-NEXT: retq
; BMI-X86-LABEL: n6_not_lowbit_mask:
; BMI-X86: # %bb.0:
; BMI-X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; BMI-X86-NEXT: andnl {{[0-9]+}}(%esp), %eax, %eax
; BMI-X86-NEXT: retl
;
; NOBMI-X64-LABEL: n6_not_lowbit_mask:
; NOBMI-X64: # %bb.0:
; NOBMI-X64-NEXT: movl %esi, %eax
; NOBMI-X64-NEXT: notl %eax
; NOBMI-X64-NEXT: andl %edi, %eax
; NOBMI-X64-NEXT: retq
;
; BMI-X64-LABEL: n6_not_lowbit_mask:
; BMI-X64: # %bb.0:
; BMI-X64-NEXT: andnl %edi, %esi, %eax
; BMI-X64-NEXT: retq
%bias = and i32 %ptr, %mask
%r = sub i32 %ptr, %bias
ret i32 %r