[X86] Fold X86ISD::SBB(ISD::SUB(X,Y),0) -> X86ISD::SBB(X,Y) (PR25858)

We often generate X86ISD::SBB(X, 0) for carry flag arithmetic.
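As an illustration (the snippet below is not part of the commit; WideUInt32 and wideSub are hypothetical names), the PR25858 pattern is a double-word unsigned subtraction in which the borrow out of the low half is subtracted from an already-formed high-half difference, i.e. exactly the SBB(SUB(X,Y),0,Carry) shape this patch folds:

    // Illustrative sketch only -- names are hypothetical, not from the PR.
    #include <cstdint>

    struct WideUInt32 {
      uint32_t lo;
      uint32_t hi;
    };

    WideUInt32 wideSub(WideUInt32 a, WideUInt32 b) {
      WideUInt32 r;
      r.lo = a.lo - b.lo;
      uint32_t borrow = a.lo < b.lo;   // borrow out of the low-half subtract
      r.hi = (a.hi - b.hi) - borrow;   // SUB(a.hi, b.hi), then SBB(..., 0, borrow)
      return r;
    }

Before the fold, the high half lowered to a sub of the high words followed by an sbb of $0; with the fold those two collapse into the single sbb of the high words seen in the updated checks below.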

I tried to create test cases for the ADC equivalent (which often uses the same pattern) but haven't managed to find anything yet.
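For reference, the ADC-equivalent shape would come from the matching double-word addition, where the carry out of the low half is added to an already-formed high-half sum. A hypothetical sketch (again not from the commit, reusing the WideUInt32 type from the example above):

    // Illustrative sketch only -- the ADC analogue of the pattern above.
    WideUInt32 wideAdd(WideUInt32 a, WideUInt32 b) {
      WideUInt32 r;
      r.lo = a.lo + b.lo;
      uint32_t carry = r.lo < a.lo;    // carry out of the low-half add
      r.hi = (a.hi + b.hi) + carry;    // ADD(a.hi, b.hi), then ADC(..., 0, carry)
      return r;
    }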

Differential Revision: https://reviews.llvm.org/D57169

llvm-svn: 352288
commit b7a15acd38 (parent 8fd74ebfc0)
Author: Simon Pilgrim
Date:   2019-01-26 20:13:44 +00:00

2 changed files with 15 additions and 9 deletions


@@ -40566,6 +40566,15 @@ static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
                        Flags);
   }
 
+  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
+  // iff the flag result is dead.
+  SDValue Op0 = N->getOperand(0);
+  SDValue Op1 = N->getOperand(1);
+  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
+      !N->hasAnyUseOfValue(1))
+    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), Op0.getOperand(0),
+                       Op0.getOperand(1), N->getOperand(2));
+
   return SDValue();
 }


@@ -13,11 +13,10 @@ define void @PR25858_i32(%WideUInt32* sret, %WideUInt32*, %WideUInt32*) nounwind
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
 ; X86-NEXT: movl (%ecx), %esi
 ; X86-NEXT: movl 4(%ecx), %ecx
-; X86-NEXT: subl 4(%edx), %ecx
 ; X86-NEXT: subl (%edx), %esi
-; X86-NEXT: sbbl $0, %ecx
-; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: sbbl 4(%edx), %ecx
 ; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl %esi, (%eax)
 ; X86-NEXT: popl %esi
 ; X86-NEXT: retl $4
 ;
@@ -26,11 +25,10 @@ define void @PR25858_i32(%WideUInt32* sret, %WideUInt32*, %WideUInt32*) nounwind
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: movl (%rsi), %ecx
 ; X64-NEXT: movl 4(%rsi), %esi
-; X64-NEXT: subl 4(%rdx), %esi
 ; X64-NEXT: subl (%rdx), %ecx
-; X64-NEXT: sbbl $0, %esi
-; X64-NEXT: movl %ecx, (%rdi)
+; X64-NEXT: sbbl 4(%rdx), %esi
 ; X64-NEXT: movl %esi, 4(%rdi)
+; X64-NEXT: movl %ecx, (%rdi)
 ; X64-NEXT: retq
 top:
   %3 = bitcast %WideUInt32* %1 to i32*
@@ -94,11 +92,10 @@ define void @PR25858_i64(%WideUInt64* sret, %WideUInt64*, %WideUInt64*) nounwind
 ; X64-NEXT: movq %rdi, %rax
 ; X64-NEXT: movq (%rsi), %rcx
 ; X64-NEXT: movq 8(%rsi), %rsi
-; X64-NEXT: subq 8(%rdx), %rsi
 ; X64-NEXT: subq (%rdx), %rcx
-; X64-NEXT: sbbq $0, %rsi
-; X64-NEXT: movq %rcx, (%rdi)
+; X64-NEXT: sbbq 8(%rdx), %rsi
 ; X64-NEXT: movq %rsi, 8(%rdi)
+; X64-NEXT: movq %rcx, (%rdi)
 ; X64-NEXT: retq
 top:
   %3 = bitcast %WideUInt64* %1 to i64*