llvm/test/CodeGen/X86/add-of-carry.ll
Benjamin Kramer f51190b697
X86: Add a bunch of peeps for add and sub of SETB.
"b + ((a < b) ? 1 : 0)" compiles into
	cmpl	%esi, %edi
	adcl	$0, %esi
instead of
	cmpl	%esi, %edi
	sbbl	%eax, %eax
	andl	$1, %eax
	addl	%esi, %eax

This saves a register, avoids a false dependency on %eax
(Intel's CPUs still don't ignore it), and it's shorter.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@131070 91177308-0d34-0410-b5e6-96231b3b80d8
2011-05-08 18:36:07 +00:00
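As a rough illustration (the function name and C source are hypothetical, not
part of the commit), test1 below corresponds to source along these lines: add
two unsigned values, then add 1 back in if the addition wrapped, as in
multiword arithmetic or one's-complement checksums. With the new peephole the
final "+ carry" lowers to cmp + adc instead of sbb/and/add.

	#include <stdint.h>

	/* Hypothetical source for the pattern exercised by test1. */
	uint32_t add_with_end_around_carry(uint32_t sum, uint32_t x) {
	    uint32_t t = x + sum;            /* may wrap around */
	    return t + ((t < x) ? 1u : 0u);  /* add the carry back in */
	}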

; RUN: llc < %s -march=x86 | FileCheck %s
; <rdar://problem/8449754>
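; Both tests add the carry out of an unsigned addition back into the sum and
; check that it is emitted as "adcl $0" rather than sbb/and/add.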
define i32 @test1(i32 %sum, i32 %x) nounwind readnone ssp {
entry:
; CHECK: test1:
; CHECK: cmpl %ecx, %eax
; CHECK-NOT: addl
; CHECK: adcl $0, %eax
  %add4 = add i32 %x, %sum
  %cmp = icmp ult i32 %add4, %x
  %inc = zext i1 %cmp to i32
  %z.0 = add i32 %add4, %inc
  ret i32 %z.0
}
; Instcombine transforms test1 into test2:
; CHECK: test2:
; CHECK: movl
; CHECK-NEXT: addl
; CHECK-NEXT: adcl $0
; CHECK-NEXT: ret
define i32 @test2(i32 %sum, i32 %x) nounwind readnone ssp {
entry:
  %uadd = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %sum)
  %0 = extractvalue { i32, i1 } %uadd, 0
  %cmp = extractvalue { i32, i1 } %uadd, 1
  %inc = zext i1 %cmp to i32
  %z.0 = add i32 %0, %inc
  ret i32 %z.0
}
declare { i32, i1 } @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone