llvm/test/Transforms/InstCombine/2008-08-17-ICmpXorSignbit.ll
Nick Lewycky 4333f49afe Reinstate this optimization to fold icmp of xor when possible. Don't try to
turn icmp eq a+x, b+x into icmp eq a, b if a+x or b+x has other uses. This
may have been increasing register pressure leading to the bzip2 slowdown.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@63487 91177308-0d34-0410-b5e6-96231b3b80d8
2009-01-31 21:30:05 +00:00

42 lines
739 B
LLVM

; RUN: llvm-as < %s | opt -instcombine | llvm-dis | not grep xor
; Both operands XORed with 0x80 flips only the sign bit; a signed compare of
; sign-flipped values is equivalent to an unsigned compare of the originals,
; so instcombine should eliminate both xors (checked by the RUN line).
define i1 @test1(i8 %x, i8 %y) {
%X = xor i8 %x, 128
%Y = xor i8 %y, 128
%tmp = icmp slt i8 %X, %Y
ret i1 %tmp
}
; Mirror of @test1 with an unsigned predicate: ult of sign-flipped values
; should fold to a signed compare of the originals, removing the xors.
define i1 @test2(i8 %x, i8 %y) {
%X = xor i8 %x, 128
%Y = xor i8 %y, 128
%tmp = icmp ult i8 %X, %Y
ret i1 %tmp
}
; Single-operand case: xor with the sign bit compared against a constant.
; Instcombine should fold the xor into the compare (adjusting predicate
; and/or constant) so no xor survives — the RUN line greps for its absence.
define i1 @test3(i8 %x) {
%X = xor i8 %x, 128
%tmp = icmp uge i8 %X, 15
ret i1 %tmp
}
; XOR with 127 (all bits except the sign bit) applied to both operands of a
; signed compare; presumably folds to a compare of the original operands with
; the predicate direction adjusted — either way, the xors must disappear.
define i1 @test4(i8 %x, i8 %y) {
%X = xor i8 %x, 127
%Y = xor i8 %y, 127
%tmp = icmp slt i8 %X, %Y
ret i1 %tmp
}
; Same as @test4 but with an unsigned predicate: both operands xor'd with the
; same constant, so the xors should cancel out of the comparison entirely.
define i1 @test5(i8 %x, i8 %y) {
%X = xor i8 %x, 127
%Y = xor i8 %y, 127
%tmp = icmp ult i8 %X, %Y
ret i1 %tmp
}
; Single-operand case with a non-signbit xor constant vs. an immediate;
; instcombine should fold the xor into the icmp constant, leaving no xor.
define i1 @test6(i8 %x) {
%X = xor i8 %x, 127
%tmp = icmp uge i8 %X, 15
ret i1 %tmp
}