[InstCombine] allow icmp (and X, C2), C1 folds for splat constant vectors

This is a revert of r280676, which was a revert of r280637;
i.e., this is r280637 again. It was speculatively reverted to
help debug buildbot failures.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@280861 91177308-0d34-0410-b5e6-96231b3b80d8
Sanjay Patel 2016-09-07 20:50:44 +00:00
parent 5b033da95a
commit 350a7a4120
2 changed files with 55 additions and 55 deletions
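
As a quick illustration of the restored fold (a minimal sketch; the function
name is hypothetical, but the IR mirrors @test67vec in the test diff below),
running 'opt -instcombine' on a splat-constant vector now produces the same
icmp ugt -> icmp ne rewrite as the scalar case:

  ; The lowest set bit of the mask 96 is 32, and 32 u> 31, so any nonzero
  ; value of %and already exceeds 31:
  ;   (X & 96) u> 31  -->  (X & 96) != 0
  define <2 x i1> @ugt_to_ne_demo(<2 x i32> %x) {
    %and = and <2 x i32> %x, <i32 96, i32 96>
    %cmp = icmp ugt <2 x i32> %and, <i32 31, i32 31>
    ret <2 x i1> %cmp
  }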

lib/Transforms/InstCombine/InstCombineCompares.cpp

@@ -1532,60 +1532,50 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
   if (Instruction *I = foldICmpAndShift(Cmp, And, C1))
     return I;
 
-  // FIXME: This check restricts all folds under here to scalar types.
-  ConstantInt *RHS = dyn_cast<ConstantInt>(Cmp.getOperand(1));
-  if (!RHS)
-    return nullptr;
-
   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
   // (icmp pred (and A, (or (shl 1, B), 1), 0))
   //
   // iff pred isn't signed
-  {
-    Value *A, *B, *LShr;
-    if (!Cmp.isSigned() && *C1 == 0) {
-      if (match(And->getOperand(1), m_One())) {
-        Constant *One = cast<Constant>(And->getOperand(1));
-        Value *Or = And->getOperand(0);
-        if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
-            match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
-          unsigned UsesRemoved = 0;
-          if (And->hasOneUse())
-            ++UsesRemoved;
-          if (Or->hasOneUse())
-            ++UsesRemoved;
-          if (LShr->hasOneUse())
-            ++UsesRemoved;
-          Value *NewOr = nullptr;
-          // Compute A & ((1 << B) | 1)
-          if (auto *C = dyn_cast<Constant>(B)) {
-            if (UsesRemoved >= 1)
-              NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
-          } else {
-            if (UsesRemoved >= 3)
-              NewOr =
-                  Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
-                                                       /*HasNUW=*/true),
-                                    One, Or->getName());
-          }
-          if (NewOr) {
-            Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
-            Cmp.setOperand(0, NewAnd);
-            return &Cmp;
-          }
-        }
-      }
-    }
-  }
+  if (!Cmp.isSigned() && *C1 == 0 && match(And->getOperand(1), m_One())) {
+    Constant *One = cast<Constant>(And->getOperand(1));
+    Value *Or = And->getOperand(0);
+    Value *A, *B, *LShr;
+    if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
+        match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
+      unsigned UsesRemoved = 0;
+      if (And->hasOneUse())
+        ++UsesRemoved;
+      if (Or->hasOneUse())
+        ++UsesRemoved;
+      if (LShr->hasOneUse())
+        ++UsesRemoved;
+
+      // Compute A & ((1 << B) | 1)
+      Value *NewOr = nullptr;
+      if (auto *C = dyn_cast<Constant>(B)) {
+        if (UsesRemoved >= 1)
+          NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
+      } else {
+        if (UsesRemoved >= 3)
+          NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(),
+                                                       /*HasNUW=*/true),
+                                    One, Or->getName());
+      }
+      if (NewOr) {
+        Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName());
+        Cmp.setOperand(0, NewAnd);
+        return &Cmp;
+      }
+    }
+  }
 
-  // Replace ((X & C2) > C1) with ((X & C2) != 0), if any bit set in (X & C2)
-  // will produce a result greater than C1.
-  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
-    unsigned NTZ = C2->countTrailingZeros();
-    if ((NTZ < C2->getBitWidth()) &&
-        APInt::getOneBitSet(C2->getBitWidth(), NTZ).ugt(*C1))
-      return new ICmpInst(ICmpInst::ICMP_NE, And,
-                          Constant::getNullValue(RHS->getType()));
-  }
+  // (X & C2) > C1 --> (X & C2) != 0, if any bit set in (X & C2) will produce a
+  // result greater than C1.
+  unsigned NumTZ = C2->countTrailingZeros();
+  if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && NumTZ < C2->getBitWidth() &&
+      APInt::getOneBitSet(C2->getBitWidth(), NumTZ).ugt(*C1)) {
+    Constant *Zero = Constant::getNullValue(And->getType());
+    return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
+  }
 
   return nullptr;
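
Two details of the rewrite enable the vector cases: the early bail-out on a
scalar ConstantInt RHS is gone (C1 and C2 are used as APInt values, which the
matchers also bind for splat vector constants), and the zero constant is now
built from And->getType(), which may be a vector type. A minimal sketch of the
lshr/or/and fold on vectors (function name hypothetical; it mirrors
@icmp_and_or_lshr_vec in the test diff below):

  ; Testing bit 0 of (X | (X >> Y)) is equivalent to testing X against the
  ; mask ((1 << Y) | 1), so the shift is applied to a constant instead of X:
  ;   ((X | (X >> Y)) & 1) != 0  -->  (X & ((1 << Y) | 1)) != 0
  define <2 x i1> @and_or_lshr_demo(<2 x i32> %x, <2 x i32> %y) {
    %shf = lshr <2 x i32> %x, %y
    %or = or <2 x i32> %shf, %x
    %and = and <2 x i32> %or, <i32 1, i32 1>
    %ret = icmp ne <2 x i32> %and, zeroinitializer
    ret <2 x i1> %ret
  }

Note that with a non-constant shift amount this fold only fires when each
intermediate value has a single use (the UsesRemoved >= 3 guard), since the
rewrite would otherwise add instructions without removing any.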

test/Transforms/InstCombine/icmp.ll

@@ -1013,15 +1013,29 @@ define i1 @test67(i32 %x) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
+; The test above relies on 3 different folds.
+; This test only checks the last of those (icmp ugt -> icmp ne).
 define <2 x i1> @test67vec(<2 x i32> %x) {
 ; CHECK-LABEL: @test67vec(
 ; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> %x, <i32 96, i32 96>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> [[AND]], <i32 31, i32 31>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[AND]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %and = and <2 x i32> %x, <i32 96, i32 96>
-  %cmp = icmp sgt <2 x i32> %and, <i32 31, i32 31>
+  %cmp = icmp ugt <2 x i32> %and, <i32 31, i32 31>
   ret <2 x i1> %cmp
 }
 
+; FIXME: Vector constant for the 'and' should use less bits.
+define <2 x i1> @test67vec2(<2 x i32> %x) {
+; CHECK-LABEL: @test67vec2(
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> %x, <i32 127, i32 127>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ugt <2 x i32> [[AND]], <i32 31, i32 31>
+; CHECK-NEXT:    ret <2 x i1> [[CMP]]
+;
+  %and = and <2 x i32> %x, <i32 127, i32 127>
+  %cmp = icmp sgt <2 x i32> %and, <i32 31, i32 31>
+  ret <2 x i1> %cmp
+}
@@ -2059,13 +2073,12 @@ define i1 @icmp_and_or_lshr(i32 %x, i32 %y) {
   ret i1 %ret
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_and_or_lshr_vec(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @icmp_and_or_lshr_vec(
-; CHECK-NEXT:    [[SHF:%.*]] = lshr <2 x i32> %x, %y
-; CHECK-NEXT:    [[OR:%.*]] = or <2 x i32> [[SHF]], %x
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[OR]], <i32 1, i32 1>
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne <2 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    [[SHF1:%.*]] = shl nuw <2 x i32> <i32 1, i32 1>, %y
+; CHECK-NEXT:    [[OR2:%.*]] = or <2 x i32> [[SHF1]], <i32 1, i32 1>
+; CHECK-NEXT:    [[AND3:%.*]] = and <2 x i32> [[OR2]], %x
+; CHECK-NEXT:    [[RET:%.*]] = icmp ne <2 x i32> [[AND3]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[RET]]
 ;
   %shf = lshr <2 x i32> %x, %y
@@ -2088,13 +2101,10 @@ define i1 @icmp_and_or_lshr_cst(i32 %x) {
   ret i1 %ret
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_and_or_lshr_cst_vec(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_and_or_lshr_cst_vec(
-; CHECK-NEXT:    [[SHF:%.*]] = lshr <2 x i32> %x, <i32 1, i32 1>
-; CHECK-NEXT:    [[OR:%.*]] = or <2 x i32> [[SHF]], %x
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[OR]], <i32 1, i32 1>
-; CHECK-NEXT:    [[RET:%.*]] = icmp ne <2 x i32> [[AND]], zeroinitializer
+; CHECK-NEXT:    [[AND1:%.*]] = and <2 x i32> %x, <i32 3, i32 3>
+; CHECK-NEXT:    [[RET:%.*]] = icmp ne <2 x i32> [[AND1]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[RET]]
 ;
   %shf = lshr <2 x i32> %x, <i32 1, i32 1>
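
When the shift amount is a constant, the new mask is computed at compile time
(the ConstantExpr path guarded by UsesRemoved >= 1 above), so the whole chain
collapses to one masked compare, as the updated CHECK lines show. A minimal
sketch (function name hypothetical; it mirrors @icmp_and_or_lshr_cst_vec):

  ;   ((X | (X >> 1)) & 1) != 0  -->  (X & ((1 << 1) | 1)) != 0
  ;                              -->  (X & 3) != 0
  define <2 x i1> @and_or_lshr_cst_demo(<2 x i32> %x) {
    %shf = lshr <2 x i32> %x, <i32 1, i32 1>
    %or = or <2 x i32> %shf, %x
    %and = and <2 x i32> %or, <i32 1, i32 1>
    %ret = icmp ne <2 x i32> %and, zeroinitializer
    ret <2 x i1> %ret
  }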