[InstCombine] use m_APInt to allow icmp (and (sh X, Y), C2), C1 folds for splat constant vectors

llvm-svn: 280873
Sanjay Patel 2016-09-07 22:33:03 +00:00
parent fe51c2f7b4
commit 9b40f98357
6 changed files with 31 additions and 67 deletions
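In short: foldICmpAndShift previously required scalar ConstantInt operands, so vector patterns were skipped; matching the constants with m_APInt lets the same folds fire when the constants are splat vectors. A before/after sketch in IR, mirroring the updated @icmp_add_and_shr_ne_0_vec test below:

  ; before: the shift blocked the fold for vectors
  %shr = lshr <2 x i32> %X, <i32 4, i32 4>
  %and = and <2 x i32> %shr, <i32 15, i32 15>
  %tobool = icmp ne <2 x i32> %and, <i32 14, i32 14>

  ; after: ((X >> 4) & 15) != 14 becomes (X & (15 << 4)) != (14 << 4)
  %and = and <2 x i32> %X, <i32 240, i32 240>
  %tobool = icmp ne <2 x i32> %and, <i32 224, i32 224>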


@@ -1386,29 +1386,21 @@ Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
 
 /// Fold icmp (and (sh X, Y), C2), C1.
 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
-                                            const APInt *C1) {
-  // FIXME: This check restricts all folds under here to scalar types.
-  ConstantInt *RHS = dyn_cast<ConstantInt>(Cmp.getOperand(1));
-  if (!RHS)
-    return nullptr;
-
-  // FIXME: This could be passed in as APInt.
-  auto *C2 = dyn_cast<ConstantInt>(And->getOperand(1));
-  if (!C2)
+                                            const APInt *C1, const APInt *C2) {
+  BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
+  if (!Shift || !Shift->isShift())
     return nullptr;
 
   // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
   // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
   // code produced by the clang front-end, for bitfield access.
-  BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
-  if (!Shift || !Shift->isShift())
-    return nullptr;
-
   // This seemingly simple opportunity to fold away a shift turns out to be
   // rather complicated. See PR17827 for details.
-  if (auto *ShAmt = dyn_cast<ConstantInt>(Shift->getOperand(1))) {
+  unsigned ShiftOpcode = Shift->getOpcode();
+  bool IsShl = ShiftOpcode == Instruction::Shl;
+  const APInt *C3;
+  if (match(Shift->getOperand(1), m_APInt(C3))) {
     bool CanFold = false;
-    unsigned ShiftOpcode = Shift->getOpcode();
     if (ShiftOpcode == Instruction::AShr) {
       // There may be some constraints that make this possible, but nothing
       // simple has been discovered yet.
@@ -1418,35 +1410,23 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
       // also fold a signed comparison if the mask value and comparison value
       // are not negative. These constraints may not be obvious, but we can
       // prove that they are correct using an SMT solver.
-      if (!Cmp.isSigned() || (!C2->isNegative() && !RHS->isNegative()))
+      if (!Cmp.isSigned() || (!C2->isNegative() && !C1->isNegative()))
         CanFold = true;
     } else if (ShiftOpcode == Instruction::LShr) {
       // For a logical right shift, we can fold if the comparison is not signed.
       // We can also fold a signed comparison if the shifted mask value and the
       // shifted comparison value are not negative. These constraints may not be
      // obvious, but we can prove that they are correct using an SMT solver.
-      if (!Cmp.isSigned())
+      if (!Cmp.isSigned() ||
+          (!C2->shl(*C3).isNegative() && !C1->shl(*C3).isNegative()))
         CanFold = true;
-      else {
-        ConstantInt *ShiftedAndCst =
-            cast<ConstantInt>(ConstantExpr::getShl(C2, ShAmt));
-        ConstantInt *ShiftedRHSCst =
-            cast<ConstantInt>(ConstantExpr::getShl(RHS, ShAmt));
-        if (!ShiftedAndCst->isNegative() && !ShiftedRHSCst->isNegative())
-          CanFold = true;
-      }
     }
 
     if (CanFold) {
-      Constant *NewCst;
-      if (ShiftOpcode == Instruction::Shl)
-        NewCst = ConstantExpr::getLShr(RHS, ShAmt);
-      else
-        NewCst = ConstantExpr::getShl(RHS, ShAmt);
-
+      APInt NewCst = IsShl ? C1->lshr(*C3) : C1->shl(*C3);
+      APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
       // Check to see if we are shifting out any of the bits being compared.
-      if (ConstantExpr::get(ShiftOpcode, NewCst, ShAmt) != RHS) {
+      if (SameAsC1 != *C1) {
         // If we shifted bits out, the fold is not going to work out. As a
         // special case, check to see if this means that the result is always
         // true or false now.
@@ -1455,13 +1435,9 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
         if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
           return replaceInstUsesWith(Cmp, Builder->getTrue());
       } else {
-        Cmp.setOperand(1, NewCst);
-        Constant *NewAndCst;
-        if (ShiftOpcode == Instruction::Shl)
-          NewAndCst = ConstantExpr::getLShr(C2, ShAmt);
-        else
-          NewAndCst = ConstantExpr::getShl(C2, ShAmt);
-        And->setOperand(1, NewAndCst);
+        Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
+        APInt NewAndCst = IsShl ? C2->lshr(*C3) : C2->shl(*C3);
+        And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
         And->setOperand(0, Shift->getOperand(0));
         Worklist.Add(Shift); // Shift is dead.
         return &Cmp;
@@ -1475,18 +1451,12 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
   if (Shift->hasOneUse() && *C1 == 0 && Cmp.isEquality() &&
       !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
     // Compute C2 << Y.
-    Value *NS;
-    if (Shift->getOpcode() == Instruction::LShr) {
-      NS = Builder->CreateShl(C2, Shift->getOperand(1));
-    } else {
-      // Insert a logical shift.
-      NS = Builder->CreateLShr(C2, Shift->getOperand(1));
-    }
+    Value *NewShift =
+        IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1))
+              : Builder->CreateShl(And->getOperand(1), Shift->getOperand(1));
 
     // Compute X & (C2 << Y).
-    Value *NewAnd =
-        Builder->CreateAnd(Shift->getOperand(0), NS, And->getName());
+    Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift);
     Cmp.setOperand(0, NewAnd);
     return &Cmp;
   }
@@ -1529,7 +1499,7 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
     }
   }
 
-  if (Instruction *I = foldICmpAndShift(Cmp, And, C1))
+  if (Instruction *I = foldICmpAndShift(Cmp, And, C1, C2))
     return I;
 
   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->


@@ -582,7 +582,7 @@ private:
   Instruction *foldICmpAndConstConst(ICmpInst &Cmp, BinaryOperator *And,
                                      const APInt *C1);
   Instruction *foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
-                                const APInt *C1);
+                                const APInt *C1, const APInt *C2);
   Instruction *foldICmpEqualityWithConstant(ICmpInst &ICI);
   Instruction *foldICmpIntrinsicWithConstant(ICmpInst &ICI);
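The variable-shift fold in the same function now fires for vectors as well. A sketch of that rewrite, matching what the updated @test0vec below checks: the mask is shifted instead of the value, so the shifted mask can be hoisted out of a loop when the shift amount is invariant but X is not.

  ; before
  %B = lshr <2 x i39> %X, %A
  %and = and <2 x i39> %B, <i39 1, i39 1>
  %D = icmp ne <2 x i39> %and, zeroinitializer

  ; after: ((X >> A) & 1) != 0 becomes ((1 << A) & X) != 0
  %shl = shl <2 x i39> <i39 1, i39 1>, %A
  %and = and <2 x i39> %shl, %X
  %D = icmp ne <2 x i39> %and, zeroinitializer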


@@ -205,11 +205,9 @@ define i1 @test16(i84 %X) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold too.
 define <2 x i1> @test16vec(<2 x i84> %X) {
 ; CHECK-LABEL: @test16vec(
-; CHECK-NEXT:    [[SHR1:%.*]] = lshr <2 x i84> %X, <i84 4, i84 4>
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i84> [[SHR1]], <i84 1, i84 1>
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i84> %X, <i84 16, i84 16>
 ; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i84> [[AND]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;


@@ -25,12 +25,11 @@ define i1 @test1(i799 %X, i799 %A) {
   ret i1 %D
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @test0vec(<2 x i39> %X, <2 x i39> %A) {
 ; CHECK-LABEL: @test0vec(
-; CHECK-NEXT:    [[B:%.*]] = lshr <2 x i39> %X, %A
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i39> [[B]], <i39 1, i39 1>
-; CHECK-NEXT:    [[D:%.*]] = icmp ne <2 x i39> [[TMP1]], zeroinitializer
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i39> <i39 1, i39 1>, %A
+; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i39> [[TMP1]], %X
+; CHECK-NEXT:    [[D:%.*]] = icmp ne <2 x i39> [[TMP2]], zeroinitializer
 ; CHECK-NEXT:    ret <2 x i1> [[D]]
 ;
   %B = lshr <2 x i39> %X, %A


@@ -1521,12 +1521,10 @@ define i1 @icmp_add_and_shr_ne_0(i32 %X) {
   ret i1 %tobool
 }
 
-; FIXME: Vectors should fold the same way.
 define <2 x i1> @icmp_add_and_shr_ne_0_vec(<2 x i32> %X) {
 ; CHECK-LABEL: @icmp_add_and_shr_ne_0_vec(
-; CHECK-NEXT:    [[SHR:%.*]] = lshr <2 x i32> %X, <i32 4, i32 4>
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[SHR]], <i32 15, i32 15>
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne <2 x i32> [[AND]], <i32 14, i32 14>
+; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> %X, <i32 240, i32 240>
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne <2 x i32> [[AND]], <i32 224, i32 224>
 ; CHECK-NEXT:    ret <2 x i1> [[TOBOOL]]
 ;
   %shr = lshr <2 x i32> %X, <i32 4, i32 4>


@@ -80,12 +80,11 @@ define i1 @test_shift_and_cmp_changed2(i8 %p) {
   ret i1 %cmp
 }
 
-; FIXME: Vectors should fold the same way.
+; FIXME: icmp ult X, 1 -> icmp eq X, 0
 define <2 x i1> @test_shift_and_cmp_changed2_vec(<2 x i8> %p) {
 ; CHECK-LABEL: @test_shift_and_cmp_changed2_vec(
-; CHECK-NEXT:    [[SHLP:%.*]] = shl <2 x i8> %p, <i8 5, i8 5>
-; CHECK-NEXT:    [[ANDP:%.*]] = and <2 x i8> [[SHLP]], <i8 -64, i8 -64>
-; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> [[ANDP]], <i8 32, i8 32>
+; CHECK-NEXT:    [[ANDP:%.*]] = and <2 x i8> %p, <i8 6, i8 6>
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult <2 x i8> [[ANDP]], <i8 1, i8 1>
 ; CHECK-NEXT:    ret <2 x i1> [[CMP]]
 ;
   %shlp = shl <2 x i8> %p, <i8 5, i8 5>