From e9f15e538a87dcc92880ebd1ca5c3e88f1e1cb53 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Wed, 26 Oct 2005 17:18:16 +0000
Subject: [PATCH] fold nested and's early to avoid inefficiencies in
 MaskedValueIsZero. This fixes a very slow compile in PR639.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@24011 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Scalar/InstructionCombining.cpp | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/lib/Transforms/Scalar/InstructionCombining.cpp b/lib/Transforms/Scalar/InstructionCombining.cpp
index 9ba0512a78b..f3325096895 100644
--- a/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -1725,6 +1725,15 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
     // and X, -1 == X
     if (AndRHS->isAllOnesValue())
       return ReplaceInstUsesWith(I, Op0);
+
+    // and (and X, c1), c2 -> and (x, c1&c2). Handle this case here, before
+    // calling MaskedValueIsZero, to avoid inefficient cases where we traipse
+    // through many levels of ands.
+    {
+      Value *X; ConstantInt *C1;
+      if (match(Op0, m_And(m_Value(X), m_ConstantInt(C1))))
+        return BinaryOperator::createAnd(X, ConstantExpr::getAnd(C1, AndRHS));
+    }
 
     if (MaskedValueIsZero(Op0, AndRHS))        // LHS & RHS == 0
       return ReplaceInstUsesWith(I, Constant::getNullValue(I.getType()));
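
To see why folding the nested and before calling MaskedValueIsZero helps, here is a minimal standalone sketch. It is not LLVM code: the Expr struct and the maskedValueIsZero / foldNestedAnds functions below are hypothetical stand-ins for the real IR and analysis. It models a deep chain of and instructions and shows that rewriting (x & c1) & c2 into x & (c1 & c2) up front lets a MaskedValueIsZero-style query stop after one node instead of walking the whole chain.

#include <cstdint>
#include <cstdio>
#include <memory>

// Toy expression node: either a leaf holding unknown bits (child == nullptr),
// or "child & mask".
struct Expr {
  std::unique_ptr<Expr> child;   // null for a leaf
  uint64_t mask = ~0ull;         // mask applied to child; ignored for leaves
};

// A MaskedValueIsZero-style query: are all bits in `query` known to be zero
// in the value this expression computes?  It walks every `and` below `e`,
// so each call costs O(depth of the and-chain).
static bool maskedValueIsZero(const Expr *e, uint64_t query, size_t &steps) {
  uint64_t knownZero = 0;
  for (; e && e->child; e = e->child.get()) {
    ++steps;
    knownZero |= ~e->mask;       // bits cleared by this `and` are known zero
  }
  return (query & ~knownZero) == 0;
}

// The fold from the patch, in toy form:
//   (child & c1) & c2  ->  child & (c1 & c2)
// applied repeatedly, so an arbitrarily deep chain collapses to one node.
static void foldNestedAnds(Expr *e) {
  while (e->child && e->child->child) {
    e->mask &= e->child->mask;                 // combine the two masks
    e->child = std::move(e->child->child);     // splice out the inner `and`
  }
}

int main() {
  // Build a chain of 1000 nested ands over one leaf of unknown bits.
  auto root = std::make_unique<Expr>();
  Expr *cur = root.get();
  for (int i = 0; i < 1000; ++i) {
    cur->mask = 0x00FFu;                       // every level masks to the low byte
    cur->child = std::make_unique<Expr>();
    cur = cur->child.get();
  }

  size_t steps = 0;
  bool zero = maskedValueIsZero(root.get(), 0xFF00u, steps);
  std::printf("before folding: %zu nodes walked, known zero = %d\n", steps, zero);

  foldNestedAnds(root.get());                  // what the patch does eagerly
  steps = 0;
  zero = maskedValueIsZero(root.get(), 0xFF00u, steps);
  std::printf("after folding:  %zu nodes walked, known zero = %d\n", steps, zero);
  return 0;
}

In the real pass the saving is presumably larger than a single walk suggests: InstCombine visits every and in the chain, and without the early fold each visit re-runs MaskedValueIsZero over everything below it, which matches the "traipse through many levels of ands" the commit message describes as the cause of the slow compile in PR639.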