From 4c45e36dd832ef0c400bb650076cf946285760b4 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Thu, 22 Jun 2017 15:46:54 +0000
Subject: [PATCH] [InstCombine] reverse bitcast + bitwise-logic canonicalization
 (PR33138)

There are 2 parts to this patch made simultaneously to avoid a regression.

We're reversing the canonicalization that moves bitwise vector ops before
bitcasts. We're moving bitwise vector ops *after* bitcasts instead. That's the
1st and 3rd hunks of the patch. The motivation is that there's only one fold
that currently depends on the existing canonicalization (see next), but there
are many folds that would automatically benefit from the new canonicalization.
PR33138 ( https://bugs.llvm.org/show_bug.cgi?id=33138 ) shows why/how we have
these patterns in IR.

There's an or(and,andn) pattern that requires an adjustment in order to
continue matching to 'select' because the bitcast changes position. This match
is unfortunately complicated because it requires 4 logic ops with optional
bitcast and sext ops.

Test diffs:

1. The bitcast.ll and bitcast-bigendian.ll changes show the most basic
   difference - bitcast comes before logic.
2. There are also tests with no diffs in bitcast.ll that verify that we're
   still doing folds that were enabled by the previous canonicalization.
3. icmp-xor-signbit.ll shows the payoff. We don't need to adjust existing
   icmp patterns to look through bitcasts.
4. logical-select.ll contains several tests for the or(and,andn) --> select
   fold to verify that we are still handling those cases. The lone diff shows
   the movement of the bitcast from the new canonicalization rule.

Differential Revision: https://reviews.llvm.org/D33517

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@306011 91177308-0d34-0410-b5e6-96231b3b80d8
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 26 +++++++------------
 .../InstCombine/InstCombineCasts.cpp          | 12 +++++++++
 .../InstCombine/bitcast-bigendian.ll          | 18 ++++++-------
 test/Transforms/InstCombine/bitcast.ll        | 26 +++++++++----------
 .../InstCombine/icmp-xor-signbit.ll           | 21 +++++----------
 test/Transforms/InstCombine/logical-select.ll |  4 +--
 6 files changed, 52 insertions(+), 55 deletions(-)

diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 2de83a01062..98e3fde95b3 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1097,20 +1097,11 @@ static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
   Type *DestTy = Logic.getType();
   Type *SrcTy = Cast->getSrcTy();
 
-  // If the first operand is bitcast, move the logic operation ahead of the
-  // bitcast (do the logic operation in the original type). This can eliminate
-  // bitcasts and allow combines that would otherwise be impeded by the bitcast.
+  // Move the logic operation ahead of a zext if the constant is unchanged in
+  // the smaller source type. Performing the logic in a smaller type may provide
+  // more information to later folds, and the smaller logic instruction may be
+  // cheaper (particularly in the case of vectors).
   Value *X;
-  if (match(Cast, m_BitCast(m_Value(X)))) {
-    Value *NewConstant = ConstantExpr::getBitCast(C, SrcTy);
-    Value *NewOp = Builder->CreateBinOp(LogicOpc, X, NewConstant);
-    return CastInst::CreateBitOrPointerCast(NewOp, DestTy);
-  }
-
-  // Similarly, move the logic operation ahead of a zext if the constant is
-  // unchanged in the smaller source type. Performing the logic in a smaller
-  // type may provide more information to later folds, and the smaller logic
-  // instruction may be cheaper (particularly in the case of vectors).
   if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
     Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
     Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
@@ -1579,11 +1570,14 @@ static Value *getSelectCondition(Value *A, Value *B,
 
   // If A and B are sign-extended, look through the sexts to find the booleans.
   Value *Cond;
+  Value *NotB;
   if (match(A, m_SExt(m_Value(Cond))) &&
       Cond->getType()->getScalarType()->isIntegerTy(1) &&
-      match(B, m_CombineOr(m_Not(m_SExt(m_Specific(Cond))),
-                           m_SExt(m_Not(m_Specific(Cond))))))
-    return Cond;
+      match(B, m_OneUse(m_Not(m_Value(NotB))))) {
+    NotB = peekThroughBitcast(NotB, true);
+    if (match(NotB, m_SExt(m_Specific(Cond))))
+      return Cond;
+  }
 
   // All scalar (and most vector) possibilities should be handled now.
   // Try more matches that only apply to non-splat constant vectors.
diff --git a/lib/Transforms/InstCombine/InstCombineCasts.cpp b/lib/Transforms/InstCombine/InstCombineCasts.cpp
index 38e95fb1163..d3049389dfb 100644
--- a/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1896,6 +1896,18 @@ static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
     return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
   }
 
+  // Canonicalize vector bitcasts to come before vector bitwise logic with a
+  // constant. This eases recognition of special constants for later ops.
+  // Example:
+  // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
+  Constant *C;
+  if (match(BO->getOperand(1), m_Constant(C))) {
+    // bitcast (logic X, C) --> logic (bitcast X, C')
+    Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
+    Value *CastedC = ConstantExpr::getBitCast(C, DestTy);
+    return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
+  }
+
   return nullptr;
 }
 
diff --git a/test/Transforms/InstCombine/bitcast-bigendian.ll b/test/Transforms/InstCombine/bitcast-bigendian.ll
index e940f0fcec7..0001fab8c16 100644
--- a/test/Transforms/InstCombine/bitcast-bigendian.ll
+++ b/test/Transforms/InstCombine/bitcast-bigendian.ll
@@ -92,12 +92,12 @@ define <2 x float> @test6(float %A){
   ret <2 x float> %tmp35
 }
 
-; Verify that 'xor' of vector and constant is done as a vector bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
 ; CHECK-LABEL: @xor_bitcast_vec_to_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = xor <1 x i64> [[A:%.*]],
-; CHECK-NEXT:    [[T2:%.*]] = bitcast <1 x i64> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[T1:%.*]] = bitcast <1 x i64> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[T2:%.*]] = xor <2 x i32> [[T1]],
 ; CHECK-NEXT:    ret <2 x i32> [[T2]]
 ;
   %t1 = bitcast <1 x i64> %a to <2 x i32>
@@ -105,12 +105,12 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
   ret <2 x i32> %t2
 }
 
-; Verify that 'and' of integer and constant is done as a vector bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
 ; CHECK-LABEL: @and_bitcast_vec_to_int(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]],
-; CHECK-NEXT:    [[T2:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = bitcast <2 x i32> [[A:%.*]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = and i64 [[T1]], 3
 ; CHECK-NEXT:    ret i64 [[T2]]
 ;
   %t1 = bitcast <2 x i32> %a to i64
@@ -118,12 +118,12 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
   ret i64 %t2
 }
 
-; Verify that 'or' of vector and constant is done as an integer bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
 ; CHECK-LABEL: @or_bitcast_int_to_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[A:%.*]], 4294967298
-; CHECK-NEXT:    [[T2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[T1:%.*]] = bitcast i64 [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[T2:%.*]] = or <2 x i32> [[T1]],
 ; CHECK-NEXT:    ret <2 x i32> [[T2]]
 ;
   %t1 = bitcast i64 %a to <2 x i32>
diff --git a/test/Transforms/InstCombine/bitcast.ll b/test/Transforms/InstCombine/bitcast.ll
index 4cf3f27ab01..0f0cbdb364a 100644
--- a/test/Transforms/InstCombine/bitcast.ll
+++ b/test/Transforms/InstCombine/bitcast.ll
@@ -31,12 +31,12 @@ define <2 x i32> @xor_two_vector_bitcasts(<1 x i64> %a, <1 x i64> %b) {
   ret <2 x i32> %t3
 }
 
-; Verify that 'xor' of vector and constant is done as a vector bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
 ; CHECK-LABEL: @xor_bitcast_vec_to_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = xor <1 x i64> [[A:%.*]],
-; CHECK-NEXT:    [[T2:%.*]] = bitcast <1 x i64> [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[T1:%.*]] = bitcast <1 x i64> [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[T2:%.*]] = xor <2 x i32> [[T1]],
 ; CHECK-NEXT:    ret <2 x i32> [[T2]]
 ;
   %t1 = bitcast <1 x i64> %a to <2 x i32>
@@ -44,12 +44,12 @@ define <2 x i32> @xor_bitcast_vec_to_vec(<1 x i64> %a) {
   ret <2 x i32> %t2
 }
 
-; Verify that 'and' of integer and constant is done as a vector bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
 ; CHECK-LABEL: @and_bitcast_vec_to_int(
-; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]],
-; CHECK-NEXT:    [[T2:%.*]] = bitcast <2 x i32> [[TMP1]] to i64
+; CHECK-NEXT:    [[T1:%.*]] = bitcast <2 x i32> [[A:%.*]] to i64
+; CHECK-NEXT:    [[T2:%.*]] = and i64 [[T1]], 3
 ; CHECK-NEXT:    ret i64 [[T2]]
 ;
   %t1 = bitcast <2 x i32> %a to i64
@@ -57,12 +57,12 @@ define i64 @and_bitcast_vec_to_int(<2 x i32> %a) {
   ret i64 %t2
 }
 
-; Verify that 'or' of vector and constant is done as an integer bitwise op before the bitcast.
+; No change. Bitcasts are canonicalized above bitwise logic.
 
 define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
 ; CHECK-LABEL: @or_bitcast_int_to_vec(
-; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[A:%.*]], 8589934593
-; CHECK-NEXT:    [[T2:%.*]] = bitcast i64 [[TMP1]] to <2 x i32>
+; CHECK-NEXT:    [[T1:%.*]] = bitcast i64 [[A:%.*]] to <2 x i32>
+; CHECK-NEXT:    [[T2:%.*]] = or <2 x i32> [[T1]],
 ; CHECK-NEXT:    ret <2 x i32> [[T2]]
 ;
   %t1 = bitcast i64 %a to <2 x i32>
@@ -71,7 +71,7 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {
 }
 
 ; PR26702 - https://bugs.llvm.org//show_bug.cgi?id=26702
-; Bitcast is canonicalized below logic, so we can see the not-not pattern.
+; Bitcast is canonicalized above logic, so we can see the not-not pattern.
 
 define <2 x i64> @is_negative(<4 x i32> %x) {
 ; CHECK-LABEL: @is_negative(
@@ -102,12 +102,12 @@ define <4 x i32> @is_negative_bonus_bitcast(<4 x i32> %x) {
   ret <4 x i32> %bc2
 }
 
-; Negative test: bitcasts are canonicalized below bitwise logic. No changes here.
+; Bitcasts are canonicalized above bitwise logic.
 
 define <2 x i8> @canonicalize_bitcast_logic_with_constant(<4 x i4> %x) {
 ; CHECK-LABEL: @canonicalize_bitcast_logic_with_constant(
-; CHECK-NEXT:    [[A:%.*]] = and <4 x i4> %x,
-; CHECK-NEXT:    [[B:%.*]] = bitcast <4 x i4> [[A]] to <2 x i8>
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i4> [[X:%.*]] to <2 x i8>
+; CHECK-NEXT:    [[B:%.*]] = and <2 x i8> [[TMP1]],
 ; CHECK-NEXT:    ret <2 x i8> [[B]]
 ;
   %a = and <4 x i4> %x,
diff --git a/test/Transforms/InstCombine/icmp-xor-signbit.ll b/test/Transforms/InstCombine/icmp-xor-signbit.ll
index 30a9668f37d..dab9b5e9fef 100644
--- a/test/Transforms/InstCombine/icmp-xor-signbit.ll
+++ b/test/Transforms/InstCombine/icmp-xor-signbit.ll
@@ -188,16 +188,13 @@ define <2 x i1> @uge_to_slt_splat(<2 x i8> %x) {
 }
 
 ; PR33138, part 2: https://bugs.llvm.org/show_bug.cgi?id=33138
-; TODO: We could look through vector bitcasts for icmp folds,
-; or we could canonicalize bitcast ahead of logic ops with constants.
+; Bitcast canonicalization ensures that we recognize the signbit constant.
 
 define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @sgt_to_ugt_bitcasted_splat(
-; CHECK-NEXT:    [[A:%.*]] = xor <2 x i32> %x,
-; CHECK-NEXT:    [[B:%.*]] = xor <2 x i32> %y,
-; CHECK-NEXT:    [[C:%.*]] = bitcast <2 x i32> [[A]] to <8 x i8>
-; CHECK-NEXT:    [[D:%.*]] = bitcast <2 x i32> [[B]] to <8 x i8>
-; CHECK-NEXT:    [[E:%.*]] = icmp sgt <8 x i8> [[C]], [[D]]
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <2 x i32> %x to <8 x i8>
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <2 x i32> %y to <8 x i8>
+; CHECK-NEXT:    [[E:%.*]] = icmp ugt <8 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT:    ret <8 x i1> [[E]]
 ;
   %a = xor <2 x i32> %x, ; 0x80808080
@@ -208,17 +205,11 @@ define <8 x i1> @sgt_to_ugt_bitcasted_splat(<2 x i32> %x, <2 x i32> %y) {
   ret <8 x i1> %e
 }
 
-; TODO: This is false (little-endian). How should that be recognized?
-; Ie, should InstSimplify know this directly, should InstCombine canonicalize
-; this so InstSimplify can know this, or is that not something that we want
-; either pass to recognize?
+; Bitcast canonicalization ensures that we recognize the signbit constant.
 
 define <2 x i1> @negative_simplify_splat(<4 x i8> %x) {
 ; CHECK-LABEL: @negative_simplify_splat(
-; CHECK-NEXT:    [[A:%.*]] = or <4 x i8> %x,
-; CHECK-NEXT:    [[B:%.*]] = bitcast <4 x i8> [[A]] to <2 x i16>
-; CHECK-NEXT:    [[C:%.*]] = icmp sgt <2 x i16> [[B]], zeroinitializer
-; CHECK-NEXT:    ret <2 x i1> [[C]]
+; CHECK-NEXT:    ret <2 x i1> zeroinitializer
 ;
   %a = or <4 x i8> %x,
   %b = bitcast <4 x i8> %a to <2 x i16>
diff --git a/test/Transforms/InstCombine/logical-select.ll b/test/Transforms/InstCombine/logical-select.ll
index 7f0bd23eb8a..6c00dec60ed 100644
--- a/test/Transforms/InstCombine/logical-select.ll
+++ b/test/Transforms/InstCombine/logical-select.ll
@@ -342,8 +342,8 @@ define <2 x i64> @bitcast_select_multi_uses(<4 x i1> %cmp, <2 x i64> %a, <2 x i6
 ; CHECK-NEXT:    [[SEXT:%.*]] = sext <4 x i1> %cmp to <4 x i32>
 ; CHECK-NEXT:    [[BC1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
 ; CHECK-NEXT:    [[AND1:%.*]] = and <2 x i64> [[BC1]], %a
-; CHECK-NEXT:    [[NEG:%.*]] = xor <4 x i32> [[SEXT]],
-; CHECK-NEXT:    [[BC2:%.*]] = bitcast <4 x i32> [[NEG]] to <2 x i64>
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64>
+; CHECK-NEXT:    [[BC2:%.*]] = xor <2 x i64> [[TMP1]],
 ; CHECK-NEXT:    [[AND2:%.*]] = and <2 x i64> [[BC2]], %b
 ; CHECK-NEXT:    [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND1]]
 ; CHECK-NEXT:    [[ADD:%.*]] = add <2 x i64> [[AND2]], [[BC2]]
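
Note (not part of the committed patch): a minimal IR sketch of the icmp-xor-signbit.ll payoff case. The function name @sgt_to_ugt_sketch and the spelled-out splat (0x80808080, written as -2139062144 per i32 element) are assumptions added here, since the diff text above elides the vector constants; the value matches the "0x80808080" noted in the test comment.

define <8 x i1> @sgt_to_ugt_sketch(<2 x i32> %x, <2 x i32> %y) {
  ; Flip the sign bit of every i8 lane (0x80 per byte) before the compare.
  %a = xor <2 x i32> %x, <i32 -2139062144, i32 -2139062144>
  %b = xor <2 x i32> %y, <i32 -2139062144, i32 -2139062144>
  %c = bitcast <2 x i32> %a to <8 x i8>
  %d = bitcast <2 x i32> %b to <8 x i8>
  %e = icmp sgt <8 x i8> %c, %d
  ret <8 x i1> %e
}

With the new canonicalization, the bitcasts are created above the xors, the bitcast constant becomes the per-i8 sign mask, and the existing signbit fold fires, so the body is expected to reduce to roughly the CHECK lines shown above:

  %1 = bitcast <2 x i32> %x to <8 x i8>
  %2 = bitcast <2 x i32> %y to <8 x i8>
  %e = icmp ugt <8 x i8> %1, %2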