diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index bc788f6b5ce..c589b06d1ec 100644
--- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -2742,33 +2742,24 @@ static Instruction *foldXorToXor(BinaryOperator &I,
   // (A | B) ^ (A & B) -> A ^ B
   // (A | B) ^ (B & A) -> A ^ B
   if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
-                        m_c_Or(m_Deferred(A), m_Deferred(B))))) {
-    I.setOperand(0, A);
-    I.setOperand(1, B);
-    return &I;
-  }
+                        m_c_Or(m_Deferred(A), m_Deferred(B)))))
+    return BinaryOperator::CreateXor(A, B);
 
   // (A | ~B) ^ (~A | B) -> A ^ B
   // (~B | A) ^ (~A | B) -> A ^ B
   // (~A | B) ^ (A | ~B) -> A ^ B
   // (B | ~A) ^ (A | ~B) -> A ^ B
   if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
-                      m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) {
-    I.setOperand(0, A);
-    I.setOperand(1, B);
-    return &I;
-  }
+                      m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
+    return BinaryOperator::CreateXor(A, B);
 
   // (A & ~B) ^ (~A & B) -> A ^ B
   // (~B & A) ^ (~A & B) -> A ^ B
   // (~A & B) ^ (A & ~B) -> A ^ B
   // (B & ~A) ^ (A & ~B) -> A ^ B
   if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
-                      m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) {
-    I.setOperand(0, A);
-    I.setOperand(1, B);
-    return &I;
-  }
+                      m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
+    return BinaryOperator::CreateXor(A, B);
 
   // For the remaining cases we need to get rid of one of the operands.
   if (!Op0->hasOneUse() && !Op1->hasOneUse())
@@ -3109,9 +3100,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
           MaskedValueIsZero(X, *C, 0, &I)) {
         Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
         Worklist.push(cast<Instruction>(Op0));
-        I.setOperand(0, X);
-        I.setOperand(1, NewC);
-        return &I;
+        return BinaryOperator::CreateXor(X, NewC);
       }
     }
   }
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp
index e0c90b24041..d3ea079d4b8 100644
--- a/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1680,12 +1680,11 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
         if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
           return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
       } else {
-        Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
         APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3);
-        And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
-        And->setOperand(0, Shift->getOperand(0));
-        Worklist.push(Shift); // Shift is dead.
-        return &Cmp;
+        Value *NewAnd = Builder.CreateAnd(
+            Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
+        return new ICmpInst(Cmp.getPredicate(),
+                            NewAnd, ConstantInt::get(And->getType(), NewCst));
       }
     }
   }
@@ -4154,9 +4153,7 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
     if (X) { // Build (X^Y) & Z
       Op1 = Builder.CreateXor(X, Y);
       Op1 = Builder.CreateAnd(Op1, Z);
-      I.setOperand(0, Op1);
-      I.setOperand(1, Constant::getNullValue(Op1->getType()));
-      return &I;
+      return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
     }
   }
 
diff --git a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 516da231bb2..4277774cff2 100644
--- a/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -1917,10 +1917,8 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
       else
         Elts.push_back(ConstantInt::get(Int32Ty, Mask[i] % LHSWidth));
     }
-    SVI.setOperand(0, SVI.getOperand(1));
-    SVI.setOperand(1, UndefValue::get(RHS->getType()));
-    SVI.setOperand(2, ConstantVector::get(Elts));
-    return &SVI;
+    return new ShuffleVectorInst(LHS, UndefValue::get(RHS->getType()),
+                                 ConstantVector::get(Elts));
   }
 
   // shuffle undef, x, mask --> shuffle x, undef, mask'
diff --git a/test/Transforms/InstCombine/icmp-custom-dl.ll b/test/Transforms/InstCombine/icmp-custom-dl.ll
index 6b7cb1cdd4b..09a3b2b5ff4 100644
--- a/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -183,10 +183,10 @@ define i1 @test62_as1(i8 addrspace(1)* %a) {
 ; Variation of the above with an ashr
 define i1 @icmp_and_ashr_multiuse(i32 %X) {
 ; CHECK-LABEL: @icmp_and_ashr_multiuse(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[AND2:%.*]] = and i32 [[X]], 496
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 224
-; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[AND2]], 432
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
+; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
 ; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
 ; CHECK-NEXT:    ret i1 [[AND3]]
 ;
diff --git a/test/Transforms/InstCombine/icmp.ll b/test/Transforms/InstCombine/icmp.ll
index 4ee2d4ec154..6a213fb2986 100644
--- a/test/Transforms/InstCombine/icmp.ll
+++ b/test/Transforms/InstCombine/icmp.ll
@@ -98,8 +98,8 @@ define <2 x i1> @test5_zero() {
 
 define i32 @test6(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test6(
-; CHECK-NEXT:    [[E:%.*]] = ashr i32 [[A:%.*]], 31
-; CHECK-NEXT:    [[F:%.*]] = and i32 [[E]], [[B:%.*]]
+; CHECK-NEXT:    [[A_LOBIT_NEG:%.*]] = ashr i32 [[A:%.*]], 31
+; CHECK-NEXT:    [[F:%.*]] = and i32 [[A_LOBIT_NEG]], [[B:%.*]]
 ; CHECK-NEXT:    ret i32 [[F]]
 ;
   %c = icmp sle i32 %a, -1
@@ -1775,8 +1775,8 @@ define i1 @icmp_and_shl_neg_eq_0(i32 %A, i32 %B) {
 
 define i1 @icmp_add_and_shr_ne_0(i32 %X) {
 ; CHECK-LABEL: @icmp_add_and_shr_ne_0(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 224
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
 ; CHECK-NEXT:    ret i1 [[TOBOOL]]
 ;
   %shr = lshr i32 %X, 4
@@ -1788,8 +1788,8 @@ define i1 @icmp_add_and_shr_ne_0(i32 %X) {
 
 define <2 x i1> @icmp_add_and_shr_ne_0_vec(<2 x i32> %X) {
 ; CHECK-LABEL: @icmp_add_and_shr_ne_0_vec(
-; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 240, i32 240>
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne <2 x i32> [[AND]], <i32 224, i32 224>
+; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[X:%.*]], <i32 240, i32 240>
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne <2 x i32> [[TMP1]], <i32 224, i32 224>
 ; CHECK-NEXT:    ret <2 x i1> [[TOBOOL]]
 ;
   %shr = lshr <2 x i32> %X, <i32 4, i32 4>
@@ -1802,10 +1802,10 @@ define <2 x i1> @icmp_add_and_shr_ne_0_vec(<2 x i32> %X) {
 ; Variation of the above with an extra use of the shift
 define i1 @icmp_and_shr_multiuse(i32 %X) {
 ; CHECK-LABEL: @icmp_and_shr_multiuse(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[AND2:%.*]] = and i32 [[X]], 496
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 224
-; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[AND2]], 432
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
+; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
 ; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
 ; CHECK-NEXT:    ret i1 [[AND3]]
 ;
@@ -1821,10 +1821,10 @@ define i1 @icmp_and_shr_multiuse(i32 %X) {
 ; Variation of the above with an ashr
 define i1 @icmp_and_ashr_multiuse(i32 %X) {
 ; CHECK-LABEL: @icmp_and_ashr_multiuse(
-; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X:%.*]], 240
-; CHECK-NEXT:    [[AND2:%.*]] = and i32 [[X]], 496
-; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[AND]], 224
-; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[AND2]], 432
+; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 240
+; CHECK-NEXT:    [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 224
+; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[X]], 496
+; CHECK-NEXT:    [[TOBOOL2:%.*]] = icmp ne i32 [[TMP2]], 432
 ; CHECK-NEXT:    [[AND3:%.*]] = and i1 [[TOBOOL]], [[TOBOOL2]]
 ; CHECK-NEXT:    ret i1 [[AND3]]
 ;