diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 1af36086ad9..5b8c645f824 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -886,18 +886,21 @@ static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
   // Create the new constant immediate node.
   EVT VT = Op.getValueType();
   SDLoc DL(Op);
+  SDValue New;
 
   // If the new constant immediate is all-zeros or all-ones, let the target
   // independent DAG combine optimize this node.
-  if (NewImm == 0 || NewImm == OrigMask)
-    return TLO.CombineTo(Op.getOperand(1), TLO.DAG.getConstant(NewImm, DL, VT));
-
+  if (NewImm == 0 || NewImm == OrigMask) {
+    New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
+                          TLO.DAG.getConstant(NewImm, DL, VT));
   // Otherwise, create a machine node so that target independent DAG combine
   // doesn't undo this optimization.
-  Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
-  SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
-  SDValue New(
-      TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
+  } else {
+    Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
+    SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
+    New = SDValue(
+        TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
+  }
 
   return TLO.CombineTo(Op, New);
 }
diff --git a/test/CodeGen/AArch64/optimize-imm.ll b/test/CodeGen/AArch64/optimize-imm.ll
index a4725c65aa2..f960a3a95fc 100644
--- a/test/CodeGen/AArch64/optimize-imm.ll
+++ b/test/CodeGen/AArch64/optimize-imm.ll
@@ -62,3 +62,22 @@ entry:
   %and = xor i32 %xor, 56
   ret i32 %and
 }
+
+; Check that, when (and %t1, 129) is transformed to (and %t0, 0),
+; (xor %arg, 129) doesn't get transformed to (xor %arg, 0).
+;
+; CHECK-LABEL: PR33100:
+; CHECK: mov w[[R0:[0-9]+]], #129
+; CHECK: eor {{x[0-9]+}}, {{x[0-9]+}}, x[[R0]]
+
+define i64 @PR33100(i64 %arg) {
+entry:
+  %alloca0 = alloca i64
+  store i64 8, i64* %alloca0, align 4
+  %t0 = load i64, i64* %alloca0, align 4
+  %t1 = shl i64 %arg, %t0
+  %and0 = and i64 %t1, 129
+  %xor0 = xor i64 %arg, 129
+  %t2 = add i64 %and0, %xor0
+  ret i64 %t2
+}
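
Note (reviewer context, not part of the patch): in a SelectionDAG, constant nodes are uniqued, so a single node for the immediate 129 feeds both the `and` and the `xor` in the test above. The removed code called TLO.CombineTo(Op.getOperand(1), ...), which replaces every use of that shared constant node; folding the `and` to zero therefore also rewrote the unrelated (xor %arg, 129) into (xor %arg, 0). The patched code builds a replacement for Op itself and combines that, leaving the constant node and its other users alone. The toy program below is a minimal sketch of this shared-node hazard; it is plain C++ with no LLVM dependency, and all names in it (Node, DAG, getConstant) are invented stand-ins for the SelectionDAG machinery, which likewise uniques constants (though CombineTo replaces uses rather than mutating a node in place):

#include <cstdint>
#include <iostream>
#include <map>
#include <memory>

// Toy DAG in which constant nodes are uniqued (CSE'd), mirroring how
// SelectionDAG hands every user of the same immediate the same node.
struct Node {
  uint64_t Value;
};

struct DAG {
  std::map<uint64_t, std::shared_ptr<Node>> UniquedConstants;

  // Returns the one shared node for the immediate V, creating it on first use.
  std::shared_ptr<Node> getConstant(uint64_t V) {
    auto &Slot = UniquedConstants[V];
    if (!Slot)
      Slot = std::make_shared<Node>(Node{V});
    return Slot;
  }
};

int main() {
  DAG G;
  // Both operations reference the immediate 129, as in the test case:
  //   %and0 = and i64 %t1, 129
  //   %xor0 = xor i64 %arg, 129
  // Uniquing means they receive the very same node.
  std::shared_ptr<Node> AndImm = G.getConstant(129);
  std::shared_ptr<Node> XorImm = G.getConstant(129);

  // Buggy combine, analogous to the removed CombineTo(Op.getOperand(1), ...):
  // rewrite the shared constant node so the 'and' folds to zero...
  AndImm->Value = 0;

  // ...and the unrelated 'xor' user observes the change too.
  std::cout << "xor immediate is now " << XorImm->Value << "\n"; // prints 0
}

Replacing Op itself, as the patch does, avoids this: only the `and` node is rewritten, and the FileCheck lines above verify that the `xor` still materializes #129.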