From c2b879fcfe3834597948d5dd6044a3f32baee275 Mon Sep 17 00:00:00 2001
From: Jim Grosbach
Date: Sat, 31 Oct 2009 19:38:01 +0000
Subject: [PATCH] Expand 64 bit left shift inline rather than using the libcall. For now, this is unconditional. Making it still use the libcall when optimizing for size would be a good adjustment.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@85675 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/ARM/ARMISelLowering.cpp | 37 +++++++++++++++++++++++++++++-
 test/CodeGen/ARM/long_shift.ll     |  2 +-
 2 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp
index 07bee3e9301..6263fbb825f 100644
--- a/lib/Target/ARM/ARMISelLowering.cpp
+++ b/lib/Target/ARM/ARMISelLowering.cpp
@@ -330,7 +330,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM)
     if (!Subtarget->hasV6Ops())
       setOperationAction(ISD::MULHS, MVT::i32, Expand);
   }
-  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
   setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
   setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
   setOperationAction(ISD::SRL, MVT::i64, Custom);
@@ -2096,6 +2096,40 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) {
   return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec);
 }
 
+/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
+/// i32 values and take a 2 x i32 value to shift plus a shift amount.
+static SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG,
+                                   const ARMSubtarget *ST) {
+  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
+  EVT VT = Op.getValueType();
+  unsigned VTBits = VT.getSizeInBits();
+  DebugLoc dl = Op.getDebugLoc();
+  SDValue ShOpLo = Op.getOperand(0);
+  SDValue ShOpHi = Op.getOperand(1);
+  SDValue ShAmt = Op.getOperand(2);
+  SDValue ARMCC;
+
+  assert(Op.getOpcode() == ISD::SHL_PARTS);
+  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
+                                 DAG.getConstant(VTBits, MVT::i32), ShAmt);
+  SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
+  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
+                                   DAG.getConstant(VTBits, MVT::i32));
+  SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
+  SDValue Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
+
+  SDValue FalseVal = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
+  SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
+  SDValue Cmp = getARMCmp(ExtraShAmt, DAG.getConstant(0, MVT::i32), ISD::SETGE,
+                          ARMCC, DAG, ST->isThumb1Only(), dl);
+  SDValue Lo = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
+  SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, Tmp3, ARMCC,
+                           CCR, Cmp);
+
+  SDValue Ops[2] = { Lo, Hi };
+  return DAG.getMergeValues(Ops, 2, dl);
+}
+
 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
                           const ARMSubtarget *ST) {
   EVT VT = N->getValueType(0);
@@ -2788,6 +2822,7 @@ SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) {
   case ISD::SHL:
   case ISD::SRL:
   case ISD::SRA:          return LowerShift(Op.getNode(), DAG, Subtarget);
+  case ISD::SHL_PARTS:    return LowerShiftLeftParts(Op, DAG, Subtarget);
   case ISD::VSETCC:       return LowerVSETCC(Op, DAG);
   case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
   case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
diff --git a/test/CodeGen/ARM/long_shift.ll b/test/CodeGen/ARM/long_shift.ll
index 6dbd781a7cc..e72ae785124 100644
--- a/test/CodeGen/ARM/long_shift.ll
+++ b/test/CodeGen/ARM/long_shift.ll
@@ -11,7 +11,7 @@ define i64 @f0(i64 %A, i64 %B) {
 
 define i32 @f1(i64 %x, i64 %y) {
 ; CHECK: f1
-; CHECK: __ashldi3
+; CHECK: mov r0, r0, lsl r2
 	%a = shl i64 %x, %y
 	%b = trunc i64 %a to i32
 	ret i32 %b
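
Note (not part of the patch): the DAG nodes built in LowerShiftLeftParts implement the standard two-word left shift. The low word is simply shifted; the high word is (Hi << Amt) | (Lo >> (32 - Amt)) when the shift amount is below the word size, or Lo << (Amt - 32) otherwise, with the choice made by an ARMISD::CMOV keyed on whether Amt - 32 is non-negative. The sketch below restates that logic in plain C++ as a rough illustration; the helper names (shl_parts, shl32, lsr32) and the main() driver are invented here, and the explicit clamping stands in for ARM's register-specified shift semantics, where amounts of 32 or more produce zero (plain C++ shifts by >= 32 would be undefined).

// Illustrative sketch only -- not part of the commit. Helper names are
// hypothetical; the clamping mimics the "shift by 32 or more yields 0"
// behavior of ARM register-specified shifts that the lowering relies on.
#include <cstdint>
#include <cstdio>

static uint32_t shl32(uint32_t v, uint32_t amt) { return amt < 32 ? v << amt : 0; }
static uint32_t lsr32(uint32_t v, uint32_t amt) { return amt < 32 ? v >> amt : 0; }

// A 64-bit left shift expressed on 32-bit halves, mirroring the select
// between the "amt < 32" and "amt >= 32" cases in LowerShiftLeftParts.
static void shl_parts(uint32_t lo, uint32_t hi, uint32_t amt,
                      uint32_t &outLo, uint32_t &outHi) {
  uint32_t falseVal = shl32(hi, amt) | lsr32(lo, 32 - amt); // amt < 32: OR of Tmp2 and Tmp1
  uint32_t trueVal  = shl32(lo, amt - 32);                  // amt >= 32: Tmp3
  outHi = (amt >= 32) ? trueVal : falseVal;                 // the CMOV on (amt - 32) >= 0
  outLo = shl32(lo, amt);                                   // low word is a plain SHL
}

int main() {
  uint32_t lo, hi;
  shl_parts(0x80000001u, 0x00000000u, 4, lo, hi); // 0x0000000080000001 << 4
  printf("%08x%08x\n", hi, lo);                   // prints 0000000800000010
  return 0;
}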