R600: Swap the legality of rotl and rotr

The hardware supports rotr and not rotl.

llvm-svn: 182285
This commit is contained in:
Tom Stellard 2013-05-20 15:02:19 +00:00
parent 1609774b15
commit 5ca265d214
8 changed files with 40 additions and 28 deletions

View File

@ -46,6 +46,9 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
setOperationAction(ISD::FRINT, MVT::f32, Legal);
// The hardware supports ROTR, but not ROTL
setOperationAction(ISD::ROTL, MVT::i32, Expand);
// Lower floating point store/load to integer store/load to reduce the number
// of patterns in tablegen.
setOperationAction(ISD::STORE, MVT::f32, Promote);

View File

@ -115,7 +115,6 @@ enum {
RET_FLAG,
BRANCH_COND,
// End AMDIL ISD Opcodes
BITALIGN,
BUFFER_STORE,
DWORDADDR,
FRACT,

View File

@ -23,12 +23,6 @@ def AMDGPUDTIntTernaryOp : SDTypeProfile<1, 3, [
// AMDGPU DAG Nodes
//
// out = ((a << 32) | b) >> c
//
// Can be used to optimize rotl:
// rotl(a, b) = bitalign(a, a, 32 - b)
def AMDGPUbitalign : SDNode<"AMDGPUISD::BITALIGN", AMDGPUDTIntTernaryOp>;
// This argument to this node is a dword address.
def AMDGPUdwordaddr : SDNode<"AMDGPUISD::DWORDADDR", SDTIntUnaryOp>;

View File

@ -295,6 +295,12 @@ class BFEPattern <Instruction BFE> : Pat <
(BFE $x, $y, $z)
>;
// rotr pattern
class ROTRPattern <Instruction BIT_ALIGN> : Pat <
(rotr i32:$src0, i32:$src1),
(BIT_ALIGN $src0, $src0, $src1)
>;
include "R600Instructions.td"
include "SIInstrInfo.td"

View File

@ -138,8 +138,6 @@ void AMDGPUTargetLowering::InitAMDILLowering() {
setOperationAction(ISD::SMUL_LOHI, VT, Expand);
setOperationAction(ISD::UMUL_LOHI, VT, Expand);
// GPU doesn't have a rotl, rotr, or byteswap instruction
setOperationAction(ISD::ROTR, VT, Expand);
setOperationAction(ISD::BSWAP, VT, Expand);
// GPU doesn't have any counting operators

View File

@ -72,8 +72,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i1, Custom);
setOperationAction(ISD::ROTL, MVT::i32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
@ -480,7 +478,6 @@ using namespace llvm::AMDGPUIntrinsic;
SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
switch (Op.getOpcode()) {
default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
case ISD::ROTL: return LowerROTL(Op, DAG);
case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
case ISD::SELECT: return LowerSELECT(Op, DAG);
case ISD::STORE: return LowerSTORE(Op, DAG);
@ -765,18 +762,6 @@ SDValue R600TargetLowering::LowerFrameIndex(SDValue Op, SelectionDAG &DAG) const
return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), MVT::i32);
}
SDValue R600TargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
DebugLoc DL = Op.getDebugLoc();
EVT VT = Op.getValueType();
return DAG.getNode(AMDGPUISD::BITALIGN, DL, VT,
Op.getOperand(0),
Op.getOperand(0),
DAG.getNode(ISD::SUB, DL, VT,
DAG.getConstant(32, MVT::i32),
Op.getOperand(1)));
}
bool R600TargetLowering::isZero(SDValue Op) const {
if(ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
return Cst->isNullValue();

View File

@ -1635,10 +1635,8 @@ let Predicates = [isEGorCayman] in {
def BFI_INT_eg : R600_3OP <0x06, "BFI_INT", [], VecALU>;
defm : BFIPatterns <BFI_INT_eg>;
def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT",
[(set i32:$dst, (AMDGPUbitalign i32:$src0, i32:$src1, i32:$src2))],
VecALU
>;
def BIT_ALIGN_INT_eg : R600_3OP <0xC, "BIT_ALIGN_INT", [], VecALU>;
def : ROTRPattern <BIT_ALIGN_INT_eg>;
def MULADD_eg : MULADD_Common<0x14>;
def MULADD_IEEE_eg : MULADD_IEEE_Common<0x18>;

29
test/CodeGen/R600/rotr.ll Normal file
View File

@ -0,0 +1,29 @@
; RUN: llc < %s -debug-only=isel -march=r600 -mcpu=redwood -o - 2>&1 | FileCheck %s
; CHECK: rotr
; CHECK: @rotr
; CHECK: BIT_ALIGN_INT
define void @rotr(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
%0 = sub i32 32, %y
%1 = shl i32 %x, %0
%2 = lshr i32 %x, %y
%3 = or i32 %1, %2
store i32 %3, i32 addrspace(1)* %in
ret void
}
; CHECK: rotr
; CHECK: @rotl
; CHECK: SUB_INT {{\** T[0-9]+\.[XYZW]}}, literal.x
; CHECK-NEXT: 32
; CHECK: BIT_ALIGN_INT {{\** T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], PV.[xyzw]}}
define void @rotl(i32 addrspace(1)* %in, i32 %x, i32 %y) {
entry:
%0 = shl i32 %x, %y
%1 = sub i32 32, %y
%2 = lshr i32 %x, %1
%3 = or i32 %0, %2
store i32 %3, i32 addrspace(1)* %in
ret void
}