R600: Add support for 24-bit MUL instructions

Reviewed-by: Vincent Lejeune <vljn at ovi.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@186922 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Tom Stellard
Date:   2013-07-23 01:48:42 +00:00
Parent: eb643b9b37
Commit: 3f5d63b956
6 changed files with 159 additions and 5 deletions

@@ -58,6 +58,9 @@ private:
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);
  SDValue SimplifyI24(SDValue &Op);
  bool SelectI24(SDValue Addr, SDValue &Op);
  bool SelectU24(SDValue Addr, SDValue &Op);
  static bool checkType(const Value *ptr, unsigned int addrspace);
@@ -674,7 +677,9 @@ const char *AMDGPUDAGToDAGISel::getPassName() const {
#endif
#undef DEBUGTMP

///==== AMDGPU Functions ====///
//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                         SDValue& IntPtr) {
@@ -741,6 +746,49 @@ bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
  return true;
}

SDValue AMDGPUDAGToDAGISel::SimplifyI24(SDValue &Op) {
  // Demand only the low 24 bits of Op and let SimplifyDemandedBits fold away
  // anything that only feeds the high bits, repeating until nothing changes.
  APInt Demanded = APInt(32, 0x00FFFFFF);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(*CurDAG, true, true);
  const TargetLowering *TLI = getTargetLowering();

  if (TLI->SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) {
    CurDAG->ReplaceAllUsesWith(Op, TLO.New);
    CurDAG->RepositionNode(Op.getNode(), TLO.New.getNode());
    return SimplifyI24(TLO.New);
  } else {
    return Op;
  }
}

bool AMDGPUDAGToDAGISel::SelectI24(SDValue Op, SDValue &I24) {
  assert(Op.getValueType() == MVT::i32);

  // Nine sign bits in an i32 mean the value is representable in 24 signed
  // bits, so it is safe to feed to a signed 24-bit multiply.
  if (CurDAG->ComputeNumSignBits(Op) == 9) {
    I24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

bool AMDGPUDAGToDAGISel::SelectU24(SDValue Op, SDValue &U24) {
  APInt KnownZero;
  APInt KnownOne;
  CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);

  assert(Op.getValueType() == MVT::i32);

  // ANY_EXTEND and EXTLOAD operations can only be done on types smaller than
  // i32. These smaller types are legal to use with the i24 instructions.
  if ((KnownZero & APInt(KnownZero.getBitWidth(), 0xFF000000)) == 0xFF000000 ||
      Op.getOpcode() == ISD::ANY_EXTEND ||
      ISD::isEXTLoad(Op.getNode())) {
    U24 = SimplifyI24(Op);
    return true;
  }
  return false;
}

void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  if (Subtarget.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS) {

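The SelectU24 path above also accepts operands whose upper eight bits are provably zero, not only values that were explicitly masked. As a rough sketch of IR that should take this path (illustrative only, not part of the commit's test suite; the function name @u16_zext_mul24 is made up), multiplying two zero-extended i16 values gives ComputeMaskedBits enough information to prove both operands fit in 24 unsigned bits, so the (mul U24, U24) patterns added below should select MUL_UINT24 on Evergreen/Cayman and V_MUL_U32_U24 on SI:

; Illustrative sketch, not from this commit: zext from i16 makes bits 31-16
; known zero, so SelectU24 should accept both multiply operands.
define void @u16_zext_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
  %a32 = zext i16 %a to i32
  %b32 = zext i16 %b to i32
  %mul = mul i32 %a32, %b32
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}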
@@ -173,6 +173,9 @@ def FP_ONE : PatLeaf <
  [{return N->isExactlyValue(1.0);}]
>;

def U24 : ComplexPattern<i32, 1, "SelectU24", [], []>;
def I24 : ComplexPattern<i32, 1, "SelectI24", [], []>;

let isCodeGenOnly = 1, isPseudo = 1 in {

let usesCustomInserter = 1 in {
@@ -366,6 +369,16 @@ class ROTRPattern <Instruction BIT_ALIGN> : Pat <
  (BIT_ALIGN $src0, $src0, $src1)
>;

// 24-bit arithmetic patterns
def umul24 : PatFrag <(ops node:$x, node:$y), (mul node:$x, node:$y)>;

/*
class UMUL24Pattern <Instruction UMUL24> : Pat <
  (mul U24:$x, U24:$y),
  (UMUL24 $x, $y)
>;
*/

include "R600Instructions.td"

include "SIInstrInfo.td"

@@ -1473,6 +1473,9 @@ let Predicates = [isEGorCayman] in {
  def CNDGE_eg : CNDGE_Common<0x1B>;
  def MUL_LIT_eg : MUL_LIT_Common<0x1F>;
  def LOG_CLAMPED_eg : LOG_CLAMPED_Common<0x82>;
  def MUL_UINT24_eg : R600_2OP <0xB5, "MUL_UINT24",
    [(set i32:$dst, (mul U24:$src0, U24:$src1))], VecALU
  >;
  def DOT4_eg : DOT4_Common<0xBE>;
  defm CUBE_eg : CUBE_Common<0xC0>;
@@ -1703,6 +1706,10 @@ defm R600_ : RegisterLoadStore <R600_Reg32, FRAMEri, ADDRIndirect>;
let Predicates = [isCayman] in {

def MUL_INT24_cm : R600_2OP <0x5B, "MUL_INT24",
  [(set i32:$dst, (mul I24:$src0, I24:$src1))], VecALU
>;

let isVector = 1 in {
def RECIP_IEEE_cm : RECIP_IEEE_Common<0x86>;

@@ -866,14 +866,16 @@ defm V_MUL_F32 : VOP2_32 <0x00000008, "V_MUL_F32",
  [(set f32:$dst, (fmul f32:$src0, f32:$src1))]
>;
} // End isCommutable = 1

//defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24", []>;
defm V_MUL_I32_I24 : VOP2_32 <0x00000009, "V_MUL_I32_I24",
  [(set i32:$dst, (mul I24:$src0, I24:$src1))]
>;
//defm V_MUL_HI_I32_I24 : VOP2_32 <0x0000000a, "V_MUL_HI_I32_I24", []>;
//defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24", []>;
defm V_MUL_U32_U24 : VOP2_32 <0x0000000b, "V_MUL_U32_U24",
  [(set i32:$dst, (mul U24:$src0, U24:$src1))]
>;
//defm V_MUL_HI_U32_U24 : VOP2_32 <0x0000000c, "V_MUL_HI_U32_U24", []>;

let isCommutable = 1 in {
defm V_MIN_LEGACY_F32 : VOP2_32 <0x0000000d, "V_MIN_LEGACY_F32",
  [(set f32:$dst, (AMDGPUfmin f32:$src0, f32:$src1))]

@@ -0,0 +1,19 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
; EG-CHECK: @i32_mul24
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG-CHECK: MULLO_INT
; CM-CHECK: MUL_INT24 {{[ *]*}}T{{[0-9].[XYZW]}}, KC0[2].Z, KC0[2].W
; SI-CHECK: V_MUL_I32_I24
define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
%a_24 = ashr i32 %0, 8
%1 = shl i32 %b, 8
%b_24 = ashr i32 %1, 8
%2 = mul i32 %a_24, %b_24
store i32 %2, i32 addrspace(1)* %out
ret void
}

@@ -0,0 +1,65 @@
; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG-CHECK
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG-CHECK
; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
; EG-CHECK: @u32_mul24
; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI-CHECK: @u32_mul24
; SI-CHECK: V_MUL_U32_U24
define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
%1 = shl i32 %b, 8
%b_24 = lshr i32 %1, 8
%2 = mul i32 %a_24, %b_24
store i32 %2, i32 addrspace(1)* %out
ret void
}
; EG-CHECK: @i16_mul24
; EG-CHECK-DAG: VTX_READ_16 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
; EG-CHECK-DAG: VTX_READ_16 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
; The order of A and B does not matter.
; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
; The result must be sign-extended
; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
; EG-CHECK: 16
; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
; EG-CHECK: 16
; SI-CHECK: @i16_mul24
; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 16, [[MUL]]
; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 16, [[LSHL]]
define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
%0 = mul i16 %a, %b
%1 = sext i16 %0 to i32
store i32 %1, i32 addrspace(1)* %out
ret void
}
; EG-CHECK: @i8_mul24
; EG-CHECK-DAG: VTX_READ_8 [[A:T[0-9]\.X]], T{{[0-9]}}.X, 40
; EG-CHECK-DAG: VTX_READ_8 [[B:T[0-9]\.X]], T{{[0-9]}}.X, 44
; The order of A and B does not matter.
; EG-CHECK: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]], [[A]], [[B]]
; The result must be sign-extended
; EG-CHECK: LSHL {{[* ]*}}T{{[0-9]}}.[[LSHL_CHAN:[XYZW]]], PV.[[MUL_CHAN]], literal.x
; EG-CHECK: 24
; EG-CHECK: ASHR {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[LSHL_CHAN]], literal.x
; EG-CHECK: 24
; SI-CHECK: @i8_mul24
; SI-CHECK: V_MUL_U32_U24_e{{(32|64)}} [[MUL:VGPR[0-9]]], {{[SV]GPR[0-9], [SV]GPR[0-9]}}
; SI-CHECK: V_LSHLREV_B32_e32 [[LSHL:VGPR[0-9]]], 24, [[MUL]]
; SI-CHECK: V_ASHRREV_I32_e32 VGPR{{[0-9]}}, 24, [[LSHL]]
define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
entry:
%0 = mul i8 %a, %b
%1 = sext i8 %0 to i32
store i32 %1, i32 addrspace(1)* %out
ret void
}
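
For contrast, when neither operand of a 32-bit multiply can be proven to fit in 24 bits, SelectI24 and SelectU24 both fail and the generic multiply is used instead (MULLO_INT in the Evergreen checks above). A minimal sketch, again not taken from the commit's tests and with a made-up function name:

; Illustrative sketch, not from this commit: %a and %b are arbitrary i32
; values, so no 24-bit range can be proven and the new 24-bit patterns
; should not fire.
define void @full_i32_mul(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
  %mul = mul i32 %a, %b
  store i32 %mul, i32 addrspace(1)* %out
  ret void
}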