AMDGPU: Treat undef as an inline immediate

This should only matter for vectors with an undef component, since a fully
undef vector would have been folded out.

llvm-svn: 363941
Matt Arsenault 2019-06-20 16:01:09 +00:00
parent ab0abbf6e9
commit abe33e0352
3 changed files with 21 additions and 9 deletions
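
For context, the pattern this change targets looks roughly like the following minimal LLVM IR sketch (the function name and exact constant are illustrative, not taken from the tests in this commit): a packed <2 x i16> operation whose constant operand has one undef lane. Treating the undef lane as an inline-immediate value lets the whole packed constant be encoded inline instead of first being materialized into an SGPR.

define <2 x i16> @add_undef_neg32(<2 x i16> %x) {
  ; One lane of the constant is undef, the other is -32. With undef treated
  ; as an inline immediate, no separate s_mov of the packed constant is
  ; needed before the packed add/sub that consumes it.
  %r = add <2 x i16> %x, <i16 undef, i16 -32>
  ret <2 x i16> %r
}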

llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp

@@ -67,7 +67,22 @@ class R600InstrInfo;
 namespace {
 
+static bool isNullConstantOrUndef(SDValue V) {
+  if (V.isUndef())
+    return true;
+
+  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
+  return Const != nullptr && Const->isNullValue();
+}
+
 static bool getConstantValue(SDValue N, uint32_t &Out) {
+  // This is only used for packed vectors, where using 0 for undef should
+  // always be good.
+  if (N.isUndef()) {
+    Out = 0;
+    return true;
+  }
+
   if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
     Out = C->getAPIntValue().getSExtValue();
     return true;
@@ -479,7 +494,8 @@ bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
 bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                            bool Negated) const {
-  // TODO: Handle undef
+  if (N->isUndef())
+    return true;
   const SIInstrInfo *TII = Subtarget->getInstrInfo();
   if (Negated) {

llvm/lib/Target/AMDGPU/SIInstrInfo.td

@@ -610,8 +610,6 @@ def getNegV2I16Imm : SDNodeXForm<build_vector, [{
   return SDValue(packNegConstantV2I16(N, *CurDAG), 0);
 }]>;
 
-// TODO: Handle undef as 0
 def NegSubInlineConstV216 : PatLeaf<(build_vector), [{
   assert(N->getNumOperands() == 2);
   assert(N->getOperand(0).getValueType().getSizeInBits() == 16);
@@ -620,8 +618,8 @@ def NegSubInlineConstV216 : PatLeaf<(build_vector), [{
   if (Src0 == Src1)
     return isNegInlineImmediate(Src0.getNode());
 
-  return (isNullConstant(Src0) && isNegInlineImmediate(Src1.getNode())) ||
-         (isNullConstant(Src1) && isNegInlineImmediate(Src0.getNode()));
+  return (isNullConstantOrUndef(Src0) && isNegInlineImmediate(Src1.getNode())) ||
+         (isNullConstantOrUndef(Src1) && isNegInlineImmediate(Src0.getNode()));
 }], getNegV2I16Imm>;
 
 //===----------------------------------------------------------------------===//

llvm/test/CodeGen/AMDGPU/shrink-add-sub-constant.ll

@@ -1885,7 +1885,6 @@ define amdgpu_kernel void @v_test_v2i16_x_add_undef_neg32(<2 x i16> addrspace(1)
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GFX9-NEXT: s_mov_b32 s4, 0xffe00000
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v2
@@ -1895,7 +1894,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_undef_neg32(<2 x i16> addrspace(1)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s1
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_pk_add_u16 v2, v3, s4
+; GFX9-NEXT: v_pk_sub_u16 v2, v3, 32 op_sel:[0,1] op_sel_hi:[1,0]
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1947,7 +1946,6 @@ define amdgpu_kernel void @v_test_v2i16_x_add_neg32_undef(<2 x i16> addrspace(1)
 ; GFX9: ; %bb.0:
 ; GFX9-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
 ; GFX9-NEXT: v_lshlrev_b32_e32 v2, 2, v0
-; GFX9-NEXT: s_movk_i32 s4, 0xffe0
 ; GFX9-NEXT: s_waitcnt lgkmcnt(0)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s3
 ; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s2, v2
@@ -1957,7 +1955,7 @@ define amdgpu_kernel void @v_test_v2i16_x_add_neg32_undef(<2 x i16> addrspace(1)
 ; GFX9-NEXT: v_mov_b32_e32 v1, s1
 ; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
 ; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_pk_add_u16 v2, v3, s4
+; GFX9-NEXT: v_pk_sub_u16 v2, v3, 32
 ; GFX9-NEXT: global_store_dword v[0:1], v2, off
 ; GFX9-NEXT: s_endpgm
   %tid = call i32 @llvm.amdgcn.workitem.id.x()