mirror of
https://github.com/RPCSX/llvm.git
synced 2024-12-05 02:16:46 +00:00
AMDGPU: Fix folding immediates into mac src2
Whether it is legal or not needs to check for the instruction it will be replaced with. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@291711 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
4214b4143a
commit
c6b1aed80d
@ -99,6 +99,34 @@ char SIFoldOperands::ID = 0;
|
||||
|
||||
char &llvm::SIFoldOperandsID = SIFoldOperands::ID;
|
||||
|
||||
// Wrapper around isInlineConstant that understands special cases when
|
||||
// instruction types are replaced during operand folding.
|
||||
static bool isInlineConstantIfFolded(const SIInstrInfo *TII,
|
||||
const MachineInstr &UseMI,
|
||||
unsigned OpNo,
|
||||
const MachineOperand &OpToFold) {
|
||||
if (TII->isInlineConstant(UseMI, OpNo, OpToFold))
|
||||
return true;
|
||||
|
||||
unsigned Opc = UseMI.getOpcode();
|
||||
switch (Opc) {
|
||||
case AMDGPU::V_MAC_F32_e64:
|
||||
case AMDGPU::V_MAC_F16_e64: {
|
||||
// Special case for mac. Since this is replaced with mad when folded into
|
||||
// src2, we need to check the legality for the final instruction.
|
||||
int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
|
||||
if (static_cast<int>(OpNo) == Src2Idx) {
|
||||
bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
|
||||
const MCInstrDesc &MadDesc
|
||||
= TII->get(IsF32 ? AMDGPU::V_MAD_F32 : AMDGPU::V_MAD_F16);
|
||||
return TII->isInlineConstant(OpToFold, MadDesc.OpInfo[OpNo].OperandType);
|
||||
}
|
||||
}
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Factory entry point for the SIFoldOperands machine-function pass.
// The caller (LLVM's pass-manager plumbing, by convention) assumes ownership
// of the returned heap-allocated pass instance.
FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}
|
||||
@ -171,7 +199,7 @@ static bool tryAddToFoldList(SmallVectorImpl<FoldCandidate> &FoldList,
|
||||
unsigned Opc = MI->getOpcode();
|
||||
if ((Opc == AMDGPU::V_MAC_F32_e64 || Opc == AMDGPU::V_MAC_F16_e64) &&
|
||||
(int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
|
||||
bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
|
||||
bool IsF32 = Opc == AMDGPU::V_MAC_F32_e64;
|
||||
|
||||
// Check if changing this to a v_mad_{f16, f32} instruction will allow us
|
||||
// to fold the operand.
|
||||
@ -611,7 +639,7 @@ void SIFoldOperands::foldInstOperand(MachineInstr &MI,
|
||||
// Folding immediates with more than one use will increase program size.
|
||||
// FIXME: This will also reduce register usage, which may be better
|
||||
// in some cases. A better heuristic is needed.
|
||||
if (TII->isInlineConstant(*UseMI, OpNo, OpToFold)) {
|
||||
if (isInlineConstantIfFolded(TII, *UseMI, OpNo, OpToFold)) {
|
||||
foldOperand(OpToFold, UseMI, OpNo, FoldList, CopiesToReplace);
|
||||
} else {
|
||||
if (++NumLiteralUses == 1) {
|
||||
|
@ -212,5 +212,71 @@ entry:
|
||||
ret void
|
||||
}
|
||||
|
||||
; Without special casing the inline constant check for v_mac_f32's
; src2, this fails to fold the 1.0 into a mad.

; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f32:
; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]

; The fsub of 1.0 below must become a v_mad with the 1.0 as an inline
; immediate operand (not a literal), exercising isInlineConstantIfFolded.
; GCN: v_add_f32_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; GCN: v_mad_f32 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
define void @fold_inline_imm_into_mac_src2_f32(float addrspace(1)* %out, float addrspace(1)* %a, float addrspace(1)* %b) #3 {
bb:
  ; Per-lane addresses into %a, %b, and %out.
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds float, float addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds float, float addrspace(1)* %b, i64 %tid.ext
  %gep.out = getelementptr inbounds float, float addrspace(1)* %out, i64 %tid.ext
  ; Volatile loads keep the two inputs as distinct VGPR values.
  %tmp = load volatile float, float addrspace(1)* %gep.a
  %tmp1 = load volatile float, float addrspace(1)* %gep.b
  ; 1.0 - (tmp + tmp) * 4.0 -> candidate for mac/mad with inline 1.0 in src2.
  %tmp2 = fadd float %tmp, %tmp
  %tmp3 = fmul float %tmp2, 4.0
  %tmp4 = fsub float 1.0, %tmp3
  %tmp5 = fadd float %tmp4, %tmp1
  ; Second, similar expression so 1.0 has multiple uses.
  %tmp6 = fadd float %tmp1, %tmp1
  %tmp7 = fmul float %tmp6, %tmp
  %tmp8 = fsub float 1.0, %tmp7
  %tmp9 = fmul float %tmp8, 8.0
  %tmp10 = fadd float %tmp5, %tmp9
  store float %tmp10, float addrspace(1)* %gep.out
  ret void
}
|
||||
|
||||
; GCN-LABEL: {{^}}fold_inline_imm_into_mac_src2_f16:
; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
; GCN: {{buffer|flat}}_load_ushort [[B:v[0-9]+]]

; FIXME: How is this not folded?
; SI: v_cvt_f32_f16_e32 v{{[0-9]+}}, 0x3c00

; f16 variant of the test above: on VI the 1.0 must fold into v_mad_f16 as an
; inline immediate.
; VI: v_add_f16_e32 [[TMP2:v[0-9]+]], [[A]], [[A]]
; VI: v_mad_f16 v{{[0-9]+}}, [[TMP2]], -4.0, 1.0
define void @fold_inline_imm_into_mac_src2_f16(half addrspace(1)* %out, half addrspace(1)* %a, half addrspace(1)* %b) #3 {
bb:
  ; Per-lane addresses into %a, %b, and %out.
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %gep.a = getelementptr inbounds half, half addrspace(1)* %a, i64 %tid.ext
  %gep.b = getelementptr inbounds half, half addrspace(1)* %b, i64 %tid.ext
  %gep.out = getelementptr inbounds half, half addrspace(1)* %out, i64 %tid.ext
  ; Volatile loads keep the two inputs as distinct VGPR values.
  %tmp = load volatile half, half addrspace(1)* %gep.a
  %tmp1 = load volatile half, half addrspace(1)* %gep.b
  ; 1.0 - (tmp + tmp) * 4.0 -> candidate for mac/mad with inline 1.0 in src2.
  %tmp2 = fadd half %tmp, %tmp
  %tmp3 = fmul half %tmp2, 4.0
  %tmp4 = fsub half 1.0, %tmp3
  %tmp5 = fadd half %tmp4, %tmp1
  ; Second, similar expression so 1.0 has multiple uses.
  %tmp6 = fadd half %tmp1, %tmp1
  %tmp7 = fmul half %tmp6, %tmp
  %tmp8 = fsub half 1.0, %tmp7
  %tmp9 = fmul half %tmp8, 8.0
  %tmp10 = fadd half %tmp5, %tmp9
  store half %tmp10, half addrspace(1)* %gep.out
  ret void
}
|
||||
|
||||
declare i32 @llvm.amdgcn.workitem.id.x() #2
|
||||
|
||||
attributes #0 = { nounwind "unsafe-fp-math"="false" }
|
||||
attributes #1 = { nounwind "unsafe-fp-math"="true" }
|
||||
attributes #2 = { nounwind readnone }
|
||||
attributes #3 = { nounwind }
|
||||
|
Loading…
Reference in New Issue
Block a user