AMDGPU: Enable LocalStackSlotAllocation pass

This resolves more frame indexes early and folds
the immediate offsets into the scratch mubuf instructions.

This cleans up a lot of the mess that is currently emitted,
such as adds of 0 and repeatedly re-initializing the same
register to 0 when spilling.

llvm-svn: 266508
Matt Arsenault 2016-04-16 02:13:37 +00:00
parent 51eff3c93d
commit ff544fe603
4 changed files with 231 additions and 20 deletions
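For context: LocalStackSlotAllocation only performs base-register rewriting
when the target opts in through the TargetRegisterInfo hooks implemented
below. The following is a minimal, hypothetical sketch of how a consumer of
those hooks fits together; rewriteFrameIndexUser is an illustrative helper,
not part of this patch, and the real pass additionally sorts stack objects
and reuses a base register across multiple instructions.

// Simplified sketch only -- not the actual LocalStackSlotAllocation code.
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

// Hypothetical helper: rewrite one frame-index user to go through a
// materialized VGPR base register, folding the displacement when legal.
static bool rewriteFrameIndexUser(MachineInstr &MI, int FrameIdx,
                                  int64_t ObjOffset,
                                  const SIRegisterInfo &TRI) {
  // Only memory-accessing instructions are worth rewriting.
  if (!TRI.needsFrameBaseReg(&MI, ObjOffset))
    return false;

  MachineBasicBlock *MBB = MI.getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // Materialize a VGPR holding the frame address once...
  unsigned BaseReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  TRI.materializeFrameBaseRegister(MBB, BaseReg, FrameIdx, /*Offset=*/0);

  // ...then fold the remaining displacement into the instruction if it fits
  // the scratch mubuf offset field (isFrameOffsetLegal checks the 12-bit
  // limit); otherwise leave the frame index for eliminateFrameIndex.
  if (!TRI.isFrameOffsetLegal(&MI, BaseReg, ObjOffset))
    return false;

  TRI.resolveFrameIndex(MI, BaseReg, ObjOffset);
  return true;
}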

@@ -227,6 +227,144 @@ SIRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
  return MF.getFrameInfo()->hasStackObjects();
}

bool SIRegisterInfo::requiresVirtualBaseRegisters(
  const MachineFunction &) const {
  // There are no special dedicated stack or frame pointers.
  return true;
}

int64_t SIRegisterInfo::getFrameIndexInstrOffset(const MachineInstr *MI,
                                                 int Idx) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const AMDGPUSubtarget &Subtarget = MF->getSubtarget<AMDGPUSubtarget>();
  const SIInstrInfo *TII
    = static_cast<const SIInstrInfo *>(Subtarget.getInstrInfo());

  if (!TII->isMUBUF(*MI))
    return 0;

  assert(Idx == AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::vaddr) &&
         "Should never see frame index on non-address operand");

  int OffIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                          AMDGPU::OpName::offset);
  return MI->getOperand(OffIdx).getImm();
}
bool SIRegisterInfo::needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  return MI->mayLoadOrStore();
}

void SIRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                  unsigned BaseReg,
                                                  int FrameIdx,
                                                  int64_t Offset) const {
  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL; // Defaults to "unknown"

  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  MachineFunction *MF = MBB->getParent();
  const AMDGPUSubtarget &Subtarget = MF->getSubtarget<AMDGPUSubtarget>();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  assert(isUInt<27>(Offset) &&
         "Private offset should never exceed maximum private size");

  if (Offset == 0) {
    BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_MOV_B32_e32), BaseReg)
      .addFrameIndex(FrameIdx);
    return;
  }

  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  BuildMI(*MBB, Ins, DL, TII->get(AMDGPU::V_ADD_I32_e64), BaseReg)
    .addReg(UnusedCarry, RegState::Define | RegState::Dead)
    .addImm(Offset)
    .addFrameIndex(FrameIdx);
}
void SIRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                       int64_t Offset) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const AMDGPUSubtarget &Subtarget = MF->getSubtarget<AMDGPUSubtarget>();
  const SIInstrInfo *TII
    = static_cast<const SIInstrInfo *>(Subtarget.getInstrInfo());

#ifndef NDEBUG
  // FIXME: Is it possible to be storing a frame index to itself?
  bool SeenFI = false;
  for (const MachineOperand &MO: MI.operands()) {
    if (MO.isFI()) {
      if (SeenFI)
        llvm_unreachable("should not see multiple frame indices");

      SeenFI = true;
    }
  }
#endif

  MachineOperand *FIOp = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  assert(FIOp && FIOp->isFI() && "frame index must be address operand");

  assert(TII->isMUBUF(MI));

  MachineOperand *OffsetOp = TII->getNamedOperand(MI, AMDGPU::OpName::offset);
  int64_t NewOffset = OffsetOp->getImm() + Offset;
  if (isUInt<12>(NewOffset)) {
    // If we have a legal offset, fold it directly into the instruction.
    FIOp->ChangeToRegister(BaseReg, false);
    OffsetOp->setImm(NewOffset);
    return;
  }

  // The offset is not legal, so we must insert an add of the offset.
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned NewReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  DebugLoc DL = MI.getDebugLoc();

  assert(Offset != 0 && "Non-zero offset expected");

  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  // In the case the instruction already had an immediate offset, here only
  // the requested new offset is added because we are leaving the original
  // immediate in place.
  BuildMI(*MBB, MI, DL, TII->get(AMDGPU::V_ADD_I32_e64), NewReg)
    .addReg(UnusedCarry, RegState::Define | RegState::Dead)
    .addImm(Offset)
    .addReg(BaseReg);

  FIOp->ChangeToRegister(NewReg, false);
}
bool SIRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                        unsigned BaseReg,
                                        int64_t Offset) const {
  const MachineFunction *MF = MI->getParent()->getParent();
  const AMDGPUSubtarget &Subtarget = MF->getSubtarget<AMDGPUSubtarget>();
  const SIInstrInfo *TII
    = static_cast<const SIInstrInfo *>(Subtarget.getInstrInfo());

  return TII->isMUBUF(*MI) && isUInt<12>(Offset);
}

const TargetRegisterClass *SIRegisterInfo::getPointerRegClass(
  const MachineFunction &MF, unsigned Kind) const {
  // This is inaccurate. It depends on the instruction and address space. The
  // only place where we should hit this is for dealing with frame indexes /
  // private accesses, so this is correct in that case.
  return &AMDGPU::VGPR_32RegClass;
}

static unsigned getNumSubRegsForSpillOp(unsigned Op) {

  switch (Op) {

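The key decision in resolveFrameIndex above is whether the combined
immediate still fits the unsigned 12-bit offset field of a MUBUF scratch
access; only then is the offset folded instead of emitting a separate
V_ADD_I32. A standalone restatement of that bound, using a hypothetical
helper name purely for illustration:

#include <cstdint>

// Mirrors the isUInt<12>(NewOffset) test in SIRegisterInfo::resolveFrameIndex:
// a scratch (MUBUF) access can absorb an extra displacement only if the
// combined immediate fits the unsigned 12-bit offset field (0..4095).
static bool canFoldIntoMUBUFOffset(int64_t ExistingImm, int64_t ExtraOffset) {
  const int64_t NewOffset = ExistingImm + ExtraOffset;
  return NewOffset >= 0 && NewOffset < (int64_t(1) << 12);
}

This is why the test checks below expect folded forms like "offen offset:2048",
while larger offsets such as 0x3ffc in the huge frame offset test still go
through an explicit add.
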
@@ -51,9 +51,30 @@ public:
  unsigned getRegPressureSetLimit(const MachineFunction &MF,
                                  unsigned Idx) const override;

  bool requiresRegisterScavenging(const MachineFunction &Fn) const override;

  bool requiresFrameIndexScavenging(const MachineFunction &MF) const override;

  bool requiresVirtualBaseRegisters(const MachineFunction &Fn) const override;

  int64_t getFrameIndexInstrOffset(const MachineInstr *MI,
                                   int Idx) const override;

  bool needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const override;

  void materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                    unsigned BaseReg, int FrameIdx,
                                    int64_t Offset) const override;

  void resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                         int64_t Offset) const override;

  bool isFrameOffsetLegal(const MachineInstr *MI, unsigned BaseReg,
                          int64_t Offset) const override;

  const TargetRegisterClass *getPointerRegClass(
    const MachineFunction &MF, unsigned Kind = 0) const override;

  void eliminateFrameIndex(MachineBasicBlock::iterator MI, int SPAdj,
                           unsigned FIOperandNum,
@@ -15,8 +15,11 @@ declare void @llvm.amdgcn.s.barrier() #2
; FIXME: We end up with zero argument for ADD, because
; SIRegisterInfo::eliminateFrameIndex() blindly replaces the frame index
; with the appropriate offset. We should fold this into the store.
; SI-ALLOCA: v_add_i32_e32 [[PTRREG:v[0-9]+]], vcc, 0, v{{[0-9]+}}
; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}]
; SI-ALLOCA: buffer_store_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:16
; SI-ALLOCA: s_barrier
; SI-ALLOCA: buffer_load_dword {{v[0-9]+}}, [[PTRREG]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen offset:16
;
; FIXME: The AMDGPUPromoteAlloca pass should be able to convert this
; alloca to a vector. It currently fails because it does not know how

@@ -16,25 +16,22 @@ define void @stored_fi_to_lds(float* addrspace(3)* %ptr) #0 {
; Offset is applied
; GCN-LABEL: {{^}}stored_fi_to_lds_2_small_objects:
; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:4{{$}}
; GCN: s_load_dword [[LDSPTR:s[0-9]+]]
; GCN: v_mov_b32_e32 [[ZERO1:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO1]]
; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[FI1]]
; GCN-DAG: v_mov_b32_e32 [[ZERO0:v[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 [[VLDSPTR:v[0-9]+]], [[LDSPTR]]
; GCN: ds_write_b32 [[VLDSPTR]], [[ZERO0]]
; GCN: ds_write_b32 [[VLDSPTR]], [[ZERO]]
; GCN-DAG: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
; GCN: ds_write_b32 [[VLDSPTR]], [[FI1]]
define void @stored_fi_to_lds_2_small_objects(float* addrspace(3)* %ptr) #0 {
%tmp0 = alloca float
%tmp1 = alloca float
store float 4.0, float *%tmp0
store float 4.0, float *%tmp1
store float 4.0, float* %tmp0
store float 4.0, float* %tmp1
store volatile float* %tmp0, float* addrspace(3)* %ptr
store volatile float* %tmp1, float* addrspace(3)* %ptr
ret void
@@ -42,6 +39,10 @@ define void @stored_fi_to_lds_2_small_objects(float* addrspace(3)* %ptr) #0 {
; Same frame index is used multiple times in the store
; GCN-LABEL: {{^}}stored_fi_to_self:
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x4d2{{$}}
; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[K]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN: buffer_store_dword [[ZERO]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
define void @stored_fi_to_self() #0 {
%tmp = alloca i32*
@@ -52,18 +53,42 @@ define void @stored_fi_to_self() #0 {
ret void
}
; GCN-LABEL: {{^}}stored_fi_to_self_offset:
; GCN-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 [[K0:v[0-9]+]], 32{{$}}
; GCN: buffer_store_dword [[K0]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0x4d2{{$}}
; GCN: buffer_store_dword [[K1]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2048{{$}}
; GCN: v_mov_b32_e32 [[OFFSETK:v[0-9]+]], 0x800{{$}}
; GCN: buffer_store_dword [[OFFSETK]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2048{{$}}
define void @stored_fi_to_self_offset() #0 {
%tmp0 = alloca [512 x i32]
%tmp1 = alloca i32*
; Avoid optimizing everything out
%tmp0.cast = bitcast [512 x i32]* %tmp0 to i32*
store volatile i32 32, i32* %tmp0.cast
store volatile i32* inttoptr (i32 1234 to i32*), i32** %tmp1
%bitcast = bitcast i32** %tmp1 to i32*
store volatile i32* %bitcast, i32** %tmp1
ret void
}
; GCN-LABEL: {{^}}stored_fi_to_fi:
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:4{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:8{{$}}
; GCN-DAG: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[FI2:v[0-9]+]], 8{{$}}
; GCN: buffer_store_dword [[FI1]], [[FI2]]
; GCN: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
; GCN: buffer_store_dword [[FI1]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:8{{$}}
; GCN-DAG: v_mov_b32_e32 [[FI1:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[FI2:v[0-9]+]], 8{{$}}
; GCN: buffer_store_dword [[FI2]], [[FI1]]
; GCN: v_mov_b32_e32 [[FI2:v[0-9]+]], 8{{$}}
; GCN: buffer_store_dword [[FI2]], [[ZERO]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:4{{$}}
define void @stored_fi_to_fi() #0 {
%tmp0 = alloca i32*
%tmp1 = alloca i32*
@@ -114,4 +139,28 @@ define void @stored_fi_to_global_2_small_objects(float* addrspace(1)* %ptr) #0 {
ret void
}
; GCN-LABEL: {{^}}stored_fi_to_global_huge_frame_offset:
; GCN: v_mov_b32_e32 [[BASE_0:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0x3e7{{$}}
; GCN-DAG: s_add_i32 [[BASE_1_OFF_0:s[0-9]+]], 0, 0x3ffc
; GCN-DAG: v_mov_b32_e32 [[V_BASE_1_OFF_0:v[0-9]+]], [[BASE_1_OFF_0]]
; GCN: buffer_store_dword [[K]], [[V_BASE_1_OFF_0]], s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}}
; GCN-DAG: s_add_i32 [[BASE_1_OFF_1:s[0-9]+]], 0, 56
; GCN-DAG: v_mov_b32_e32 [[V_BASE_1_OFF_1:v[0-9]+]], [[BASE_1_OFF_1]]
; GCN: buffer_store_dword [[V_BASE_1_OFF_1]], s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
define void @stored_fi_to_global_huge_frame_offset(i32* addrspace(1)* %ptr) #0 {
%tmp0 = alloca [4096 x i32]
%tmp1 = alloca [4096 x i32]
%gep0.tmp0 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 0
store volatile i32 0, i32* %gep0.tmp0
%gep1.tmp0 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 4095
store volatile i32 999, i32* %gep1.tmp0
%gep0.tmp1 = getelementptr [4096 x i32], [4096 x i32]* %tmp0, i32 0, i32 14
store i32* %gep0.tmp1, i32* addrspace(1)* %ptr
ret void
}
attributes #0 = { nounwind }