AMDGPU: Fix dropping mem operands when moving to VALU

Without a memory operand, mayLoad or mayStore instructions
are treated as hasUnorderedMemRef, which results in much worse
scheduling.

We really should have a verifier check that any
non-side effecting mayLoad or mayStore has a memory operand.
There are a few instructions (interp and image instructions) for which
I'm not sure what memory operands to add, or where to add them.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@246356 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Matt Arsenault 2015-08-29 06:48:46 +00:00
parent 127a3d74f1
commit 0601263423
2 changed files with 64 additions and 11 deletions

View File

@ -1888,17 +1888,18 @@ void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
// Create the new instruction.
unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
MachineInstr *Addr64 =
BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
.addOperand(*VData)
.addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
// This will be replaced later
// with the new value of vaddr.
.addOperand(*SRsrc)
.addOperand(*SOffset)
.addOperand(*Offset)
.addImm(0) // glc
.addImm(0) // slc
.addImm(0); // tfe
BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
.addOperand(*VData)
.addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
// This will be replaced later
// with the new value of vaddr.
.addOperand(*SRsrc)
.addOperand(*SOffset)
.addOperand(*Offset)
.addImm(0) // glc
.addImm(0) // slc
.addImm(0) // tfe
.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
MI->removeFromParent();
MI = Addr64;

View File

@ -0,0 +1,52 @@
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s
; The memory operand was dropped from the buffer_load_dword offset form
; when it was replaced with the addr64 form during operand legalization,
; resulting in the global loads not being scheduled together.
; GCN-LABEL: {{^}}reschedule_global_load_lds_store:
; GCN: buffer_load_dword
; GCN: buffer_load_dword
; GCN: ds_write_b32
; GCN: ds_write_b32
; GCN: s_endpgm
; Loop alternating two global loads with two LDS stores. With memory
; operands preserved on the addr64 buffer loads, the scheduler can prove
; the noalias global loads are independent of the LDS stores and cluster
; the two buffer_load_dword instructions together (checked above).
define void @reschedule_global_load_lds_store(i32 addrspace(1)* noalias %gptr0, i32 addrspace(1)* noalias %gptr1, i32 addrspace(3)* noalias %lptr, i32 %c) #0 {
entry:
%tid = tail call i32 @llvm.r600.read.tidig.x() #1
%idx = shl i32 %tid, 2
%gep0 = getelementptr i32, i32 addrspace(1)* %gptr0, i32 %idx
%gep1 = getelementptr i32, i32 addrspace(1)* %gptr1, i32 %idx
%gep2 = getelementptr i32, i32 addrspace(3)* %lptr, i32 %tid
; Loop is entered only when %c == 0.
%cmp0 = icmp eq i32 %c, 0
br i1 %cmp0, label %for.body, label %exit
for.body: ; preds = %for.body, %entry
%i = phi i32 [ 0, %entry ], [ %i.inc, %for.body ]
%gptr0.phi = phi i32 addrspace(1)* [ %gep0, %entry ], [ %gep0.inc, %for.body ]
%gptr1.phi = phi i32 addrspace(1)* [ %gep1, %entry ], [ %gep1.inc, %for.body ]
%lptr0.phi = phi i32 addrspace(3)* [ %gep2, %entry ], [ %gep2.inc, %for.body ]
%lptr1 = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 1
; NOTE(review): the loads use the loop-invariant %gep0/%gep1 rather than
; the phis %gptr0.phi/%gptr1.phi, so the same addresses are re-loaded each
; iteration — presumably intentional for this scheduling test; confirm.
%val0 = load i32, i32 addrspace(1)* %gep0
store i32 %val0, i32 addrspace(3)* %lptr0.phi
%val1 = load i32, i32 addrspace(1)* %gep1
store i32 %val1, i32 addrspace(3)* %lptr1
%gep0.inc = getelementptr i32, i32 addrspace(1)* %gptr0.phi, i32 4
%gep1.inc = getelementptr i32, i32 addrspace(1)* %gptr1.phi, i32 4
%gep2.inc = getelementptr i32, i32 addrspace(3)* %lptr0.phi, i32 4
%i.inc = add nsw i32 %i, 1
; Backedge taken while %i != 256 (checked before the increment's result
; is used, so the loop runs 257 iterations when entered).
%cmp1 = icmp ne i32 %i, 256
br i1 %cmp1, label %for.body, label %exit
exit: ; preds = %for.body, %entry
ret void
}
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tidig.x() #1
; Function Attrs: nounwind readnone
declare i32 @llvm.r600.read.tgid.x() #1
attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { noduplicate nounwind }