llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
Matt Arsenault e61b6779e4 AMDGPU: Fix handling of alignment padding in DAG argument lowering
This was completely broken if there was ever a struct argument, as
the alignment padding information is thrown away during the argument
analysis.

The offsets passed into LowerFormalArguments are not useful: they
partially depend on the legalized result register type, and they do
not consider the alignment in the first place.

Ignore the Ins array, and instead figure out what we need to do from
the raw IR type. This seems to fix the padding computation when the
DAG lowering is forced (and stops breaking arguments that follow
padded arguments when the arguments were only partially lowered in
the IR).
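
A minimal sketch of that style of walk, in the spirit of the description
above (this is a hypothetical helper using the LLVM-7-era DataLayout API,
not the code from this patch, and it ignores byval and aggregate splitting
for brevity):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

// Walk the raw IR argument types and assign each argument an offset that
// honors its ABI alignment, independent of how the arguments are later
// legalized into registers.
static uint64_t computeKernArgOffsets(const Function &F, const DataLayout &DL,
                                      SmallVectorImpl<uint64_t> &Offsets) {
  uint64_t Offset = 0;
  for (const Argument &Arg : F.args()) {
    Type *Ty = Arg.getType();
    uint64_t Align = DL.getABITypeAlignment(Ty); // alignment from the IR type
    Offset = alignTo(Offset, Align);             // insert padding if needed
    Offsets.push_back(Offset);
    Offset += DL.getTypeAllocSize(Ty);
  }
  return Offset; // total explicit kernarg size
}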

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@337021 91177308-0d34-0410-b5e6-96231b3b80d8
2018-07-13 16:40:25 +00:00

//===-- AMDGPUMachineFunction.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AMDGPUMachineFunction.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUPerfHintAnalysis.h"
#include "llvm/CodeGen/MachineModuleInfo.h"

using namespace llvm;

AMDGPUMachineFunction::AMDGPUMachineFunction(const MachineFunction &MF) :
  MachineFunctionInfo(),
  LocalMemoryObjects(),
  ExplicitKernArgSize(0),
  MaxKernArgAlign(0),
  LDSSize(0),
  IsEntryFunction(AMDGPU::isEntryFunctionCC(MF.getFunction().getCallingConv())),
  NoSignedZerosFPMath(MF.getTarget().Options.NoSignedZerosFPMath),
  MemoryBound(false),
  WaveLimiter(false) {
const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
// FIXME: Should initialize KernArgSize based on ExplicitKernelArgOffset,
// except reserved size is not correctly aligned.
const Function &F = MF.getFunction();
if (auto *Resolver = MF.getMMI().getResolver()) {
if (AMDGPUPerfHintAnalysis *PHA = static_cast<AMDGPUPerfHintAnalysis*>(
Resolver->getAnalysisIfAvailable(&AMDGPUPerfHintAnalysisID, true))) {
MemoryBound = PHA->isMemoryBound(&F);
WaveLimiter = PHA->needsWaveLimiter(&F);
}
}
CallingConv::ID CC = F.getCallingConv();
if (CC == CallingConv::AMDGPU_KERNEL || CC == CallingConv::SPIR_KERNEL)
ExplicitKernArgSize = ST.getExplicitKernArgSize(F, MaxKernArgAlign);
}

unsigned AMDGPUMachineFunction::allocateLDSGlobal(const DataLayout &DL,
                                                  const GlobalValue &GV) {
  auto Entry = LocalMemoryObjects.insert(std::make_pair(&GV, 0));
  if (!Entry.second)
    return Entry.first->second;

  unsigned Align = GV.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(GV.getValueType());

  // TODO: We should sort these to minimize wasted space due to alignment
  // padding. Currently the padding is decided by the first encountered use
  // during lowering.
  unsigned Offset = LDSSize = alignTo(LDSSize, Align);

  Entry.first->second = Offset;
  LDSSize += DL.getTypeAllocSize(GV.getValueType());

  return Offset;
}
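
The TODO above is visible in practice: the offset each global gets depends
on the order of first use. A standalone sketch of the same bump-allocation
scheme (plain C++, with a local stand-in for llvm::alignTo; the object
names, sizes, and alignments are made up for illustration):

#include <cstdint>
#include <cstdio>

// Round Value up to the next multiple of Align (a power of two); a local
// stand-in for llvm::alignTo.
static uint64_t alignTo(uint64_t Value, uint64_t Align) {
  return (Value + Align - 1) & ~(Align - 1);
}

int main() {
  uint64_t LDSSize = 0;

  // First-use order: a 4-byte object, then a 16-byte object with 16-byte
  // alignment, then another 4-byte object.
  uint64_t A = LDSSize = alignTo(LDSSize, 4);  LDSSize += 4;  // A at 0
  uint64_t B = LDSSize = alignTo(LDSSize, 16); LDSSize += 16; // B at 16, 12 bytes of padding
  uint64_t C = LDSSize = alignTo(LDSSize, 4);  LDSSize += 4;  // C at 32

  std::printf("A=%llu B=%llu C=%llu total=%llu\n",
              (unsigned long long)A, (unsigned long long)B,
              (unsigned long long)C, (unsigned long long)LDSSize);
  // Prints: A=0 B=16 C=32 total=36. Sorting by decreasing alignment
  // (B, A, C) would pack the same objects into 24 bytes.
  return 0;
}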