Add a divided flag for the first piece of an argument divided into multiple parts. Fixes PR1643

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@49611 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Nicolas Geoffray 2008-04-13 13:40:22 +00:00
parent fa0e66471f
commit c0cb28fd3a
3 changed files with 24 additions and 14 deletions

View File

@ -1734,6 +1734,8 @@ namespace ISD {
static const uint64_t NestOffs = 5; static const uint64_t NestOffs = 5;
static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment static const uint64_t ByValAlign = 0xFULL << 6; //< Struct alignment
static const uint64_t ByValAlignOffs = 6; static const uint64_t ByValAlignOffs = 6;
static const uint64_t Divided = 1ULL << 10;
static const uint64_t DividedOffs = 10;
static const uint64_t OrigAlign = 0x1FULL<<27; static const uint64_t OrigAlign = 0x1FULL<<27;
static const uint64_t OrigAlignOffs = 27; static const uint64_t OrigAlignOffs = 27;
static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size static const uint64_t ByValSize = 0xffffffffULL << 32; //< Struct size
@ -1770,6 +1772,9 @@ namespace ISD {
Flags = (Flags & ~ByValAlign) | Flags = (Flags & ~ByValAlign) |
(uint64_t(Log2_32(A) + 1) << ByValAlignOffs); (uint64_t(Log2_32(A) + 1) << ByValAlignOffs);
} }
bool isDivided() const { return Flags & Divided; }
void setDivided() { Flags |= One << DividedOffs; }
unsigned getOrigAlign() const { unsigned getOrigAlign() const {
return (One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2; return (One << ((Flags & OrigAlign) >> OrigAlignOffs)) / 2;

View File

@ -4162,8 +4162,11 @@ TargetLowering::LowerArguments(Function &F, SelectionDAG &DAG) {
unsigned NumRegs = getNumRegisters(VT); unsigned NumRegs = getNumRegisters(VT);
for (unsigned i = 0; i != NumRegs; ++i) { for (unsigned i = 0; i != NumRegs; ++i) {
RetVals.push_back(RegisterVT); RetVals.push_back(RegisterVT);
if (NumRegs > 1 && i == 0)
Flags.setDivided();
// if it isn't first piece, alignment must be 1 // if it isn't first piece, alignment must be 1
if (i > 0) else if (i > 0)
Flags.setOrigAlign(1); Flags.setOrigAlign(1);
Ops.push_back(DAG.getArgFlags(Flags)); Ops.push_back(DAG.getArgFlags(Flags));
} }
@ -4285,7 +4288,9 @@ TargetLowering::LowerCallTo(SDOperand Chain, const Type *RetTy,
for (unsigned i = 0; i != NumParts; ++i) { for (unsigned i = 0; i != NumParts; ++i) {
// if it isn't first piece, alignment must be 1 // if it isn't first piece, alignment must be 1
ISD::ArgFlagsTy MyFlags = Flags; ISD::ArgFlagsTy MyFlags = Flags;
if (i != 0) if (NumParts > 1 && i == 0)
MyFlags.setDivided();
else if (i != 0)
MyFlags.setOrigAlign(1); MyFlags.setOrigAlign(1);
Ops.push_back(Parts[i]); Ops.push_back(Parts[i]);

View File

@ -1410,7 +1410,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
// //
// In the ELF 32 ABI, GPRs and stack are double word align: an argument // In the ELF 32 ABI, GPRs and stack are double word align: an argument
// represented with two words (long long or double) must be copied to an // represented with two words (long long or double) must be copied to an
// even GPR_idx value or to an even ArgOffset value. TODO: implement this. // even GPR_idx value or to an even ArgOffset value.
SmallVector<SDOperand, 8> MemOps; SmallVector<SDOperand, 8> MemOps;
@ -1423,7 +1423,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags(); cast<ARG_FLAGSSDNode>(Op.getOperand(ArgNo+3))->getArgFlags();
// See if next argument requires stack alignment in ELF // See if next argument requires stack alignment in ELF
bool Expand = false; // TODO: implement this. bool Align = Flags.isDivided();
unsigned CurArgOffset = ArgOffset; unsigned CurArgOffset = ArgOffset;
@ -1435,7 +1435,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
ObjSize = Flags.getByValSize(); ObjSize = Flags.getByValSize();
ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize; ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
// Double word align in ELF // Double word align in ELF
if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2); if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
// Objects of size 1 and 2 are right justified, everything else is // Objects of size 1 and 2 are right justified, everything else is
// left justified. This means the memory address is adjusted forwards. // left justified. This means the memory address is adjusted forwards.
if (ObjSize==1 || ObjSize==2) { if (ObjSize==1 || ObjSize==2) {
@ -1487,7 +1487,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
case MVT::i32: case MVT::i32:
if (!isPPC64) { if (!isPPC64) {
// Double word align in ELF // Double word align in ELF
if (Expand && isELF32_ABI) GPR_idx += (GPR_idx % 2); if (Align && isELF32_ABI) GPR_idx += (GPR_idx % 2);
if (GPR_idx != Num_GPR_Regs) { if (GPR_idx != Num_GPR_Regs) {
unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass); unsigned VReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
@ -1499,7 +1499,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
ArgSize = PtrByteSize; ArgSize = PtrByteSize;
} }
// Stack align in ELF // Stack align in ELF
if (needsLoad && Expand && isELF32_ABI) if (needsLoad && Align && isELF32_ABI)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
// All int arguments reserve stack space in Macho ABI. // All int arguments reserve stack space in Macho ABI.
if (isMachoABI || needsLoad) ArgOffset += PtrByteSize; if (isMachoABI || needsLoad) ArgOffset += PtrByteSize;
@ -1556,7 +1556,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op,
} }
// Stack align in ELF // Stack align in ELF
if (needsLoad && Expand && isELF32_ABI) if (needsLoad && Align && isELF32_ABI)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
// All FP arguments reserve stack space in Macho ABI. // All FP arguments reserve stack space in Macho ABI.
if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize; if (isMachoABI || needsLoad) ArgOffset += isPPC64 ? 8 : ObjSize;
@ -1855,14 +1855,14 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy Flags =
cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags(); cast<ARG_FLAGSSDNode>(Op.getOperand(5+2*i+1))->getArgFlags();
// See if next argument requires stack alignment in ELF // See if next argument requires stack alignment in ELF
bool Expand = false; // TODO: implement this. bool Align = Flags.isDivided();
// PtrOff will be used to store the current argument to the stack if a // PtrOff will be used to store the current argument to the stack if a
// register cannot be found for it. // register cannot be found for it.
SDOperand PtrOff; SDOperand PtrOff;
// Stack align in ELF 32 // Stack align in ELF 32
if (isELF32_ABI && Expand) if (isELF32_ABI && Align)
PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize, PtrOff = DAG.getConstant(ArgOffset + ((ArgOffset/4) % 2) * PtrByteSize,
StackPtr.getValueType()); StackPtr.getValueType());
else else
@ -1881,7 +1881,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
// FIXME memcpy is used way more than necessary. Correctness first. // FIXME memcpy is used way more than necessary. Correctness first.
if (Flags.isByVal()) { if (Flags.isByVal()) {
unsigned Size = Flags.getByValSize(); unsigned Size = Flags.getByValSize();
if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2); if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
if (Size==1 || Size==2) { if (Size==1 || Size==2) {
// Very small objects are passed right-justified. // Very small objects are passed right-justified.
// Everything else is passed left-justified. // Everything else is passed left-justified.
@ -1942,7 +1942,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
case MVT::i32: case MVT::i32:
case MVT::i64: case MVT::i64:
// Double word align in ELF // Double word align in ELF
if (isELF32_ABI && Expand) GPR_idx += (GPR_idx % 2); if (isELF32_ABI && Align) GPR_idx += (GPR_idx % 2);
if (GPR_idx != NumGPRs) { if (GPR_idx != NumGPRs) {
RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg)); RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
} else { } else {
@ -1951,7 +1951,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
} }
if (inMem || isMachoABI) { if (inMem || isMachoABI) {
// Stack align in ELF // Stack align in ELF
if (isELF32_ABI && Expand) if (isELF32_ABI && Align)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
ArgOffset += PtrByteSize; ArgOffset += PtrByteSize;
@ -1999,7 +1999,7 @@ SDOperand PPCTargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG,
} }
if (inMem || isMachoABI) { if (inMem || isMachoABI) {
// Stack align in ELF // Stack align in ELF
if (isELF32_ABI && Expand) if (isELF32_ABI && Align)
ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize; ArgOffset += ((ArgOffset/4) % 2) * PtrByteSize;
if (isPPC64) if (isPPC64)
ArgOffset += 8; ArgOffset += 8;