Improve codegen for the LLVM offsetof/sizeof "operator". Before, we compiled
this LLVM function:

int %foo() {
        ret int cast (int** getelementptr (int** null, int 1) to int)
}

into:

foo:
        mov %EAX, 0
        lea %EAX, DWORD PTR [%EAX + 4]
        ret

now we compile it into:

foo:
        mov %EAX, 4
        ret
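
(For context: the folded constant is just the pointee size scaled by the index.
"getelementptr int** null, int 1" steps one int* past address zero, so on IA-32
the cast yields 4. Below is a minimal C++ sketch of the source-level idiom that
front-ends lower into this GEP-on-null pattern; the MY_SIZEOF/MY_OFFSETOF names
and the test program are illustrative only, not part of this commit.)

#include <cstddef>
#include <cstdio>

// Traditional null-pointer formulations of sizeof/offsetof.  Front-ends that
// lower these expressions produce "getelementptr null" constants like the one
// above.  (Illustrative only; real code should use sizeof/offsetof directly.)
#define MY_SIZEOF(T)      ((std::size_t)((T *)0 + 1))
#define MY_OFFSETOF(T, m) ((std::size_t)&((T *)0)->m)

struct Point { int x; int y; };

int main() {
  // On a 32-bit target, MY_SIZEOF(int *) mirrors the example above:
  // one int* past address zero is address 4, hence "mov %EAX, 4".
  std::printf("MY_SIZEOF(int *)      = %zu\n", MY_SIZEOF(int *));
  std::printf("MY_OFFSETOF(Point, y) = %zu\n", MY_OFFSETOF(Point, y));
  return 0;
}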

This sequence is frequently generated by the MSIL front-end, and soon by the
malloc lowering pass and the Java front-end as well.

-Chris

llvm-svn: 14834
Chris Lattner 2004-07-15 00:58:53 +00:00
parent 626552badd
commit 210ffe4b77

@@ -3663,6 +3663,21 @@ void ISel::emitGEPOperation(MachineBasicBlock *MBB,
   if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(Src))
     Src = CPR->getValue();
 
+  // If this is a getelementptr null, with all constant integer indices, just
+  // replace it with TargetReg = 42.
+  if (isa<ConstantPointerNull>(Src)) {
+    User::op_iterator I = IdxBegin;
+    for (; I != IdxEnd; ++I)
+      if (!isa<ConstantInt>(*I))
+        break;
+    if (I == IdxEnd) {   // All constant indices
+      unsigned Offset = TD.getIndexedOffset(Src->getType(),
+                                  std::vector<Value*>(IdxBegin, IdxEnd));
+      BuildMI(*MBB, IP, X86::MOV32ri, 1, TargetReg).addImm(Offset);
+      return;
+    }
+  }
+
   std::vector<Value*> GEPOps;
   GEPOps.resize(IdxEnd-IdxBegin+1);
   GEPOps[0] = Src;
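
The interesting work is done by TD.getIndexedOffset, which walks the constant
indices and accumulates a byte offset; the patch then emits a single MOV32ri of
that offset and returns, falling through to the general GEP lowering otherwise.
Below is a standalone, simplified sketch of that folding; the SimpleType model
and the getIndexedOffset signature here are hypothetical stand-ins for
illustration, not LLVM's TargetData API.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical, simplified type model: a scalar of known size, or a struct
// described by its precomputed field offsets and field types.
struct SimpleType {
  uint64_t Size;                         // size in bytes of one element
  std::vector<uint64_t> FieldOffsets;    // non-empty only for struct types
  std::vector<const SimpleType*> Fields;
};

// Mirror of the folding in the patch: given a pointer to Ty and all-constant
// indices, accumulate a byte offset.  The first index scales by the pointee
// size (array step); subsequent indices select struct fields.
uint64_t getIndexedOffset(const SimpleType &Ty,
                          const std::vector<uint64_t> &Idx) {
  assert(!Idx.empty());
  uint64_t Offset = Idx[0] * Ty.Size;    // e.g. GEP int** null, int 1 -> 4
  const SimpleType *Cur = &Ty;
  for (size_t i = 1; i < Idx.size(); ++i) {
    Offset += Cur->FieldOffsets[Idx[i]];
    Cur = Cur->Fields[Idx[i]];
  }
  return Offset;
}

int main() {
  // "int**" on IA-32: the pointee is an int*, 4 bytes wide.
  SimpleType IntPtr{4, {}, {}};
  std::printf("fold(gep int** null, 1) = mov reg, %llu\n",
              (unsigned long long)getIndexedOffset(IntPtr, {1}));
  return 0;
}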