mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2025-01-14 15:38:57 +00:00
It's not possible to insert code immediately after an invoke in the
same basic block, and it's not safe to insert code in the successor blocks if the edges are critical edges. Splitting those edges is possible, but undesirable, especially on the unwind side. Instead, make the bottom-up code motion consider invokes to be part of their successor blocks, rather than part of their parent blocks, so that it doesn't push code past them and onto the edges. This fixes PR12307. llvm-svn: 153343
This commit is contained in:
parent
7da86bf59f
commit
0c02608af1
@ -1679,6 +1679,7 @@ namespace {
|
||||
DenseMap<const BasicBlock *, BBState> &BBStates,
|
||||
BBState &MyStates) const;
|
||||
bool VisitInstructionBottomUp(Instruction *Inst,
|
||||
BasicBlock *BB,
|
||||
MapVector<Value *, RRInfo> &Retains,
|
||||
BBState &MyStates);
|
||||
bool VisitBottomUp(BasicBlock *BB,
|
||||
@ -2523,6 +2524,7 @@ ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
|
||||
|
||||
bool
|
||||
ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
|
||||
BasicBlock *BB,
|
||||
MapVector<Value *, RRInfo> &Retains,
|
||||
BBState &MyStates) {
|
||||
bool NestingDetected = false;
|
||||
@ -2642,14 +2644,24 @@ ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
|
||||
case S_MovableRelease:
|
||||
if (CanUse(Inst, Ptr, PA, Class)) {
|
||||
assert(S.RRI.ReverseInsertPts.empty());
|
||||
S.RRI.ReverseInsertPts.insert(Inst);
|
||||
// If this is an invoke instruction, we're scanning it as part of
|
||||
// one of its successor blocks, since we can't insert code after it
|
||||
// in its own block, and we don't want to split critical edges.
|
||||
if (isa<InvokeInst>(Inst))
|
||||
S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
|
||||
else
|
||||
S.RRI.ReverseInsertPts.insert(next(BasicBlock::iterator(Inst)));
|
||||
S.SetSeq(S_Use);
|
||||
} else if (Seq == S_Release &&
|
||||
(Class == IC_User || Class == IC_CallOrUser)) {
|
||||
// Non-movable releases depend on any possible objc pointer use.
|
||||
S.SetSeq(S_Stop);
|
||||
assert(S.RRI.ReverseInsertPts.empty());
|
||||
S.RRI.ReverseInsertPts.insert(Inst);
|
||||
// As above; handle invoke specially.
|
||||
if (isa<InvokeInst>(Inst))
|
||||
S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
|
||||
else
|
||||
S.RRI.ReverseInsertPts.insert(next(BasicBlock::iterator(Inst)));
|
||||
}
|
||||
break;
|
||||
case S_Stop:
|
||||
@ -2713,7 +2725,23 @@ ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
|
||||
// Visit all the instructions, bottom-up.
|
||||
for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
|
||||
Instruction *Inst = llvm::prior(I);
|
||||
NestingDetected |= VisitInstructionBottomUp(Inst, Retains, MyStates);
|
||||
|
||||
// Invoke instructions are visited as part of their successors (below).
|
||||
if (isa<InvokeInst>(Inst))
|
||||
continue;
|
||||
|
||||
NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
|
||||
}
|
||||
|
||||
// If there's a predecessor with an invoke, visit the invoke as
|
||||
// if it were part of this block, since we can't insert code after
|
||||
// an invoke in its own block, and we don't want to split critical
|
||||
// edges.
|
||||
for (pred_iterator PI(BB), PE(BB, false); PI != PE; ++PI) {
|
||||
BasicBlock *Pred = *PI;
|
||||
TerminatorInst *PredTI = cast<TerminatorInst>(&Pred->back());
|
||||
if (isa<InvokeInst>(PredTI))
|
||||
NestingDetected |= VisitInstructionBottomUp(PredTI, BB, Retains, MyStates);
|
||||
}
|
||||
|
||||
return NestingDetected;
|
||||
@ -3058,35 +3086,17 @@ void ObjCARCOpt::MoveCalls(Value *Arg,
|
||||
for (SmallPtrSet<Instruction *, 2>::const_iterator
|
||||
PI = RetainsToMove.ReverseInsertPts.begin(),
|
||||
PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
|
||||
Instruction *LastUse = *PI;
|
||||
Instruction *InsertPts[] = { 0, 0, 0 };
|
||||
if (InvokeInst *II = dyn_cast<InvokeInst>(LastUse)) {
|
||||
// We can't insert code immediately after an invoke instruction, so
|
||||
// insert code at the beginning of both successor blocks instead.
|
||||
// The invoke's return value isn't available in the unwind block,
|
||||
// but our releases will never depend on it, because they must be
|
||||
// paired with retains from before the invoke.
|
||||
InsertPts[0] = II->getNormalDest()->getFirstInsertionPt();
|
||||
if (!II->getMetadata(NoObjCARCExceptionsMDKind))
|
||||
InsertPts[1] = II->getUnwindDest()->getFirstInsertionPt();
|
||||
} else {
|
||||
// Insert code immediately after the last use.
|
||||
InsertPts[0] = llvm::next(BasicBlock::iterator(LastUse));
|
||||
}
|
||||
|
||||
for (Instruction **I = InsertPts; *I; ++I) {
|
||||
Instruction *InsertPt = *I;
|
||||
Value *MyArg = ArgTy == ParamTy ? Arg :
|
||||
new BitCastInst(Arg, ParamTy, "", InsertPt);
|
||||
CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
|
||||
"", InsertPt);
|
||||
// Attach a clang.imprecise_release metadata tag, if appropriate.
|
||||
if (MDNode *M = ReleasesToMove.ReleaseMetadata)
|
||||
Call->setMetadata(ImpreciseReleaseMDKind, M);
|
||||
Call->setDoesNotThrow();
|
||||
if (ReleasesToMove.IsTailCallRelease)
|
||||
Call->setTailCall();
|
||||
}
|
||||
Instruction *InsertPt = *PI;
|
||||
Value *MyArg = ArgTy == ParamTy ? Arg :
|
||||
new BitCastInst(Arg, ParamTy, "", InsertPt);
|
||||
CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
|
||||
"", InsertPt);
|
||||
// Attach a clang.imprecise_release metadata tag, if appropriate.
|
||||
if (MDNode *M = ReleasesToMove.ReleaseMetadata)
|
||||
Call->setMetadata(ImpreciseReleaseMDKind, M);
|
||||
Call->setDoesNotThrow();
|
||||
if (ReleasesToMove.IsTailCallRelease)
|
||||
Call->setTailCall();
|
||||
}
|
||||
|
||||
// Delete the original retain and release calls.
|
||||
|
@ -103,6 +103,72 @@ finally.rethrow: ; preds = %invoke.cont, %entry
|
||||
unreachable
|
||||
}
|
||||
|
||||
; Don't try to place code on invoke critical edges.
|
||||
|
||||
; CHECK: define void @test3(
|
||||
; CHECK: if.end:
|
||||
; CHECK-NEXT: call void @objc_release(i8* %p) nounwind
|
||||
; CHECK-NEXT: ret void
|
||||
define void @test3(i8* %p, i1 %b) {
|
||||
entry:
|
||||
%0 = call i8* @objc_retain(i8* %p)
|
||||
call void @callee()
|
||||
br i1 %b, label %if.else, label %if.then
|
||||
|
||||
if.then:
|
||||
invoke void @use_pointer(i8* %p)
|
||||
to label %if.end unwind label %lpad, !clang.arc.no_objc_arc_exceptions !0
|
||||
|
||||
if.else:
|
||||
invoke void @use_pointer(i8* %p)
|
||||
to label %if.end unwind label %lpad, !clang.arc.no_objc_arc_exceptions !0
|
||||
|
||||
lpad:
|
||||
%r = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
|
||||
cleanup
|
||||
ret void
|
||||
|
||||
if.end:
|
||||
call void @objc_release(i8* %p)
|
||||
ret void
|
||||
}
|
||||
|
||||
; Like test3, but with ARC-relevant exception handling.
|
||||
|
||||
; CHECK: define void @test4(
|
||||
; CHECK: lpad:
|
||||
; CHECK-NEXT: %r = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
|
||||
; CHECK-NEXT: cleanup
|
||||
; CHECK-NEXT: call void @objc_release(i8* %p) nounwind
|
||||
; CHECK-NEXT: ret void
|
||||
; CHECK: if.end:
|
||||
; CHECK-NEXT: call void @objc_release(i8* %p) nounwind
|
||||
; CHECK-NEXT: ret void
|
||||
define void @test4(i8* %p, i1 %b) {
|
||||
entry:
|
||||
%0 = call i8* @objc_retain(i8* %p)
|
||||
call void @callee()
|
||||
br i1 %b, label %if.else, label %if.then
|
||||
|
||||
if.then:
|
||||
invoke void @use_pointer(i8* %p)
|
||||
to label %if.end unwind label %lpad
|
||||
|
||||
if.else:
|
||||
invoke void @use_pointer(i8* %p)
|
||||
to label %if.end unwind label %lpad
|
||||
|
||||
lpad:
|
||||
%r = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__objc_personality_v0 to i8*)
|
||||
cleanup
|
||||
call void @objc_release(i8* %p)
|
||||
ret void
|
||||
|
||||
if.end:
|
||||
call void @objc_release(i8* %p)
|
||||
ret void
|
||||
}
|
||||
|
||||
declare i32 @__gxx_personality_v0(...)
|
||||
declare i32 @__objc_personality_v0(...)
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user