Introduce a @llvm.experimental.guard intrinsic

Summary:
As discussed on llvm-dev[1].

This change adds the basic boilerplate code around having this intrinsic
in LLVM:

 - Changes in Intrinsics.td, and the IR Verifier
 - A lowering pass to lower @llvm.experimental.guard to normal
   control flow
 - Inliner support

[1]: http://lists.llvm.org/pipermail/llvm-dev/2016-February/095523.html

Reviewers: reames, atrick, chandlerc, rnk, JosephTremoulet, echristo

Subscribers: mcrosier, llvm-commits

Differential Revision: http://reviews.llvm.org/D18527

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@264976 91177308-0d34-0410-b5e6-96231b3b80d8
Author: Sanjoy Das
Date:   2016-03-31 00:18:46 +00:00
Commit: de765686f8 (parent d3f401b2e9)
12 changed files with 307 additions and 5 deletions


@@ -12181,6 +12181,50 @@ ensure that this symbol is defined). The call arguments to
arguments of the specified types, and not as varargs.
'``llvm.experimental.guard``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

  declare void @llvm.experimental.guard(i1, ...) [ "deopt"(...) ]

Overview:
"""""""""

This intrinsic, together with :ref:`deoptimization operand bundles
<deopt_opbundles>`, allows frontends to express guards or checks on
optimistic assumptions made during compilation. The semantics of
``@llvm.experimental.guard`` is defined in terms of
``@llvm.experimental.deoptimize`` -- its body is defined to be
equivalent to:

.. code-block:: llvm

  define void @llvm.experimental.guard(i1 %pred, <args...>) {
    %realPred = and i1 %pred, undef
    br i1 %realPred, label %continue, label %leave

  leave:
    call void @llvm.experimental.deoptimize(<args...>) [ "deopt"() ]
    ret void

  continue:
    ret void
  }
In words, ``@llvm.experimental.guard`` executes the attached
``"deopt"`` continuation if (but **not** only if) its first argument
is ``false``. Since the optimizer is allowed to replace the ``undef``
with an arbitrary value, it can optimize a guard to fail "spuriously",
i.e. without the original condition being false (hence the "not only
if"); this allows for "check widening"-type optimizations.

``@llvm.experimental.guard`` cannot be invoked.
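As an illustrative, non-normative sketch, a frontend that has speculated
that an index is in bounds might emit a guard like the following (the
function ``@get``, its arguments, and the ``"deopt"`` state are
hypothetical, not part of this patch):

.. code-block:: llvm

  ; Hypothetical example: guard a speculative bounds assumption.
  declare void @llvm.experimental.guard(i1, ...)

  define i32 @get(i32* %arr, i32 %i, i32 %len) {
    %in.bounds = icmp ult i32 %i, %len
    call void(i1, ...) @llvm.experimental.guard(i1 %in.bounds) [ "deopt"(i32 %i) ]
    %addr = getelementptr inbounds i32, i32* %arr, i32 %i
    %val = load i32, i32* %addr
    ret i32 %val
  }

If the guard fails (or is widened so that it fails), control transfers to
the ``"deopt"`` continuation instead of reaching the load.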
Stack Map Intrinsics
--------------------


@@ -597,6 +597,10 @@ def int_debugtrap : Intrinsic<[]>,
def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
                                            [Throws]>;

// Support for speculative runtime guards
def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
                                       [Throws]>;

// NOP: calls/invokes to this intrinsic are removed by codegen
def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
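At the IR level, the new definition corresponds to a void, variadic
intrinsic taking an i1 condition. The call site below is an illustrative
sketch only; the extra i32 argument and the "deopt" state are placeholders:

  declare void @llvm.experimental.guard(i1, ...)

  define void @example(i1 %cond) {
    ; hypothetical call site; operands beyond %cond are placeholders
    call void(i1, ...) @llvm.experimental.guard(i1 %cond, i32 1) [ "deopt"(i32 0) ]
    ret void
  }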


@@ -188,6 +188,7 @@ void initializeLoopIdiomRecognizePass(PassRegistry&);
void initializeLowerAtomicPass(PassRegistry&);
void initializeLowerBitSetsPass(PassRegistry&);
void initializeLowerExpectIntrinsicPass(PassRegistry&);
void initializeLowerGuardIntrinsicPass(PassRegistry&);
void initializeLowerIntrinsicsPass(PassRegistry&);
void initializeLowerInvokePass(PassRegistry&);
void initializeLowerSwitchPass(PassRegistry&);


@@ -372,6 +372,12 @@ FunctionPass *createSinkingPass();
//
Pass *createLowerAtomicPass();

//===----------------------------------------------------------------------===//
//
// LowerGuardIntrinsic - Lower guard intrinsics to normal control flow.
//
Pass *createLowerGuardIntrinsicPass();

//===----------------------------------------------------------------------===//
//
// ValuePropagation - Propagate CFG-derived value information


@@ -4106,6 +4106,14 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
    break;
  }
  case Intrinsic::experimental_guard: {
    Assert(CS.isCall(), "experimental_guard cannot be invoked", CS);
    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
           "experimental_guard must have exactly one "
           "\"deopt\" operand bundle");
    break;
  }
  case Intrinsic::experimental_deoptimize: {
    Assert(CS.isCall(), "experimental_deoptimize cannot be invoked", CS);
    Assert(CS.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
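For contrast with the negative verifier tests added below, a well-formed
guard call site is a direct (non-invoke) call carrying exactly one "deopt"
operand bundle. This is a hypothetical sketch; @f_ok and its operands are
placeholders, not part of the patch's tests:

  declare void @llvm.experimental.guard(i1, ...)

  define void @f_ok(i1 %cond) {
  entry:
    ; accepted by the verifier: plain call, exactly one "deopt" bundle
    call void(i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"(i32 7) ]
    ret void
  }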


@@ -32,6 +32,7 @@ add_llvm_library(LLVMScalarOpts
  LoopVersioningLICM.cpp
  LowerAtomic.cpp
  LowerExpectIntrinsic.cpp
  LowerGuardIntrinsic.cpp
  MemCpyOptimizer.cpp
  MergedLoadStoreMotion.cpp
  NaryReassociate.cpp


@@ -0,0 +1,108 @@
//===- LowerGuardIntrinsic.cpp - Lower the guard intrinsic ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass lowers the llvm.experimental.guard intrinsic to a conditional call
// to @llvm.experimental.deoptimize.  Once this happens, the guard can no
// longer be widened.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"

using namespace llvm;

namespace {
struct LowerGuardIntrinsic : public FunctionPass {
  static char ID;
  LowerGuardIntrinsic() : FunctionPass(ID) {
    initializeLowerGuardIntrinsicPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;
};
}

static void MakeGuardControlFlowExplicit(Function *DeoptIntrinsic,
                                         CallInst *CI) {
  OperandBundleDef DeoptOB(*CI->getOperandBundle(LLVMContext::OB_deopt));
  SmallVector<Value *, 4> Args(std::next(CI->arg_begin()), CI->arg_end());

  auto *CheckBB = CI->getParent();
  auto *DeoptBlockTerm =
      SplitBlockAndInsertIfThen(CI->getArgOperand(0), CI, true);

  auto *CheckBI = cast<BranchInst>(CheckBB->getTerminator());

  // SplitBlockAndInsertIfThen inserts control flow that branches to
  // DeoptBlockTerm if the condition is true.  We want the opposite.
  CheckBI->swapSuccessors();

  CheckBI->getSuccessor(0)->setName("guarded");
  CheckBI->getSuccessor(1)->setName("deopt");

  IRBuilder<> B(DeoptBlockTerm);
  auto *DeoptCall = B.CreateCall(DeoptIntrinsic, Args, {DeoptOB}, "");

  if (DeoptIntrinsic->getReturnType()->isVoidTy()) {
    B.CreateRetVoid();
  } else {
    DeoptCall->setName("deoptcall");
    B.CreateRet(DeoptCall);
  }

  DeoptBlockTerm->eraseFromParent();
}

bool LowerGuardIntrinsic::runOnFunction(Function &F) {
  // Check if we can cheaply rule out the possibility of not having any work to
  // do.
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  if (!GuardDecl || GuardDecl->use_empty())
    return false;

  SmallVector<CallInst *, 8> ToLower;
  for (auto &I : instructions(F))
    if (auto *CI = dyn_cast<CallInst>(&I))
      if (auto *F = CI->getCalledFunction())
        if (F->getIntrinsicID() == Intrinsic::experimental_guard)
          ToLower.push_back(CI);

  if (ToLower.empty())
    return false;

  auto *DeoptIntrinsic = Intrinsic::getDeclaration(
      F.getParent(), Intrinsic::experimental_deoptimize, {F.getReturnType()});

  for (auto *CI : ToLower) {
    MakeGuardControlFlowExplicit(DeoptIntrinsic, CI);
    CI->eraseFromParent();
  }

  return true;
}

char LowerGuardIntrinsic::ID = 0;
INITIALIZE_PASS(LowerGuardIntrinsic, "lower-guard-intrinsic",
                "Lower the guard intrinsic to normal control flow", false,
                false)

Pass *llvm::createLowerGuardIntrinsicPass() {
  return new LowerGuardIntrinsic();
}
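In rough terms, and mirroring the @f_basic case in the lowering test added
below, the pass rewrites a guard into an explicit branch whose failing block
calls the return-type-specialized @llvm.experimental.deoptimize and returns
its result. The functions @f and @f.lowered are hypothetical before/after
sketches, not part of the patch:

  declare void @llvm.experimental.guard(i1, ...)
  declare i8 @llvm.experimental.deoptimize.i8(...)

  ; before lowering (hypothetical input)
  define i8 @f(i1 %c) {
    call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1) [ "deopt"(i32 1) ]
    ret i8 5
  }

  ; roughly what -lower-guard-intrinsic produces
  define i8 @f.lowered(i1 %c) {
    br i1 %c, label %guarded, label %deopt

  deopt:
    ; guard arguments after the condition become deoptimize arguments;
    ; the "deopt" bundle is carried over unchanged
    %deoptcall = call i8 (...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(i32 1) ]
    ret i8 %deoptcall

  guarded:
    ret i8 5
  }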


@@ -62,6 +62,7 @@ void llvm::initializeScalarOpts(PassRegistry &Registry) {
  initializeLoopIdiomRecognizePass(Registry);
  initializeLowerAtomicPass(Registry);
  initializeLowerExpectIntrinsicPass(Registry);
  initializeLowerGuardIntrinsicPass(Registry);
  initializeMemCpyOptPass(Registry);
  initializeMergedLoadStoreMotionPass(Registry);
  initializeNaryReassociatePass(Registry);


@@ -428,12 +428,14 @@ static BasicBlock *HandleCallsInBlockInlinedThroughInvoke(
      continue;

    // We do not need to (and in fact, cannot) convert possibly throwing calls
    // to @llvm.experimental_deoptimize (resp. @llvm.experimental.guard) into
    // invokes.  The caller's "segment" of the deoptimization continuation
    // attached to the newly inlined @llvm.experimental_deoptimize
    // (resp. @llvm.experimental.guard) call should contain the exception
    // handling logic, if any.
    if (auto *F = CI->getCalledFunction())
      if (F->getIntrinsicID() == Intrinsic::experimental_deoptimize ||
          F->getIntrinsicID() == Intrinsic::experimental_guard)
        continue;

    if (auto FuncletBundle = CI->getOperandBundle(LLVMContext::OB_funclet)) {


@@ -0,0 +1,39 @@
; RUN: opt -S -always-inline < %s | FileCheck %s

declare void @llvm.experimental.guard(i1, ...)

define i8 @callee(i1* %c_ptr) alwaysinline {
  %c = load volatile i1, i1* %c_ptr
  call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1) [ "deopt"(i32 1) ]
  ret i8 5
}

define void @caller_0(i1* %c, i8* %ptr) {
; CHECK-LABEL: @caller_0(
entry:
; CHECK: [[COND:%[^ ]+]] = load volatile i1, i1* %c
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND]], i32 1) [ "deopt"(i32 2, i32 1) ]
; CHECK-NEXT: store i8 5, i8* %ptr
  %v = call i8 @callee(i1* %c) [ "deopt"(i32 2) ]
  store i8 %v, i8* %ptr
  ret void
}

define i32 @caller_1(i1* %c, i8* %ptr) personality i8 3 {
; CHECK-LABEL: @caller_1(
; CHECK: [[COND:%[^ ]+]] = load volatile i1, i1* %c
; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[COND]], i32 1) [ "deopt"(i32 3, i32 1) ]
; CHECK-NEXT: br label %normal
entry:
  %v = invoke i8 @callee(i1* %c) [ "deopt"(i32 3) ] to label %normal
         unwind label %unwind

unwind:
  %lp = landingpad i32 cleanup
  ret i32 43

normal:
  store i8 %v, i8* %ptr
  ret i32 42
}


@@ -0,0 +1,62 @@
; RUN: opt -S -lower-guard-intrinsic < %s | FileCheck %s

declare void @llvm.experimental.guard(i1, ...)

define i8 @f_basic(i1* %c_ptr) {
; CHECK-LABEL: @f_basic(
  %c = load volatile i1, i1* %c_ptr
  call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1) [ "deopt"(i32 1) ]
  ret i8 5

; CHECK: br i1 %c, label %guarded, label %deopt
; CHECK: deopt:
; CHECK-NEXT: %deoptcall = call i8 (...) @llvm.experimental.deoptimize.i8(i32 1) [ "deopt"(i32 1) ]
; CHECK-NEXT: ret i8 %deoptcall
; CHECK: guarded:
; CHECK-NEXT: ret i8 5
}

define void @f_void_return_ty(i1* %c_ptr) {
; CHECK-LABEL: @f_void_return_ty(
  %c = load volatile i1, i1* %c_ptr
  call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1) [ "deopt"() ]
  ret void

; CHECK: br i1 %c, label %guarded, label %deopt
; CHECK: deopt:
; CHECK-NEXT: call void (...) @llvm.experimental.deoptimize.isVoid(i32 1) [ "deopt"() ]
; CHECK-NEXT: ret void
; CHECK: guarded:
; CHECK-NEXT: ret void
}

define void @f_multiple_args(i1* %c_ptr) {
; CHECK-LABEL: @f_multiple_args(
  %c = load volatile i1, i1* %c_ptr
  call void(i1, ...) @llvm.experimental.guard(i1 %c, i32 1, i32 2, double 500.0) [ "deopt"(i32 2, i32 3) ]
  ret void

; CHECK: br i1 %c, label %guarded, label %deopt
; CHECK: deopt:
; CHECK-NEXT: call void (...) @llvm.experimental.deoptimize.isVoid(i32 1, i32 2, double 5.000000e+02) [ "deopt"(i32 2, i32 3) ]
; CHECK-NEXT: ret void
; CHECK: guarded:
; CHECK-NEXT: ret void
}

define i32 @f_zero_args(i1* %c_ptr) {
; CHECK-LABEL: @f_zero_args(
  %c = load volatile i1, i1* %c_ptr
  call void(i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"(i32 2, i32 3) ]
  ret i32 500

; CHECK: br i1 %c, label %guarded, label %deopt
; CHECK: deopt:
; CHECK-NEXT: %deoptcall = call i32 (...) @llvm.experimental.deoptimize.i32() [ "deopt"(i32 2, i32 3) ]
; CHECK-NEXT: ret i32 %deoptcall
; CHECK: guarded:
; CHECK-NEXT: ret i32 500
}


@@ -0,0 +1,26 @@
; RUN: not opt -S -verify < %s 2>&1 | FileCheck %s

declare void @llvm.experimental.guard(i1, ...)

declare void @unknown()

define void @f_nodeopt() {
entry:
  call void(i1, ...) @llvm.experimental.guard(i1 undef, i32 1, i32 2)
; CHECK: guard must have exactly one "deopt" operand bundle
  ret void
}

define void @f_invoke() personality i8 3 {
entry:
  invoke void(i1, ...) @llvm.experimental.guard(i1 undef, i32 0, float 0.0) [ "deopt"() ]
         to label %ok unwind label %not_ok
; CHECK: guard cannot be invoked

ok:
  ret void

not_ok:
  %0 = landingpad { i8*, i32 }
          filter [0 x i8*] zeroinitializer
  ret void
}