[Statepoints 3/4] Statepoint infrastructure for garbage collection: SelectionDAGBuilder

This is the third patch in a small series.  It contains the CodeGen support for lowering the gc.statepoint intrinsic sequences (223078) to the STATEPOINT pseudo machine instruction (223085).  The change also includes the set of helper routines and classes for working with gc.statepoints, gc.relocates, and gc.results since the lowering code uses them.  

With this change, gc.statepoints should be functionally complete.  The documentation will follow in the fourth change, and there will likely be some cleanup changes, but interested parties can start experimenting now.

I'm not particularly happy with the amount of code or complexity involved with the lowering step, but at least it's fairly well isolated.  The statepoint lowering code is split into its own files and anyone not working on the statepoint support itself should be able to ignore it.

During the lowering process, we currently spill aggressively to stack. This is not entirely ideal (and we have plans to do better), but it's functional, relatively straightforward, and matches closely the implementations of the patchpoint intrinsics.  Most of the complexity comes from trying to keep relocated copies of values in the same stack slots across statepoints.  Doing so avoids the insertion of pointless load and store instructions to reshuffle the stack.  The current implementation isn't as effective as I'd like, but it is functional and 'good enough' for many common use cases.

In the long term, I'd like to figure out how to integrate the statepoint lowering with the register allocator.  In principle, we shouldn't need to eagerly spill at all.  The register allocator should do any spilling required and the statepoint should simply record that fact.  Depending on how challenging that turns out to be, we may invest in a smarter global stack slot assignment mechanism as a stop-gap measure.

Reviewed by: atrick, ributzka





git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@223137 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Philip Reames 2014-12-02 18:50:36 +00:00
parent 495e547ef9
commit d021bb8003
14 changed files with 1345 additions and 0 deletions

View File

@ -88,6 +88,12 @@ public:
/// RegFixups - Registers which need to be replaced after isel is done.
DenseMap<unsigned, unsigned> RegFixups;
/// StatepointStackSlots - A list of temporary stack slots (frame indices)
/// used to spill values at a statepoint. We store them here to enable
/// reuse of the same stack slots across different statepoints in different
/// basic blocks.
SmallVector<unsigned, 50> StatepointStackSlots;
/// MBB - The current block.
MachineBasicBlock *MBB;

View File

@ -0,0 +1,208 @@
//===-- llvm/IR/Statepoint.h - gc.statepoint utilities ------ --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains utility functions and a wrapper class analogous to
// CallSite for accessing the fields of gc.statepoint, gc.relocate, and
// gc.result intrinsics
//
//===----------------------------------------------------------------------===//
// Guard renamed: identifiers beginning with a double underscore are
// reserved to the implementation ([lex.name]), so the previous
// __LLVM_IR_STATEPOINT_H spelling was technically undefined behavior and
// also broke the usual LLVM_<PATH>_H guard convention.
#ifndef LLVM_IR_STATEPOINT_H
#define LLVM_IR_STATEPOINT_H

#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
bool isStatepoint(const ImmutableCallSite &CS);
bool isStatepoint(const Instruction *inst);
bool isStatepoint(const Instruction &inst);
bool isGCRelocate(const Instruction *inst);
bool isGCRelocate(const ImmutableCallSite &CS);
bool isGCResult(const Instruction *inst);
bool isGCResult(const ImmutableCallSite &CS);
/// Analogous to CallSiteBase, this provides most of the actual
/// functionality for Statepoint and ImmutableStatepoint.  It is
/// templatized to allow easy specialization of const and non-const
/// concrete subtypes.  This is structured analogously to CallSite
/// rather than the IntrinsicInst.h helpers since we want to support
/// invokable statepoints in the near future.
/// TODO: This does not currently allow the if(Statepoint S = ...)
/// idiom used with CallSites.  Consider refactoring to support.
///
/// The wrapped call's argument layout is:
///   [0] call target, [1] #call args, [2] flags,
///   [3 .. 3+#callArgs)            arguments for the actual callee,
///   [3+#callArgs]                 total VM state argument count,
///   (... VM state arguments ...), (... gc arguments to the end ...)
template <typename InstructionTy, typename ValueTy, typename CallSiteTy>
class StatepointBase {
  // The call (or, eventually, invoke) being wrapped.
  CallSiteTy StatepointCS;

  // This is a thin value wrapper (like CallSite); heap allocation is
  // deliberately disallowed.
  void *operator new(size_t, unsigned) LLVM_DELETED_FUNCTION;
  void *operator new(size_t s) LLVM_DELETED_FUNCTION;

protected:
  explicit StatepointBase(InstructionTy *I) : StatepointCS(I) {
    assert(isStatepoint(I));
  }
  explicit StatepointBase(CallSiteTy CS) : StatepointCS(CS) {
    assert(isStatepoint(CS));
  }

public:
  typedef typename CallSiteTy::arg_iterator arg_iterator;

  /// Return the underlying CallSite.
  CallSiteTy getCallSite() {
    return StatepointCS;
  }

  /// Return the value actually being called or invoked.
  ValueTy *actualCallee() {
    return StatepointCS.getArgument(0);
  }
  /// Number of arguments to be passed to the actual callee.
  int numCallArgs() {
    return cast<ConstantInt>(StatepointCS.getArgument(1))->getZExtValue();
  }
  /// Number of additional arguments excluding those intended
  /// for garbage collection.
  int numTotalVMSArgs() {
    return cast<ConstantInt>(StatepointCS.getArgument(3 + numCallArgs()))->getZExtValue();
  }

  /// First argument forwarded to the actual callee.
  typename CallSiteTy::arg_iterator call_args_begin() {
    // 3 = callTarget, #callArgs, flag
    int Offset = 3;
    assert(Offset <= (int)StatepointCS.arg_size());
    return StatepointCS.arg_begin() + Offset;
  }
  /// One past the last argument forwarded to the actual callee.
  typename CallSiteTy::arg_iterator call_args_end() {
    int Offset = 3 + numCallArgs();
    assert(Offset <= (int)StatepointCS.arg_size());
    return StatepointCS.arg_begin() + Offset;
  }
  /// range adapter for call arguments
  iterator_range<arg_iterator> call_args() {
    return iterator_range<arg_iterator>(call_args_begin(), call_args_end());
  }

  /// Start of the VM state section.  Note this points at the operand
  /// holding the total VM state argument count (see numTotalVMSArgs),
  /// not at the first VM state value itself.
  typename CallSiteTy::arg_iterator vm_state_begin() {
    return call_args_end();
  }
  /// One past the last VM state argument.
  typename CallSiteTy::arg_iterator vm_state_end() {
    // +1 accounts for the argument-count operand at vm_state_begin().
    int Offset = 3 + numCallArgs() + 1 + numTotalVMSArgs();
    assert(Offset <= (int)StatepointCS.arg_size());
    return StatepointCS.arg_begin() + Offset;
  }
  /// range adapter for vm state arguments
  iterator_range<arg_iterator> vm_state_args() {
    return iterator_range<arg_iterator>(vm_state_begin(), vm_state_end());
  }

  /// First operand of the first frame's stack section of the VM state
  /// (skips the six leading header operands enumerated below).
  typename CallSiteTy::arg_iterator first_vm_state_stack_begin() {
    // 6 = numTotalVMSArgs, 1st_objectID, 1st_bci,
    //     1st_#stack, 1st_#local, 1st_#monitor
    return vm_state_begin() + 6;
  }

  /// First gc pointer argument (everything after the VM state section).
  typename CallSiteTy::arg_iterator gc_args_begin() {
    return vm_state_end();
  }
  /// One past the last gc pointer argument.
  typename CallSiteTy::arg_iterator gc_args_end() {
    return StatepointCS.arg_end();
  }
  /// range adapter for gc arguments
  iterator_range<arg_iterator> gc_args() {
    return iterator_range<arg_iterator>(gc_args_begin(), gc_args_end());
  }

#ifndef NDEBUG
  /// Sanity-check the argument layout of the wrapped statepoint.
  void verify() {
    // The internal asserts in the iterator accessors do the rest.
    (void)call_args_begin();
    (void)call_args_end();
    (void)vm_state_begin();
    (void)vm_state_end();
    (void)gc_args_begin();
    (void)gc_args_end();
  }
#endif
};
/// A specialization of its base class for read-only access
/// to a gc.statepoint.
class ImmutableStatepoint
    : public StatepointBase<const Instruction, const Value,
                            ImmutableCallSite> {
  typedef StatepointBase<const Instruction, const Value, ImmutableCallSite>
      Base;

public:
  explicit ImmutableStatepoint(const Instruction *I) : Base(I) {}
  explicit ImmutableStatepoint(ImmutableCallSite CS) : Base(CS) {}
};
/// A specialization of its base class for read-write access
/// to a gc.statepoint.
class Statepoint : public StatepointBase<Instruction, Value, CallSite> {
  typedef StatepointBase<Instruction, Value, CallSite> Base;

public:
  explicit Statepoint(Instruction *I) : Base(I) {}
  explicit Statepoint(CallSite CS) : Base(CS) {}
};
/// Wraps a call to a gc.relocate and provides access to its operands.
/// TODO: This should likely be refactored to resemble the wrappers in
/// IntrinsicInst.h.
class GCRelocateOperands {
  ImmutableCallSite RelocateCS;

public:
  GCRelocateOperands(const User *U)
      : GCRelocateOperands(cast<Instruction>(U)) {}
  GCRelocateOperands(const Instruction *inst) : RelocateCS(inst) {
    assert(isGCRelocate(inst));
  }
  GCRelocateOperands(CallSite CS) : RelocateCS(CS) {
    assert(isGCRelocate(CS));
  }

  /// The statepoint with which this gc.relocate is associated.
  const Instruction *statepoint() {
    return cast<Instruction>(RelocateCS.getArgument(0));
  }
  /// The index into the associated statepoint's argument list
  /// which contains the base pointer of the pointer whose
  /// relocation this gc.relocate describes.
  int basePtrIndex() {
    return cast<ConstantInt>(RelocateCS.getArgument(1))->getZExtValue();
  }
  /// The index into the associated statepoint's argument list which
  /// contains the pointer whose relocation this gc.relocate describes.
  int derivedPtrIndex() {
    return cast<ConstantInt>(RelocateCS.getArgument(2))->getZExtValue();
  }
  /// The base pointer operand, looked up via basePtrIndex() in the
  /// associated statepoint's argument list.
  const Value *basePtr() {
    ImmutableCallSite CS(statepoint());
    return *(CS.arg_begin() + basePtrIndex());
  }
  /// The (possibly derived) pointer being relocated, looked up via
  /// derivedPtrIndex() in the associated statepoint's argument list.
  const Value *derivedPtr() {
    ImmutableCallSite CS(statepoint());
    return *(CS.arg_begin() + derivedPtrIndex());
  }
};
}
#endif

View File

@ -19,6 +19,7 @@ add_llvm_library(LLVMSelectionDAG
SelectionDAGDumper.cpp
SelectionDAGISel.cpp
SelectionDAGPrinter.cpp
StatepointLowering.cpp
ScheduleDAGVLIW.cpp
TargetLowering.cpp
TargetSelectionDAGInfo.cpp

View File

@ -273,6 +273,7 @@ void FunctionLoweringInfo::clear() {
ArgDbgValues.clear();
ByValArgFrameIndexMap.clear();
RegFixups.clear();
StatepointStackSlots.clear();
PreferredExtendType.clear();
}

View File

@ -16,6 +16,7 @@
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
@ -46,6 +47,7 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@ -884,6 +886,7 @@ void SelectionDAGBuilder::clear() {
CurInst = nullptr;
HasTailCall = false;
SDNodeOrder = LowestSDNodeOrder;
StatepointLowering.clear();
}
/// clearDanglingDebugInfo - Clear the dangling debug information
@ -5460,6 +5463,20 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
visitPatchpoint(&I);
return nullptr;
}
case Intrinsic::experimental_gc_statepoint: {
visitStatepoint(I);
return nullptr;
}
case Intrinsic::experimental_gc_result_int:
case Intrinsic::experimental_gc_result_float:
case Intrinsic::experimental_gc_result_ptr: {
visitGCResult(I);
return nullptr;
}
case Intrinsic::experimental_gc_relocate: {
visitGCRelocate(I);
return nullptr;
}
}
}

View File

@ -22,6 +22,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetLowering.h"
#include "StatepointLowering.h"
#include <vector>
namespace llvm {
@ -115,6 +116,10 @@ public:
/// get simple disambiguation between loads without worrying about alias
/// analysis.
SmallVector<SDValue, 8> PendingLoads;
/// State used while lowering a statepoint sequence (gc_statepoint,
/// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
StatepointLoweringState StatepointLowering;
private:
/// PendingExports - CopyToReg nodes that copy values to virtual registers
@ -613,6 +618,13 @@ public:
N = NewN;
}
/// Erase any SDValue previously recorded for \p V in the NodeMap.
/// This is to support the hack in lowerCallFromStatepoint which
/// temporarily registers an IR instruction that is later deleted;
/// should be removed once that hack is resolved.
void removeValue(const Value *V) {
  // DenseMap::erase is a no-op for absent keys, so the previous
  // count()-then-erase() double lookup is unnecessary.
  NodeMap.erase(V);
}
void setUnusedArgValue(const Value *V, SDValue NewN) {
SDValue &N = UnusedArgNodeMap[V];
assert(!N.getNode() && "Already set a value for this node!");
@ -785,6 +797,11 @@ private:
void visitPatchpoint(ImmutableCallSite CS,
MachineBasicBlock *LandingPad = nullptr);
// These three are implemented in StatepointLowering.cpp
void visitStatepoint(const CallInst &I);
void visitGCRelocate(const CallInst &I);
void visitGCResult(const CallInst &I);
// UserOp1 is an IR-level placeholder opcode; per the message below it
// must have been eliminated before instruction selection runs.
void visitUserOp1(const Instruction &I) {
  llvm_unreachable("UserOp1 should not exist at instruction selection time!");
}

View File

@ -0,0 +1,640 @@
//===-- StatepointLowering.cpp - SDAGBuilder's statepoint code -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes support code use by SelectionDAGBuilder when lowering a
// statepoint sequence in SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#include "StatepointLowering.h"
#include "SelectionDAGBuilder.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "statepoint-lowering"

// Statistics reported under -stats.  Note: the last description had a
// typo ("singe") which would show up verbatim in the statistics output.
STATISTIC(NumSlotsAllocatedForStatepoints,
          "Number of stack slots allocated for statepoints");
STATISTIC(NumOfStatepoints, "Number of statepoint nodes encountered");
STATISTIC(StatepointMaxSlotsRequired,
          "Maximum number of stack slots required for a single statepoint");
/// Prepare per-statepoint state: require that the previous statepoint
/// sequence finished, reset the location caches, and re-sync the
/// slot-allocation bitmap with FunctionLoweringInfo's slot list.
void
StatepointLoweringState::startNewStatepoint(SelectionDAGBuilder &Builder) {
  // Consistency check
  assert(PendingGCRelocateCalls.empty() &&
         "Trying to visit statepoint before finished processing previous one");
  Locations.clear();
  RelocLocations.clear();
  NextSlotToAllocate = 0;
  // Need to resize this on each safepoint - we need the two to stay in
  // sync and the clear patterns of a SelectionDAGBuilder have no relation
  // to FunctionLoweringInfo.  assign() resizes and resets every element
  // to "unallocated" in one step, replacing the resize-then-loop idiom.
  AllocatedStackSlots.assign(Builder.FuncInfo.StatepointStackSlots.size(),
                             false);
}
/// Reset all tracked lowering state; invoked from
/// SelectionDAGBuilder::clear() between lowering units.
void StatepointLoweringState::clear() {
  Locations.clear();
  RelocLocations.clear();
  AllocatedStackSlots.clear();
  // A pending gc.relocate here means a statepoint sequence was left
  // half-lowered.
  assert(PendingGCRelocateCalls.empty() &&
         "cleared before statepoint sequence completed");
}
/// Hand out a stack slot for spilling a value of type \p ValueType at the
/// current statepoint, preferring to reuse a slot created by an earlier
/// statepoint; creates and registers a fresh slot only when none is free.
SDValue
StatepointLoweringState::allocateStackSlot(EVT ValueType,
                                           SelectionDAGBuilder &Builder) {
  NumSlotsAllocatedForStatepoints++;

  // The basic scheme here is to first look for a previously created stack slot
  // which is not in use (accounting for the fact arbitrary slots may already
  // be reserved), or to create a new stack slot and use it.

  // If this doesn't succeed in 40000 iterations, something is seriously wrong
  for (int i = 0; i < 40000; i++) {
    assert(Builder.FuncInfo.StatepointStackSlots.size() ==
           AllocatedStackSlots.size() &&
           "broken invariant");
    const size_t NumSlots = AllocatedStackSlots.size();
    assert(NextSlotToAllocate <= NumSlots && "broken invariant");

    if (NextSlotToAllocate >= NumSlots) {
      assert(NextSlotToAllocate == NumSlots);
      // record stats
      if (NumSlots + 1 > StatepointMaxSlotsRequired) {
        StatepointMaxSlotsRequired = NumSlots + 1;
      }

      // All existing slots are taken: create a fresh stack temporary and
      // register it so later statepoints can reuse it.
      SDValue SpillSlot = Builder.DAG.CreateStackTemporary(ValueType);
      const unsigned FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
      Builder.FuncInfo.StatepointStackSlots.push_back(FI);
      AllocatedStackSlots.push_back(true);
      return SpillSlot;
    }

    if (!AllocatedStackSlots[NextSlotToAllocate]) {
      // Found an existing slot this statepoint hasn't used yet; claim it.
      const int FI = Builder.FuncInfo.StatepointStackSlots[NextSlotToAllocate];
      AllocatedStackSlots[NextSlotToAllocate] = true;
      return Builder.DAG.getFrameIndex(FI, ValueType);
    }
    // Note: We deliberately choose to advance this only on the failing path.
    // Doing so on the succeeding path involves a bit of complexity that caused
    // a minor bug previously.  Unless performance shows this matters, please
    // keep this code as simple as possible.
    NextSlotToAllocate++;
  }
  llvm_unreachable("infinite loop?");
}
/// Try to find existing copies of the incoming values in stack slots used for
/// statepoint spilling.  If we can find a spill slot for the incoming value,
/// mark that slot as allocated, and reuse the same slot for this safepoint.
/// This helps to avoid series of loads and stores that only serve to
/// reshuffle values on the stack between calls.
static void reservePreviousStackSlotForValue(SDValue Incoming,
                                             SelectionDAGBuilder &Builder) {

  if (isa<ConstantSDNode>(Incoming) || isa<FrameIndexSDNode>(Incoming)) {
    // We won't need to spill this, so no need to check for previously
    // allocated stack slots
    return;
  }

  SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);
  if (Loc.getNode()) {
    // Already assigned a location this statepoint (duplicates in input).
    return;
  }

  // Search back for the load from a stack slot pattern to find the original
  // slot we allocated for this value.  We could extend this to deal with
  // simple modification patterns, but simply dealing with trivial load/store
  // sequences helps a lot already.
  if (LoadSDNode *Load = dyn_cast<LoadSDNode>(Incoming)) {
    if (auto *FI = dyn_cast<FrameIndexSDNode>(Load->getBasePtr())) {
      const int Index = FI->getIndex();
      auto Itr = std::find(Builder.FuncInfo.StatepointStackSlots.begin(),
                           Builder.FuncInfo.StatepointStackSlots.end(), Index);
      if (Itr == Builder.FuncInfo.StatepointStackSlots.end()) {
        // not one of the lowering stack slots, can't reuse!
        // TODO: Actually, we probably could reuse the stack slot if the value
        // hasn't changed at all, but we'd need to look for intervening writes
        return;
      } else {
        // This is one of our dedicated lowering slots
        const int Offset =
            std::distance(Builder.FuncInfo.StatepointStackSlots.begin(), Itr);
        if (Builder.StatepointLowering.isStackSlotAllocated(Offset)) {
          // stack slot already assigned to someone else, can't use it!
          // TODO: currently we reserve space for gc arguments after doing
          // normal allocation for deopt arguments.  We should reserve for
          // _all_ deopt and gc arguments, then start allocating.  This
          // will prevent some moves being inserted when vm state changes,
          // but gc state doesn't between two calls.
          return;
        }
        // Reserve this stack slot
        Builder.StatepointLowering.reserveStackSlot(Offset);
      }

      // Cache this slot so we find it when going through the normal
      // assignment loop.
      SDValue Loc =
          Builder.DAG.getTargetFrameIndex(Index, Incoming.getValueType());
      Builder.StatepointLowering.setLocation(Incoming, Loc);
    }
  }

  // TODO: handle case where a reloaded value flows through a phi to
  // another safepoint.  e.g.
  // bb1:
  //  a' = relocated...
  // bb2: % pred: bb1, bb3, bb4, etc.
  //  a_phi = phi(a', ...)
  // statepoint ... a_phi
  // NOTE: This will require reasoning about cross basic block values.  This is
  // decidedly non trivial and this might not be the right place to do it.  We
  // don't really have the information we need here...

  // TODO: handle simple updates.  If a value is modified and the original
  // value is no longer live, it would be nice to put the modified value in
  // the same slot.  This allows folding of the memory accesses for some
  // instructions types (like an increment).
  // statepoint (i)
  // i1 = i+1
  // statepoint (i1)
}
/// Remove any duplicate (as SDValues) from the derived pointer pairs.  This
/// is not required for correctness.  Its purpose is to reduce the size of
/// the StackMap section.  It has no effect on the number of spill slots
/// required or the actual lowering.
static void removeDuplicatesGCPtrs(SmallVectorImpl<const Value *> &Bases,
                                   SmallVectorImpl<const Value *> &Ptrs,
                                   SmallVectorImpl<const Value *> &Relocs,
                                   SelectionDAGBuilder &Builder) {

  // This is horribly inefficient, but I don't care right now
  SmallSet<SDValue, 64> Seen;

  SmallVector<const Value *, 64> NewBases, NewPtrs, NewRelocs;
  for (size_t i = 0; i < Ptrs.size(); i++) {
    // Deduplicate on the derived pointer's lowered SDValue; the whole
    // (base, ptr, reloc) triple is kept or dropped together.
    SDValue SD = Builder.getValue(Ptrs[i]);
    // Only add non-duplicates
    if (Seen.count(SD) == 0) {
      NewBases.push_back(Bases[i]);
      NewPtrs.push_back(Ptrs[i]);
      NewRelocs.push_back(Relocs[i]);
    }
    Seen.insert(SD);
  }
  assert(Bases.size() >= NewBases.size());
  assert(Ptrs.size() >= NewPtrs.size());
  assert(Relocs.size() >= NewRelocs.size());
  // Copy the filtered lists back into the caller's vectors.
  Bases = NewBases;
  Ptrs = NewPtrs;
  Relocs = NewRelocs;
  assert(Ptrs.size() == Bases.size());
  assert(Ptrs.size() == Relocs.size());
}
/// Extract call from statepoint, lower it and return pointer to the
/// call node.  Also update NodeMap so that getValue(statepoint) will
/// reference lowered call result.
static SDNode *lowerCallFromStatepoint(const CallInst &CI,
                                       SelectionDAGBuilder &Builder) {
  // Use cast<> rather than dyn_cast<> below: these operands are required
  // to have these types, and dereferencing a failed (null) dyn_cast<>
  // result would be undefined behavior rather than a clean assertion
  // failure.  The dyn_cast<ConstantInt> was outside any assert and would
  // have null-dereferenced in release builds on malformed input.
  assert(Intrinsic::experimental_gc_statepoint ==
         cast<IntrinsicInst>(&CI)->getIntrinsicID() &&
         "function called must be the statepoint function");

  int NumCallArgs = cast<ConstantInt>(CI.getArgOperand(1))->getZExtValue();
  assert(NumCallArgs >= 0 && "non-negative");
  (void)NumCallArgs; // Only used by the assert; silence NDEBUG warning.

  ImmutableStatepoint StatepointOperands(&CI);

  // Lower the actual call itself - This is a bit of a hack, but we want to
  // avoid modifying the actual lowering code.  This is similar in intent to
  // the LowerCallOperands mechanism used by PATCHPOINT, but is structured
  // differently.  Hopefully, this is slightly more robust w.r.t. calling
  // convention, return values, and other function attributes.
  Value *ActualCallee = const_cast<Value *>(StatepointOperands.actualCallee());
#ifndef NDEBUG
  StatepointOperands.verify();
#endif

  std::vector<Value *> Args;
  CallInst::const_op_iterator arg_begin = StatepointOperands.call_args_begin();
  CallInst::const_op_iterator arg_end = StatepointOperands.call_args_end();
  Args.insert(Args.end(), arg_begin, arg_end);
  // TODO: remove the creation of a new instruction!  We should not be
  // modifying the IR (even temporarily) at this point.
  CallInst *Tmp = CallInst::Create(ActualCallee, Args);
  Tmp->setTailCall(CI.isTailCall());
  Tmp->setCallingConv(CI.getCallingConv());
  Tmp->setAttributes(CI.getAttributes());
  Builder.LowerCallTo(Tmp, Builder.getValue(ActualCallee), false);

  // Handle the return value of the call iff any.
  const bool HasDef = !Tmp->getType()->isVoidTy();
  if (HasDef) {
    // The value of the statepoint itself will be the value of call itself.
    // We'll replace the actual call node shortly.  gc_result will grab
    // this value.
    Builder.setValue(&CI, Builder.getValue(Tmp));
  } else {
    // The token value is never used from here on, just generate a poison
    // value.
    Builder.setValue(&CI, Builder.DAG.getIntPtrConstant(-1));
  }
  // Remove the fake entry we created so we don't have a hanging reference
  // after we delete this node.
  Builder.removeValue(Tmp);
  delete Tmp;
  Tmp = nullptr;

  // Search for the call node.  The following code is essentially reverse
  // engineering X86's LowerCallTo.
  SDNode *CallNode = nullptr;

  // We just emitted a call, so it should be last thing generated
  SDValue Chain = Builder.DAG.getRoot();

  // Find closest CALLSEQ_END walking back through lowered nodes if needed
  SDNode *CallEnd = Chain.getNode();
  int Sanity = 0;
  while (CallEnd->getOpcode() != ISD::CALLSEQ_END) {
    CallEnd = CallEnd->getGluedNode();
    assert(CallEnd && "Can not find call node");
    assert(Sanity < 20 && "should have found call end already");
    Sanity++;
  }
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  assert(CallEnd->getGluedNode());

  // Step back inside the CALLSEQ
  CallNode = CallEnd->getGluedNode();
  return CallNode;
}
/// Collect all gc pointers coming into statepoint intrinsic, clean them up,
/// and return three arrays:
///   Bases - base pointers incoming to this statepoint
///   Ptrs - derived pointers incoming to this statepoint
///   Relocs - the gc_relocate corresponding to each base/ptr pair
/// Elements of these arrays should be in one-to-one correspondence with
/// each other i.e Bases[i], Ptrs[i] are from the same gcrelocate call
static void
getIncomingStatepointGCValues(SmallVectorImpl<const Value *> &Bases,
                              SmallVectorImpl<const Value *> &Ptrs,
                              SmallVectorImpl<const Value *> &Relocs,
                              ImmutableCallSite Statepoint,
                              SelectionDAGBuilder &Builder) {
  // Search for relocated pointers.  Note that working backwards from the
  // gc_relocates ensures that we only get pairs which are actually relocated
  // and used after the statepoint.
  // TODO: This logic should probably become a utility function in
  // Statepoint.h
  for (const User *U : cast<CallInst>(Statepoint.getInstruction())->users()) {
    if (!isGCRelocate(U)) {
      continue;
    }
    GCRelocateOperands relocateOpers(U);
    Relocs.push_back(cast<Value>(U));
    Bases.push_back(relocateOpers.basePtr());
    Ptrs.push_back(relocateOpers.derivedPtr());
  }

  // Remove any redundant llvm::Values which map to the same SDValue as
  // another input.  Also has the effect of removing duplicates in the
  // original llvm::Value input list as well.  This is a useful optimization
  // for reducing the size of the StackMap section.  It has no other impact.
  removeDuplicatesGCPtrs(Bases, Ptrs, Relocs, Builder);

  assert(Bases.size() == Ptrs.size() && Ptrs.size() == Relocs.size());
}
/// Spill a value incoming to the statepoint.  It might be either part of
/// vmstate or gcstate.  In both cases unconditionally spill it on the stack
/// unless it is a null constant.  Return pair with first element being frame
/// index containing saved value and second element with outgoing chain from
/// the emitted store
static std::pair<SDValue, SDValue>
spillIncomingStatepointValue(SDValue Incoming, SDValue Chain,
                             SelectionDAGBuilder &Builder) {
  SDValue Loc = Builder.StatepointLowering.getLocation(Incoming);

  // Emit new store if we didn't do it for this ptr before
  if (!Loc.getNode()) {
    Loc = Builder.StatepointLowering.allocateStackSlot(Incoming.getValueType(),
                                                       Builder);
    assert(isa<FrameIndexSDNode>(Loc));
    int Index = cast<FrameIndexSDNode>(Loc)->getIndex();
    // We use TargetFrameIndex so that isel will not select it into LEA
    Loc = Builder.DAG.getTargetFrameIndex(Index, Incoming.getValueType());

    // TODO: We can create TokenFactor node instead of
    //       chaining stores one after another, this may allow
    //       a bit more optimal scheduling for them
    Chain = Builder.DAG.getStore(Chain, Builder.getCurSDLoc(), Incoming, Loc,
                                 MachinePointerInfo::getFixedStack(Index),
                                 false, false, 0);

    // Record the slot so later uses of this SDValue reuse the same spill.
    Builder.StatepointLowering.setLocation(Incoming, Loc);
  }

  assert(Loc.getNode());
  return std::make_pair(Loc, Chain);
}
/// Lower a single value incoming to a statepoint node.  This value can be
/// either a deopt value or a gc value, the handling is the same.  We special
/// case constants and allocas, then fall back to spilling if required.
/// Appends the lowered operand(s) to \p Ops and updates the DAG root with
/// the chain of any emitted spill store.
static void lowerIncomingStatepointValue(SDValue Incoming,
                                         SmallVectorImpl<SDValue> &Ops,
                                         SelectionDAGBuilder &Builder) {
  SDValue Chain = Builder.getRoot();

  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Incoming)) {
    // If the original value was a constant, make sure it gets recorded as
    // such in the stackmap.  This is required so that the consumer can
    // parse any internal format to the deopt state.  It also handles null
    // pointers and other constant pointers in GC states
    Ops.push_back(
        Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
    Ops.push_back(Builder.DAG.getTargetConstant(C->getSExtValue(), MVT::i64));
  } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Incoming)) {
    // This handles allocas as arguments to the statepoint
    const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
    Ops.push_back(
        Builder.DAG.getTargetFrameIndex(FI->getIndex(), TLI.getPointerTy()));
  } else {
    // Otherwise, locate a spill slot and explicitly spill it so it
    // can be found by the runtime later.  We currently do not support
    // tracking values through callee saved registers to their eventual
    // spill location.  This would be a useful optimization, but would
    // need to be optional since it requires a lot of complexity on the
    // runtime side which not all would support.
    std::pair<SDValue, SDValue> Res =
        spillIncomingStatepointValue(Incoming, Chain, Builder);
    Ops.push_back(Res.first);
    Chain = Res.second;
  }

  // Publish the (possibly updated) chain so spill stores stay ordered.
  Builder.DAG.setRoot(Chain);
}
/// Lower deopt state and gc pointer arguments of the statepoint.  The actual
/// lowering is described in lowerIncomingStatepointValue.  This function is
/// responsible for lowering everything in the right position and playing
/// some tricks to avoid redundant stack manipulation where possible.  On
/// completion, 'Ops' will contain ready to use operands for machine code
/// statepoint.  The chain nodes will have already been created and the DAG
/// root will be set to the last value spilled (if any were).
static void lowerStatepointMetaArgs(SmallVectorImpl<SDValue> &Ops,
                                    ImmutableStatepoint Statepoint,
                                    SelectionDAGBuilder &Builder) {
  // Lower the deopt and gc arguments for this statepoint.  Layout will
  // be: deopt argument length, deopt arguments.., gc arguments...

  SmallVector<const Value *, 64> Bases, Ptrs, Relocations;
  getIncomingStatepointGCValues(Bases, Ptrs, Relocations,
                                Statepoint.getCallSite(), Builder);

  // Before we actually start lowering (and allocating spill slots for
  // values), reserve any stack slots which we judge to be profitable to
  // reuse for a particular value.  This is purely an optimization over the
  // code below and doesn't change semantics at all.  It is important for
  // performance that we reserve slots for both deopt and gc values before
  // lowering either.
  // The +1 skips the leading operand holding the VM state argument count.
  for (auto I = Statepoint.vm_state_begin() + 1, E = Statepoint.vm_state_end();
       I != E; ++I) {
    Value *V = *I;
    SDValue Incoming = Builder.getValue(V);
    reservePreviousStackSlotForValue(Incoming, Builder);
  }
  for (unsigned i = 0; i < Bases.size() * 2; ++i) {
    // Odd i selects a base pointer, even i a derived pointer; order is
    // irrelevant here since we are only reserving slots.
    const Value *V = i % 2 ? Bases[i / 2] : Ptrs[i / 2];
    SDValue Incoming = Builder.getValue(V);
    reservePreviousStackSlotForValue(Incoming, Builder);
  }

  // First, prefix the list with the number of unique values to be
  // lowered.  Note that this is the number of *Values* not the
  // number of SDValues required to lower them.
  const int NumVMSArgs = Statepoint.numTotalVMSArgs();
  Ops.push_back(
      Builder.DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
  Ops.push_back(Builder.DAG.getTargetConstant(NumVMSArgs, MVT::i64));

  assert(NumVMSArgs + 1 == std::distance(Statepoint.vm_state_begin(),
                                         Statepoint.vm_state_end()));

  // The vm state arguments are lowered in an opaque manner.  We do
  // not know what type of values are contained within.  We skip the
  // first one since that happens to be the total number we lowered
  // explicitly just above.  We could have left it in the loop and
  // not done it explicitly, but it's far easier to understand this
  // way.
  for (auto I = Statepoint.vm_state_begin() + 1, E = Statepoint.vm_state_end();
       I != E; ++I) {
    const Value *V = *I;
    SDValue Incoming = Builder.getValue(V);
    lowerIncomingStatepointValue(Incoming, Ops, Builder);
  }

  // Finally, go ahead and lower all the gc arguments.  There's no prefixed
  // length for this one; the base and derived pointers are emitted in
  // interleaved pairs.
  // NOTE(review): as written, even i selects Ptrs[i/2] (the derived
  // pointer) and odd i selects Bases[i/2], i.e. the emitted order is
  // (ptr[0], base[0], ptr[1], base[1], ...) — the opposite of the
  // (base[0], ptr[0], ...) layout the original comment claimed.  Confirm
  // which interleaving the StackMap consumer expects.
  for (unsigned i = 0; i < Bases.size() * 2; ++i) {
    const Value *V = i % 2 ? Bases[i / 2] : Ptrs[i / 2];
    SDValue Incoming = Builder.getValue(V);
    lowerIncomingStatepointValue(Incoming, Ops, Builder);
  }
}
void SelectionDAGBuilder::visitStatepoint(const CallInst &CI) {
// The basic scheme here is that information about both the original call and
// the safepoint is encoded in the CallInst. We create a temporary call and
// lower it, then reverse engineer the calling sequence.
// Check some preconditions for sanity
assert(isStatepoint(&CI) &&
"function called must be the statepoint function");
NumOfStatepoints++;
// Clear state
StatepointLowering.startNewStatepoint(*this);
#ifndef NDEBUG
// Consistency check
for (const User *U : CI.users()) {
const CallInst *Call = cast<CallInst>(U);
if (isGCRelocate(Call))
StatepointLowering.scheduleRelocCall(*Call);
}
#endif
ImmutableStatepoint ISP(&CI);
// Lower statepoint vmstate and gcstate arguments
SmallVector<SDValue, 10> LoweredArgs;
lowerStatepointMetaArgs(LoweredArgs, ISP, *this);
// Get call node, we will replace it later with statepoint
SDNode *CallNode = lowerCallFromStatepoint(CI, *this);
// Construct the actual STATEPOINT node with all the appropriate arguments
// and return values.
// TODO: Currently, all of these operands are being marked as read/write in
// PrologEpilougeInserter.cpp, we should special case the VMState arguments
// and flags to be read-only.
SmallVector<SDValue, 40> Ops;
// Calculate and push starting position of vmstate arguments
// Call Node: Chain, Target, {Args}, RegMask, [Glue]
SDValue Glue;
if (CallNode->getGluedNode()) {
// Glue is always last operand
Glue = CallNode->getOperand(CallNode->getNumOperands() - 1);
}
// Get number of arguments incoming directly into call node
unsigned NumCallRegArgs =
CallNode->getNumOperands() - (Glue.getNode() ? 4 : 3);
Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, MVT::i32));
// Add call target
SDValue CallTarget = SDValue(CallNode->getOperand(1).getNode(), 0);
Ops.push_back(CallTarget);
// Add call arguments
// Get position of register mask in the call
SDNode::op_iterator RegMaskIt;
if (Glue.getNode())
RegMaskIt = CallNode->op_end() - 2;
else
RegMaskIt = CallNode->op_end() - 1;
Ops.insert(Ops.end(), CallNode->op_begin() + 2, RegMaskIt);
// Add a leading constant argument with the Flags and the calling convention
// masked together
CallingConv::ID CallConv = CI.getCallingConv();
int Flags = dyn_cast<ConstantInt>(CI.getArgOperand(2))->getZExtValue();
assert(Flags == 0 && "not expected to be used");
Ops.push_back(DAG.getTargetConstant(StackMaps::ConstantOp, MVT::i64));
Ops.push_back(
DAG.getTargetConstant(Flags | ((unsigned)CallConv << 1), MVT::i64));
// Insert all vmstate and gcstate arguments
Ops.insert(Ops.end(), LoweredArgs.begin(), LoweredArgs.end());
// Add register mask from call node
Ops.push_back(*RegMaskIt);
// Add chain
Ops.push_back(CallNode->getOperand(0));
// Same for the glue, but we add it only if original call had it
if (Glue.getNode())
Ops.push_back(Glue);
// Compute return values
SmallVector<EVT, 21> ValueVTs;
ValueVTs.push_back(MVT::Other);
ValueVTs.push_back(MVT::Glue); // provide a glue output since we consume one
// as input. This allows someone else to chain
// off us as needed.
SDVTList NodeTys = DAG.getVTList(ValueVTs);
SDNode *StatepointMCNode = DAG.getMachineNode(TargetOpcode::STATEPOINT,
getCurSDLoc(), NodeTys, Ops);
// Replace original call
DAG.ReplaceAllUsesWith(CallNode, StatepointMCNode); // This may update Root
// Remove originall call node
DAG.DeleteNode(CallNode);
// DON'T set the root - under the assumption that it's already set past the
// inserted node we created.
// TODO: A better future implementation would be to emit a single variable
// argument, variable return value STATEPOINT node here and then hookup the
// return value of each gc.relocate to the respective output of the
// previously emitted STATEPOINT value. Unfortunately, this doesn't appear
// to actually be possible today.
}
void SelectionDAGBuilder::visitGCResult(const CallInst &CI) {
// The result value of the gc_result is simply the result of the actual
// call. We've already emitted this, so just grab the value.
Instruction *I = cast<Instruction>(CI.getArgOperand(0));
assert(isStatepoint(I) &&
"first argument must be a statepoint token");
setValue(&CI, getValue(I));
}
/// Lower a gc.relocate intrinsic: produce the post-statepoint value of a
/// relocated GC pointer, reloading it from its spill slot if this is the
/// first relocate to reference it.
void SelectionDAGBuilder::visitGCRelocate(const CallInst &CI) {
#ifndef NDEBUG
  // Consistency check: this relocate must have been scheduled by the
  // statepoint it hangs off of (see visitStatepoint).
  StatepointLowering.relocCallVisited(CI);
#endif

  GCRelocateOperands relocateOpers(&CI);
  SDValue SD = getValue(relocateOpers.derivedPtr());

  if (isa<ConstantSDNode>(SD) || isa<FrameIndexSDNode>(SD)) {
    // We didn't need to spill these special cases (constants and allocas).
    // See the handling in spillIncomingValueForStatepoint for detail.
    setValue(&CI, SD);
    return;
  }

  // Reuse the reload if another relocate of the same value already emitted it.
  SDValue Loc = StatepointLowering.getRelocLocation(SD);
  // Emit new load if we did not emit it before
  if (!Loc.getNode()) {
    SDValue SpillSlot = StatepointLowering.getLocation(SD);
    int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();

    // Be conservative: flush all pending loads
    // TODO: Probably we can be less restrictive on this,
    // it may allow more scheduling opportunities
    SDValue Chain = getRoot();

    Loc = DAG.getLoad(SpillSlot.getValueType(), getCurSDLoc(), Chain,
                      SpillSlot, MachinePointerInfo::getFixedStack(FI), false,
                      false, false, 0);

    StatepointLowering.setRelocLocation(SD, Loc);

    // Again, be conservative, don't emit pending loads; chaining the root
    // through the load keeps it ordered after the statepoint.
    DAG.setRoot(Loc.getValue(1));
  }

  assert(Loc.getNode());
  setValue(&CI, Loc);
}

View File

@ -0,0 +1,138 @@
//===-- StatepointLowering.h - SDAGBuilder's statepoint code -*- C++ -*---===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file includes support code used by SelectionDAGBuilder when lowering a
// statepoint sequence in SelectionDAG IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include <vector>
namespace llvm {
class SelectionDAGBuilder;
/// This class tracks both per-statepoint and per-selectiondag information.
/// For each statepoint it tracks locations of its gc values (incoming and
/// relocated) and list of gcreloc calls scheduled for visiting (this is
/// used for a debug mode consistency check only). The spill slot tracking
/// works in concert with information in FunctionLoweringInfo.
class StatepointLoweringState {
public:
  StatepointLoweringState() : NextSlotToAllocate(0) {
  }

  /// Reset all state tracking for a newly encountered safepoint.  Also
  /// performs some consistency checking.
  void startNewStatepoint(SelectionDAGBuilder &Builder);

  /// Clear the memory usage of this object.  This is called from
  /// SelectionDAGBuilder::clear.  We require this is never called in the
  /// midst of processing a statepoint sequence.
  void clear();

  /// Returns the spill location of a value incoming to the current
  /// statepoint.  Will return SDValue() if this value hasn't been
  /// spilled.  Otherwise, the value has already been spilled and no
  /// further action is required by the caller.
  SDValue getLocation(SDValue val) const {
    // Single lookup instead of count() followed by operator[].
    auto I = Locations.find(val);
    if (I == Locations.end())
      return SDValue();
    return I->second;
  }
  void setLocation(SDValue val, SDValue Location) {
    // insert() both checks for duplicates and stores in one map lookup.
    bool Inserted = Locations.insert(std::make_pair(val, Location)).second;
    (void)Inserted;
    assert(Inserted && "Trying to allocate already allocated location");
  }

  /// Returns the relocated value for a given input pointer.  Will
  /// return SDValue() if this value hasn't yet been reloaded from
  /// its stack slot after the statepoint.  Otherwise, the value
  /// has already been reloaded and the SDValue of that reload will
  /// be returned.  Note that VMState values are spilled but not
  /// reloaded (since they don't change at the safepoint unless
  /// also listed in the GC pointer section) and will thus never
  /// be in this map.
  SDValue getRelocLocation(SDValue val) const {
    auto I = RelocLocations.find(val);
    if (I == RelocLocations.end())
      return SDValue();
    return I->second;
  }
  void setRelocLocation(SDValue val, SDValue Location) {
    bool Inserted = RelocLocations.insert(std::make_pair(val, Location)).second;
    (void)Inserted;
    assert(Inserted && "Trying to allocate already allocated location");
  }

  /// Record the fact that we expect to encounter a given gc_relocate
  /// before the next statepoint.  If we don't see it, we'll report
  /// an assertion.
  void scheduleRelocCall(const CallInst &RelocCall) {
    PendingGCRelocateCalls.push_back(&RelocCall);
  }
  /// Remove this gc_relocate from the list we're expecting to see
  /// before the next statepoint.  If we weren't expecting to see
  /// it, we'll report an assertion.
  void relocCallVisited(const CallInst &RelocCall) {
    SmallVectorImpl<const CallInst *>::iterator itr =
        std::find(PendingGCRelocateCalls.begin(), PendingGCRelocateCalls.end(),
                  &RelocCall);
    assert(itr != PendingGCRelocateCalls.end() &&
           "Visited unexpected gcrelocate call");
    PendingGCRelocateCalls.erase(itr);
  }

  // TODO: Should add consistency tracking to ensure we encounter
  // expected gc_result calls too.

  /// Get a stack slot we can use to store a value of type ValueType.  This
  /// will hopefully be a recycled slot from another statepoint.
  SDValue allocateStackSlot(EVT ValueType, SelectionDAGBuilder &Builder);

  /// Mark the slot at the given offset in FunctionInfo's statepoint stack
  /// slot list as in use for the current statepoint.
  void reserveStackSlot(int Offset) {
    assert(Offset >= 0 && Offset < (int)AllocatedStackSlots.size() &&
           "out of bounds");
    assert(!AllocatedStackSlots[Offset] && "already reserved!");
    assert(NextSlotToAllocate <= (unsigned)Offset && "consistency!");
    AllocatedStackSlots[Offset] = true;
  }
  bool isStackSlotAllocated(int Offset) const {
    assert(Offset >= 0 && Offset < (int)AllocatedStackSlots.size() &&
           "out of bounds");
    return AllocatedStackSlots[Offset];
  }

private:
  /// Maps pre-relocation value (gc pointer directly incoming into statepoint)
  /// into its location (currently only stack slots)
  DenseMap<SDValue, SDValue> Locations;
  /// Map pre-relocated value into its new relocated location
  DenseMap<SDValue, SDValue> RelocLocations;

  /// A boolean indicator for each slot listed in the FunctionInfo as to
  /// whether it has been used in the current statepoint.  Since we try to
  /// preserve stack slots across safepoints, there can be gaps in which
  /// slots have been allocated.
  SmallVector<bool, 50> AllocatedStackSlots;

  /// Points just beyond the last slot known to have been allocated
  unsigned NextSlotToAllocate;

  /// Keep track of pending gcrelocate calls for consistency check
  SmallVector<const CallInst *, 10> PendingGCRelocateCalls;
};
} // end namespace llvm
#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_STATEPOINTLOWERING_H

View File

@ -36,6 +36,7 @@ add_llvm_library(LLVMCore
Pass.cpp
PassManager.cpp
PassRegistry.cpp
Statepoint.cpp
Type.cpp
TypeFinder.cpp
Use.cpp

62
lib/IR/Statepoint.cpp Normal file
View File

@ -0,0 +1,62 @@
//===-- IR/Statepoint.cpp -- gc.statepoint utilities --- -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/Function.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/IR/Statepoint.h"
using namespace std;
using namespace llvm;
bool llvm::isStatepoint(const ImmutableCallSite &CS) {
const Function *F = CS.getCalledFunction();
return (F && F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint);
}
/// Return true if this instruction is a call or invoke of the gc.statepoint
/// intrinsic.
bool llvm::isStatepoint(const Instruction *inst) {
  // Only call-like instructions can possibly be statepoints; anything else
  // is trivially not one.
  if (!isa<InvokeInst>(inst) && !isa<CallInst>(inst))
    return false;
  ImmutableCallSite CS(inst);
  return isStatepoint(CS);
}
/// Convenience overload of isStatepoint taking a reference instead of a
/// pointer.
bool llvm::isStatepoint(const Instruction &inst) {
  return isStatepoint(&inst);
}
/// Return true if this call site invokes the gc.relocate intrinsic.
bool llvm::isGCRelocate(const ImmutableCallSite &CS) {
  return isGCRelocate(CS.getInstruction());
}
bool llvm::isGCRelocate(const Instruction *inst) {
if (const CallInst *call = dyn_cast<CallInst>(inst)) {
if (const Function *F = call->getCalledFunction()) {
return F->getIntrinsicID() == Intrinsic::experimental_gc_relocate;
}
}
return false;
}
/// Return true if this call site invokes one of the gc.result intrinsics.
bool llvm::isGCResult(const ImmutableCallSite &CS) {
  return isGCResult(CS.getInstruction());
}
bool llvm::isGCResult(const Instruction *inst) {
if (const CallInst *call = cast<CallInst>(inst)) {
if (Function *F = call->getCalledFunction()) {
return (F->getIntrinsicID() == Intrinsic::experimental_gc_result_int ||
F->getIntrinsicID() == Intrinsic::experimental_gc_result_float ||
F->getIntrinsicID() == Intrinsic::experimental_gc_result_ptr);
}
}
return false;
}

View File

@ -18,6 +18,7 @@
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
@ -1165,6 +1166,14 @@ static bool isSafeToEliminateVarargsCast(const CallSite CS,
if (!CI->isLosslessCast())
return false;
// If this is a GC intrinsic, avoid munging types. We need types for
// statepoint reconstruction in SelectionDAG.
// TODO: This is probably something which should be expanded to all
// intrinsics since the entire point of intrinsics is that
// they are understandable by the optimizer.
if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
return false;
// The size of ByVal or InAlloca arguments is derived from the type, so we
// can't change to a type with a different size. If the size were
// passed explicitly we could avoid this check.

View File

@ -0,0 +1,74 @@
; RUN: llc < %s | FileCheck %s
; This file contains a collection of basic tests to ensure we didn't
; screw up normal call lowering when there are no deopt or gc arguments.
target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnu"
declare zeroext i1 @return_i1()
declare zeroext i32 @return_i32()
declare i32* @return_i32ptr()
declare float @return_float()
define i1 @test_i1_return() {
; CHECK-LABEL: test_i1_return
; This is just checking that a i1 gets lowered normally when there's no extra
; state arguments to the statepoint
; CHECK: pushq %rax
; CHECK: callq return_i1
; CHECK: popq %rdx
; CHECK: retq
entry:
  %safepoint_token = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 0)
  ; The gc.result must forward the wrapped call's return value unchanged.
  %call1 = call zeroext i1 @llvm.experimental.gc.result.int.i1(i32 %safepoint_token)
  ret i1 %call1
}
define i32 @test_i32_return() {
; CHECK-LABEL: test_i32_return
; Same as test_i1_return, but exercising an i32 return value.
; CHECK: pushq %rax
; CHECK: callq return_i32
; CHECK: popq %rdx
; CHECK: retq
entry:
  %safepoint_token = tail call i32 (i32 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i32f(i32 ()* @return_i32, i32 0, i32 0, i32 0)
  %call1 = call zeroext i32 @llvm.experimental.gc.result.int.i32(i32 %safepoint_token)
  ret i32 %call1
}
define i32* @test_i32ptr_return() {
; CHECK-LABEL: test_i32ptr_return
; Same as test_i1_return, but exercising a pointer return value.
; CHECK: pushq %rax
; CHECK: callq return_i32ptr
; CHECK: popq %rdx
; CHECK: retq
entry:
  %safepoint_token = tail call i32 (i32* ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_p0i32f(i32* ()* @return_i32ptr, i32 0, i32 0, i32 0)
  %call1 = call i32* @llvm.experimental.gc.result.ptr.p0i32(i32 %safepoint_token)
  ret i32* %call1
}
define float @test_float_return() {
; CHECK-LABEL: test_float_return
; Same as test_i1_return, but exercising a floating point return value.
; CHECK: pushq %rax
; CHECK: callq return_float
; CHECK: popq %rax
; CHECK: retq
entry:
  %safepoint_token = tail call i32 (float ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_f32f(float ()* @return_float, i32 0, i32 0, i32 0)
  %call1 = call float @llvm.experimental.gc.result.float.f32(i32 %safepoint_token)
  ret float %call1
}
declare i32 @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()*, i32, i32, ...)
declare i1 @llvm.experimental.gc.result.int.i1(i32)
declare i32 @llvm.experimental.gc.statepoint.p0f_i32f(i32 ()*, i32, i32, ...)
declare i32 @llvm.experimental.gc.result.int.i32(i32)
declare i32 @llvm.experimental.gc.statepoint.p0f_p0i32f(i32* ()*, i32, i32, ...)
declare i32* @llvm.experimental.gc.result.ptr.p0i32(i32)
declare i32 @llvm.experimental.gc.statepoint.p0f_f32f(float ()*, i32, i32, ...)
declare float @llvm.experimental.gc.result.float.f32(i32)

View File

@ -0,0 +1,60 @@
; RUN: llc < %s | FileCheck %s
target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnu"
; This test is checking to make sure that we reuse the same stack slots
; for GC values spilled over two different call sites. Since the order
; of GC arguments differ, naive lowering code would insert loads and
; stores to rearrange items on the stack. We need to make sure (for
; performance) that this doesn't happen.
define i32 @back_to_back_calls(i32* %a, i32* %b, i32* %c) #1 {
; CHECK-LABEL: back_to_back_calls
; The exact stores don't matter, but there need to be three stack slots created
; CHECK: movq %rdx, 16(%rsp)
; CHECK: movq %rdi, 8(%rsp)
; CHECK: movq %rsi, (%rsp)
  %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32* %a, i32* %b, i32* %c)
  %a1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 9)
  %b1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 10)
  %c1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 11)
; CHECK: callq
; This is the key check. There should NOT be any memory moves here
; CHECK-NOT: movq
  ; Note the second statepoint lists the gc pointers in reverse order; the
  ; slot-reuse logic should still keep each value in its original slot.
  %safepoint_token2 = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32* %c1, i32* %b1, i32* %a1)
  %a2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 11)
  %b2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 10)
  %c2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 9)
; CHECK: callq
  ret i32 1
}
; This test simply checks that minor changes in vm state don't prevent slots
; being reused for gc values.
define i32 @reserve_first(i32* %a, i32* %b, i32* %c) #1 {
; CHECK-LABEL: reserve_first
; The exact stores don't matter, but there need to be three stack slots created
; CHECK: movq %rdx, 16(%rsp)
; CHECK: movq %rdi, 8(%rsp)
; CHECK: movq %rsi, (%rsp)
  %safepoint_token = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32 0, i32 -1, i32 0, i32 0, i32 0, i32* %a, i32* %b, i32* %c)
  %a1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 9)
  %b1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 10)
  %c1 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token, i32 9, i32 11)
; CHECK: callq
; This is the key check. There should NOT be any memory moves here
; CHECK-NOT: movq
  ; Unlike back_to_back_calls, the second statepoint also lists %a1/%c1 as
  ; vm state; that must not defeat gc slot reuse.
  %safepoint_token2 = tail call i32 (void ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()* undef, i32 0, i32 0, i32 5, i32* %a1, i32 0, i32* %c1, i32 0, i32 0, i32* %c1, i32* %b1, i32* %a1)
  %a2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 11)
  %b2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 10)
  %c2 = tail call coldcc i32* @llvm.experimental.gc.relocate.p0i32(i32 %safepoint_token2, i32 9, i32 9)
; CHECK: callq
  ret i32 1
}
; Function Attrs: nounwind
declare i32* @llvm.experimental.gc.relocate.p0i32(i32, i32, i32) #3
declare i32 @llvm.experimental.gc.statepoint.p0f_isVoidf(void ()*, i32, i32, ...)
attributes #1 = { uwtable }

View File

@ -0,0 +1,111 @@
; RUN: llc < %s | FileCheck %s
; This test is a sanity check to ensure statepoints are generating StackMap
; sections correctly. This is not intended to be a rigorous test of the
; StackMap format (see the stackmap tests for that).
target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-pc-linux-gnu"
declare zeroext i1 @return_i1()
define i1 @test(i32 addrspace(1)* %ptr) {
; CHECK-LABEL: test
; Do we see one spill for the local value and the store to the
; alloca?
; CHECK: subq $24, %rsp
; CHECK: movq $0, 8(%rsp)
; CHECK: movq %rdi, (%rsp)
; CHECK: callq return_i1
; CHECK: addq $24, %rsp
; CHECK: retq
entry:
  %metadata1 = alloca i32 addrspace(1)*, i32 2, align 8
  store i32 addrspace(1)* null, i32 addrspace(1)** %metadata1
; NOTE: Currently NOT testing alloca lowering in the StackMap format. Its
; known to be broken.
  ; Statepoint over a live gc pointer (%ptr) and a null gc pointer; both are
  ; relocated below and should show up as stackmap locations.
  %safepoint_token = tail call i32 (i1 ()*, i32, i32, ...)* @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()* @return_i1, i32 0, i32 0, i32 2, i32 addrspace(1)* %ptr, i32 addrspace(1)* null)
  %call1 = call zeroext i1 @llvm.experimental.gc.result.int.i1(i32 %safepoint_token)
  %a = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 4, i32 4)
  %b = call i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32 %safepoint_token, i32 5, i32 5)
;
  ret i1 %call1
}
declare i32 @llvm.experimental.gc.statepoint.p0f_i1f(i1 ()*, i32, i32, ...)
declare i1 @llvm.experimental.gc.result.int.i1(i32)
declare i32 addrspace(1)* @llvm.experimental.gc.relocate.p1i32(i32, i32, i32) #3
; CHECK-LABEL: .section .llvm_stackmaps
; CHECK-NEXT: __LLVM_StackMaps:
; Header
; CHECK-NEXT: .byte 1
; CHECK-NEXT: .byte 0
; CHECK-NEXT: .short 0
; Num Functions
; CHECK-NEXT: .long 1
; Num LargeConstants
; CHECK-NEXT: .long 0
; Num Callsites
; CHECK-NEXT: .long 1
; Functions and stack size
; CHECK-NEXT: .quad test
; CHECK-NEXT: .quad 24
; Large Constants
; Statepoint ID only
; CHECK: .quad 2882400000
; Callsites
; Constant arguments
; CHECK: .long .Ltmp1-test
; CHECK: .short 0
; CHECK: .short 8
; SmallConstant (0)
; CHECK: .byte 4
; CHECK: .byte 8
; CHECK: .short 0
; CHECK: .long 0
; SmallConstant (2)
; CHECK: .byte 4
; CHECK: .byte 8
; CHECK: .short 0
; CHECK: .long 2
; Direct Spill Slot [RSP+0]
; CHECK: .byte 2
; CHECK: .byte 8
; CHECK: .short 7
; CHECK: .long 0
; SmallConstant (0)
; CHECK: .byte 4
; CHECK: .byte 8
; CHECK: .short 0
; CHECK: .long 0
; SmallConstant (0)
; CHECK: .byte 4
; CHECK: .byte 8
; CHECK: .short 0
; CHECK: .long 0
; SmallConstant (0)
; CHECK: .byte 4
; CHECK: .byte 8
; CHECK: .short 0
; CHECK: .long 0
; Direct Spill Slot [RSP+0]
; CHECK: .byte 2
; CHECK: .byte 8
; CHECK: .short 7
; CHECK: .long 0
; Direct Spill Slot [RSP+0]
; CHECK: .byte 2
; CHECK: .byte 8
; CHECK: .short 7
; CHECK: .long 0
; No Padding or LiveOuts
; CHECK: .short 0
; CHECK: .short 0
; CHECK: .align 8