Remove the old atomic intrinsics. Autoupgrade functionality is included with this patch.
llvm-svn: 141333
commit 4d63ca106a
parent 82b6f24ee4
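As a rough illustration of the upgrade path (not part of the patch itself): in 3.0-era IR with typed pointers, a call to one of the removed overloaded intrinsics becomes a first-class atomicrmw instruction. The %p and %v values below are hypothetical.

    ; before: old-style overloaded intrinsic call
    declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind
    %old = call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v)

    ; after autoupgrade: equivalent first-class instruction
    %old = atomicrmw add i32* %p, i32 %v monotonic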
@@ -381,74 +381,6 @@ def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
                                       [LLVMMatchType<0>, LLVMMatchType<0>],
                                       [IntrNoMem]>;

//===------------------------- Atomic Intrinsics --------------------------===//
//
def int_memory_barrier : Intrinsic<[],
                                   [llvm_i1_ty, llvm_i1_ty,
                                    llvm_i1_ty, llvm_i1_ty, llvm_i1_ty], []>,
                                   GCCBuiltin<"__builtin_llvm_memory_barrier">;

def int_atomic_cmp_swap : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>, LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_val_compare_and_swap">;
def int_atomic_load_add : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_add">;
def int_atomic_swap : Intrinsic<[llvm_anyint_ty],
                                [LLVMAnyPointerType<LLVMMatchType<0>>,
                                 LLVMMatchType<0>],
                                [IntrReadWriteArgMem, NoCapture<0>]>,
                                GCCBuiltin<"__sync_lock_test_and_set">;
def int_atomic_load_sub : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_sub">;
def int_atomic_load_and : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_and">;
def int_atomic_load_or : Intrinsic<[llvm_anyint_ty],
                                   [LLVMAnyPointerType<LLVMMatchType<0>>,
                                    LLVMMatchType<0>],
                                   [IntrReadWriteArgMem, NoCapture<0>]>,
                                   GCCBuiltin<"__sync_fetch_and_or">;
def int_atomic_load_xor : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_xor">;
def int_atomic_load_nand : Intrinsic<[llvm_anyint_ty],
                                     [LLVMAnyPointerType<LLVMMatchType<0>>,
                                      LLVMMatchType<0>],
                                     [IntrReadWriteArgMem, NoCapture<0>]>,
                                     GCCBuiltin<"__sync_fetch_and_nand">;
def int_atomic_load_min : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_min">;
def int_atomic_load_max : Intrinsic<[llvm_anyint_ty],
                                    [LLVMAnyPointerType<LLVMMatchType<0>>,
                                     LLVMMatchType<0>],
                                    [IntrReadWriteArgMem, NoCapture<0>]>,
                                    GCCBuiltin<"__sync_fetch_and_max">;
def int_atomic_load_umin : Intrinsic<[llvm_anyint_ty],
                                     [LLVMAnyPointerType<LLVMMatchType<0>>,
                                      LLVMMatchType<0>],
                                     [IntrReadWriteArgMem, NoCapture<0>]>,
                                     GCCBuiltin<"__sync_fetch_and_umin">;
def int_atomic_load_umax : Intrinsic<[llvm_anyint_ty],
                                     [LLVMAnyPointerType<LLVMMatchType<0>>,
                                      LLVMMatchType<0>],
                                     [IntrReadWriteArgMem, NoCapture<0>]>,
                                     GCCBuiltin<"__sync_fetch_and_umax">;

//===------------------------- Memory Use Markers -------------------------===//
//
def int_lifetime_start : Intrinsic<[],
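Since the removed definitions use llvm_anyint_ty and LLVMAnyPointerType, each intrinsic was overloaded, and concrete instances carried type and address-space suffixes in IR. A sketch of one such declaration (the i64/address-space-0 instance is chosen arbitrarily here):

    ; hypothetical concrete instance of the overloaded cmp-swap intrinsic
    declare i64 @llvm.atomic.cmp.swap.i64.p0i64(i64* nocapture, i64, i64)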
@@ -763,26 +763,6 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
    // We know that memset doesn't load anything.
    Min = Mod;
    break;
  case Intrinsic::atomic_cmp_swap:
  case Intrinsic::atomic_swap:
  case Intrinsic::atomic_load_add:
  case Intrinsic::atomic_load_sub:
  case Intrinsic::atomic_load_and:
  case Intrinsic::atomic_load_nand:
  case Intrinsic::atomic_load_or:
  case Intrinsic::atomic_load_xor:
  case Intrinsic::atomic_load_max:
  case Intrinsic::atomic_load_min:
  case Intrinsic::atomic_load_umax:
  case Intrinsic::atomic_load_umin:
    if (TD) {
      Value *Op1 = II->getArgOperand(0);
      uint64_t Op1Size = TD->getTypeStoreSize(Op1->getType());
      MDNode *Tag = II->getMetadata(LLVMContext::MD_tbaa);
      if (isNoAlias(Location(Op1, Op1Size, Tag), Loc))
        return NoModRef;
    }
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start: {
@@ -3581,26 +3581,6 @@ getF32Constant(SelectionDAG &DAG, unsigned Flt) {
  return DAG.getConstantFP(APFloat(APInt(32, Flt)), MVT::f32);
}

/// Inlined utility function to implement binary input atomic intrinsics for
/// visitIntrinsicCall: I is a call instruction
///                     Op is the associated NodeType for I
const char *
SelectionDAGBuilder::implVisitBinaryAtomic(const CallInst& I,
                                           ISD::NodeType Op) {
  SDValue Root = getRoot();
  SDValue L =
    DAG.getAtomic(Op, getCurDebugLoc(),
                  getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
                  Root,
                  getValue(I.getArgOperand(0)),
                  getValue(I.getArgOperand(1)),
                  I.getArgOperand(0), 0 /* Alignment */,
                  Monotonic, CrossThread);
  setValue(&I, L);
  DAG.setRoot(L.getValue(1));
  return 0;
}

// implVisitAluOverflow - Lower arithmetic overflow intrinsics.
const char *
SelectionDAGBuilder::implVisitAluOverflow(const CallInst &I, ISD::NodeType Op) {
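The removed helper reads the pointer from ArgOperand(0) and the update value from ArgOperand(1), so every binary atomic intrinsic shared one call shape. A hypothetical call in that shape:

    ; pointer first, operand second; the result is the prior memory value
    %prev = call i32 @llvm.atomic.load.xor.i32.p0i32(i32* %p, i32 %mask)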
@@ -5109,52 +5089,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
                            rw==1)); /* write */
    return 0;
  }
  case Intrinsic::memory_barrier: {
    SDValue Ops[6];
    Ops[0] = getRoot();
    for (int x = 1; x < 6; ++x)
      Ops[x] = getValue(I.getArgOperand(x - 1));

    DAG.setRoot(DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, &Ops[0], 6));
    return 0;
  }
  case Intrinsic::atomic_cmp_swap: {
    SDValue Root = getRoot();
    SDValue L =
      DAG.getAtomic(ISD::ATOMIC_CMP_SWAP, getCurDebugLoc(),
                    getValue(I.getArgOperand(1)).getValueType().getSimpleVT(),
                    Root,
                    getValue(I.getArgOperand(0)),
                    getValue(I.getArgOperand(1)),
                    getValue(I.getArgOperand(2)),
                    MachinePointerInfo(I.getArgOperand(0)), 0 /* Alignment */,
                    Monotonic, CrossThread);
    setValue(&I, L);
    DAG.setRoot(L.getValue(1));
    return 0;
  }
  case Intrinsic::atomic_load_add:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_ADD);
  case Intrinsic::atomic_load_sub:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_SUB);
  case Intrinsic::atomic_load_or:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_OR);
  case Intrinsic::atomic_load_xor:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_XOR);
  case Intrinsic::atomic_load_and:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_AND);
  case Intrinsic::atomic_load_nand:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_NAND);
  case Intrinsic::atomic_load_max:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MAX);
  case Intrinsic::atomic_load_min:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_MIN);
  case Intrinsic::atomic_load_umin:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMIN);
  case Intrinsic::atomic_load_umax:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_LOAD_UMAX);
  case Intrinsic::atomic_swap:
    return implVisitBinaryAtomic(I, ISD::ATOMIC_SWAP);

  case Intrinsic::invariant_start:
  case Intrinsic::lifetime_start:
@@ -555,7 +555,6 @@ private:
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  const char *implVisitBinaryAtomic(const CallInst& I, ISD::NodeType Op);
  const char *implVisitAluOverflow(const CallInst &I, ISD::NodeType Op);

  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);
@@ -2839,7 +2839,6 @@ void CWriter::lowerIntrinsics(Function &F) {
        if (Function *F = CI->getCalledFunction())
          switch (F->getIntrinsicID()) {
          case Intrinsic::not_intrinsic:
          case Intrinsic::memory_barrier:
          case Intrinsic::vastart:
          case Intrinsic::vacopy:
          case Intrinsic::vaend:
@@ -3030,9 +3029,6 @@ bool CWriter::visitBuiltinCall(CallInst &I, Intrinsic::ID ID,
    WroteCallee = true;
    return false;
  }
  case Intrinsic::memory_barrier:
    Out << "__sync_synchronize()";
    return true;
  case Intrinsic::vastart:
    Out << "0; ";

@@ -20,101 +20,6 @@
#include "llvm/Support/IRBuilder.h"
using namespace llvm;

static bool LowerAtomicIntrinsic(IntrinsicInst *II) {
  IRBuilder<> Builder(II->getParent(), II);
  unsigned IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::memory_barrier:
    break;

  case Intrinsic::atomic_load_add:
  case Intrinsic::atomic_load_sub:
  case Intrinsic::atomic_load_and:
  case Intrinsic::atomic_load_nand:
  case Intrinsic::atomic_load_or:
  case Intrinsic::atomic_load_xor:
  case Intrinsic::atomic_load_max:
  case Intrinsic::atomic_load_min:
  case Intrinsic::atomic_load_umax:
  case Intrinsic::atomic_load_umin: {
    Value *Ptr = II->getArgOperand(0), *Delta = II->getArgOperand(1);

    LoadInst *Orig = Builder.CreateLoad(Ptr);
    Value *Res = NULL;
    switch (IID) {
    default: assert(0 && "Unrecognized atomic modify operation");
    case Intrinsic::atomic_load_add:
      Res = Builder.CreateAdd(Orig, Delta);
      break;
    case Intrinsic::atomic_load_sub:
      Res = Builder.CreateSub(Orig, Delta);
      break;
    case Intrinsic::atomic_load_and:
      Res = Builder.CreateAnd(Orig, Delta);
      break;
    case Intrinsic::atomic_load_nand:
      Res = Builder.CreateNot(Builder.CreateAnd(Orig, Delta));
      break;
    case Intrinsic::atomic_load_or:
      Res = Builder.CreateOr(Orig, Delta);
      break;
    case Intrinsic::atomic_load_xor:
      Res = Builder.CreateXor(Orig, Delta);
      break;
    case Intrinsic::atomic_load_max:
      Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
                                 Delta, Orig);
      break;
    case Intrinsic::atomic_load_min:
      Res = Builder.CreateSelect(Builder.CreateICmpSLT(Orig, Delta),
                                 Orig, Delta);
      break;
    case Intrinsic::atomic_load_umax:
      Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
                                 Delta, Orig);
      break;
    case Intrinsic::atomic_load_umin:
      Res = Builder.CreateSelect(Builder.CreateICmpULT(Orig, Delta),
                                 Orig, Delta);
      break;
    }
    Builder.CreateStore(Res, Ptr);

    II->replaceAllUsesWith(Orig);
    break;
  }

  case Intrinsic::atomic_swap: {
    Value *Ptr = II->getArgOperand(0), *Val = II->getArgOperand(1);
    LoadInst *Orig = Builder.CreateLoad(Ptr);
    Builder.CreateStore(Val, Ptr);
    II->replaceAllUsesWith(Orig);
    break;
  }

  case Intrinsic::atomic_cmp_swap: {
    Value *Ptr = II->getArgOperand(0), *Cmp = II->getArgOperand(1);
    Value *Val = II->getArgOperand(2);

    LoadInst *Orig = Builder.CreateLoad(Ptr);
    Value *Equal = Builder.CreateICmpEQ(Orig, Cmp);
    Value *Res = Builder.CreateSelect(Equal, Val, Orig);
    Builder.CreateStore(Res, Ptr);
    II->replaceAllUsesWith(Orig);
    break;
  }

  default:
    return false;
  }

  assert(II->use_empty() &&
         "Lowering should have eliminated any uses of the intrinsic call!");
  II->eraseFromParent();

  return true;
}

static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
  IRBuilder<> Builder(CXI->getParent(), CXI);
  Value *Ptr = CXI->getPointerOperand();
@@ -210,9 +115,7 @@ namespace {
      bool Changed = false;
      for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
        Instruction *Inst = DI++;
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
          Changed |= LowerAtomicIntrinsic(II);
        else if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
        if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
          Changed |= LowerFenceInst(FI);
        else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))
          Changed |= LowerAtomicCmpXchgInst(CXI);
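The removed LowerAtomicIntrinsic turned each intrinsic into plain, non-atomic IR, which is sound only under this pass's single-threaded assumption. A sketch of what the cmp-swap case emitted, with hypothetical %p, %cmp, and %new:

    ; lowering of llvm.atomic.cmp.swap.i32.p0i32(i32* %p, i32 %cmp, i32 %new)
    %orig  = load i32* %p
    %equal = icmp eq i32 %orig, %cmp
    %res   = select i1 %equal, i32 %new, i32 %orig
    store i32 %res, i32* %p
    ; uses of the intrinsic's result are replaced with %orig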
@@ -43,6 +43,20 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {

  switch (Name[0]) {
  default: break;
  case 'a':
    if (Name.startswith("atomic.cmp.swap") ||
        Name.startswith("atomic.swap") ||
        Name.startswith("atomic.load.add") ||
        Name.startswith("atomic.load.sub") ||
        Name.startswith("atomic.load.and") ||
        Name.startswith("atomic.load.nand") ||
        Name.startswith("atomic.load.or") ||
        Name.startswith("atomic.load.xor") ||
        Name.startswith("atomic.load.max") ||
        Name.startswith("atomic.load.min") ||
        Name.startswith("atomic.load.umax") ||
        Name.startswith("atomic.load.umin"))
      return true;
  case 'i':
    // This upgrades the old llvm.init.trampoline to the new
    // llvm.init.trampoline and llvm.adjust.trampoline pair.
@@ -63,6 +77,9 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
                                      FTy->getParamType(2), (Type *)0));
    return true;
  }
  case 'm':
    if (Name == "memory.barrier")
      return true;
  case 'p':
    // This upgrades the llvm.prefetch intrinsic to accept one more parameter,
    // which is an instruction / data cache identifier. The old version only
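The truncated comment above refers to the prefetch upgrade: the old three-argument llvm.prefetch gains a fourth cache-type operand, with 1 selecting the data cache. A sketch with a hypothetical %addr (read access, maximum locality):

    ; before: call void @llvm.prefetch(i8* %addr, i32 0, i32 3)
    ; after:
    call void @llvm.prefetch(i8* %addr, i32 0, i32 3, i32 1)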
@@ -204,6 +221,80 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
      SI->setMetadata(M->getMDKindID("nontemporal"), Node);
      SI->setAlignment(16);

      // Remove intrinsic.
      CI->eraseFromParent();
    } else if (F->getName().startswith("llvm.atomic.cmp.swap")) {
      IRBuilder<> Builder(C);
      Builder.SetInsertPoint(CI->getParent(), CI);
      Value *Val = Builder.CreateAtomicCmpXchg(CI->getArgOperand(0),
                                               CI->getArgOperand(1),
                                               CI->getArgOperand(2),
                                               Monotonic);

      // Replace intrinsic.
      Val->takeName(CI);
      if (!CI->use_empty())
        CI->replaceAllUsesWith(Val);
      CI->eraseFromParent();
    } else if (F->getName().startswith("llvm.atomic")) {
      IRBuilder<> Builder(C);
      Builder.SetInsertPoint(CI->getParent(), CI);

      AtomicRMWInst::BinOp Op;
      if (F->getName().startswith("llvm.atomic.swap"))
        Op = AtomicRMWInst::Xchg;
      else if (F->getName().startswith("llvm.atomic.load.add"))
        Op = AtomicRMWInst::Add;
      else if (F->getName().startswith("llvm.atomic.load.sub"))
        Op = AtomicRMWInst::Sub;
      else if (F->getName().startswith("llvm.atomic.load.and"))
        Op = AtomicRMWInst::And;
      else if (F->getName().startswith("llvm.atomic.load.nand"))
        Op = AtomicRMWInst::Nand;
      else if (F->getName().startswith("llvm.atomic.load.or"))
        Op = AtomicRMWInst::Or;
      else if (F->getName().startswith("llvm.atomic.load.xor"))
        Op = AtomicRMWInst::Xor;
      else if (F->getName().startswith("llvm.atomic.load.max"))
        Op = AtomicRMWInst::Max;
      else if (F->getName().startswith("llvm.atomic.load.min"))
        Op = AtomicRMWInst::Min;
      else if (F->getName().startswith("llvm.atomic.load.umax"))
        Op = AtomicRMWInst::UMax;
      else if (F->getName().startswith("llvm.atomic.load.umin"))
        Op = AtomicRMWInst::UMin;
      else
        llvm_unreachable("Unknown atomic");

      Value *Val = Builder.CreateAtomicRMW(Op, CI->getArgOperand(0),
                                           CI->getArgOperand(1),
                                           Monotonic);

      // Replace intrinsic.
      Val->takeName(CI);
      if (!CI->use_empty())
        CI->replaceAllUsesWith(Val);
      CI->eraseFromParent();
    } else if (F->getName() == "llvm.memory.barrier") {
      IRBuilder<> Builder(C);
      Builder.SetInsertPoint(CI->getParent(), CI);

      // Note that this conversion ignores the "device" bit; it was not really
      // well-defined, and got abused because nobody paid enough attention to
      // get it right. In practice, this probably doesn't matter; application
      // code generally doesn't need anything stronger than
      // SequentiallyConsistent (and realistically, SequentiallyConsistent
      // is lowered to a strong enough barrier for almost anything).

      if (cast<ConstantInt>(CI->getArgOperand(1))->getZExtValue())
        Builder.CreateFence(SequentiallyConsistent);
      else if (!cast<ConstantInt>(CI->getArgOperand(0))->getZExtValue())
        Builder.CreateFence(Release);
      else if (!cast<ConstantInt>(CI->getArgOperand(3))->getZExtValue())
        Builder.CreateFence(Acquire);
      else
        Builder.CreateFence(AcquireRelease);

      // Remove intrinsic.
      CI->eraseFromParent();
    } else {
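To make the barrier mapping above concrete: llvm.memory.barrier took five i1 flags (load-load, load-store, store-load, store-store, device), and the upgrade inspects arguments 1, 0, and 3 to pick a fence ordering. With the hypothetical all-true flags below, argument 1 is set, so a sequentially consistent fence is emitted:

    ; before: call void @llvm.memory.barrier(i1 true, i1 true, i1 true, i1 true, i1 false)
    ; after:
    fence seq_cst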