[Hexagon] Add support for atomic RMW operations
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241804 91177308-0d34-0410-b5e6-96231b3b80d8
parent 86d3dfce62
commit b13f72aa2d
@@ -2466,3 +2466,45 @@ bool llvm::isPositiveHalfWord(SDNode *N) {
  return true;
}
}

Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                             AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
                                   : Intrinsic::hexagon_L4_loadd_locked;
  Value *Fn = Intrinsic::getDeclaration(M, IntID);
  return Builder.CreateCall(Fn, Addr, "larx");
}
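For a 32-bit atomic this hook reduces to a single intrinsic call; in IR the result looks like the following (a sketch; %addr is an illustrative name, and the 64-bit case calls llvm.hexagon.L4.loadd.locked instead):

  %larx = call i32 @llvm.hexagon.L2.loadw.locked(i32* %addr)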
/// Perform a store-conditional operation to Addr. Return the status of the
/// store. This should be 0 if the store succeeded, non-zero otherwise.
Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
      Value *Val, Value *Addr, AtomicOrdering Ord) const {
  BasicBlock *BB = Builder.GetInsertBlock();
  Module *M = BB->getParent()->getParent();
  Type *Ty = Val->getType();
  unsigned SZ = Ty->getPrimitiveSizeInBits();
  assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
  Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
                                   : Intrinsic::hexagon_S4_stored_locked;
  Value *Fn = Intrinsic::getDeclaration(M, IntID);
  Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
  Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
  Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
  return Ext;
}
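The corresponding IR for the 32-bit case, including the status computation this hook appends (a sketch; %addr and %val are illustrative names, assuming the locked-store intrinsic produces a non-zero result on success, which is what the 0-on-success status below implies):

  %stcx   = call i32 @llvm.hexagon.S2.storew.locked(i32* %addr, i32 %val)
  %cmp    = icmp eq i32 %stcx, 0
  %status = zext i1 %cmp to i32   ; 0 if the store succeeded, non-zero otherwise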
bool HexagonTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return LI->getType()->getPrimitiveSizeInBits() > 64;
}

bool HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Do not expand loads and stores that don't exceed 64 bits.
  return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64;
}
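In effect, atomic loads and stores of 64 bits or fewer are left intact for instruction selection; accesses like these would be selected directly rather than expanded (a sketch in the IR syntax of this era, reusing the @si global from the test below):

  %v = load atomic i32, i32* @si monotonic, align 4
  store atomic i32 %v, i32* @si monotonic, align 4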
@@ -207,6 +207,21 @@ bool isPositiveHalfWord(SDNode *N);
  /// compare a register against the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  // Handling of atomic RMW instructions.
  bool hasLoadLinkedStoreConditional() const override {
    return true;
  }
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
      AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
      Value *Addr, AtomicOrdering Ord) const override;
  bool shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  AtomicRMWExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI)
      const override {
    return AtomicRMWExpansionKind::LLSC;
  }
};
} // end namespace llvm
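Returning AtomicRMWExpansionKind::LLSC asks AtomicExpandPass to rewrite every atomicrmw into a retry loop built from the emitLoadLinked/emitStoreConditional hooks above. For a 32-bit add of 1, the expansion comes out roughly as follows (a sketch, not verbatim pass output; block and value names are illustrative):

atomicrmw.start:
  %old    = call i32 @llvm.hexagon.L2.loadw.locked(i32* @si)
  %new    = add i32 %old, 1
  %stcx   = call i32 @llvm.hexagon.S2.storew.locked(i32* @si, i32 %new)
  %cmp    = icmp eq i32 %stcx, 0
  %status = zext i1 %cmp to i32   ; 0 = success, per emitStoreConditional's contract
  %retry  = icmp ne i32 %status, 0
  br i1 %retry, label %atomicrmw.start, label %atomicrmw.end

atomicrmw.end:
  ; %old holds the value read before the add, i.e. the atomicrmw result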
@@ -144,8 +144,9 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {

void HexagonPassConfig::addIRPasses() {
  TargetPassConfig::addIRPasses();
  bool NoOpt = (getOptLevel() == CodeGenOpt::None);

  addPass(createAtomicExpandPass(TM));
  if (!NoOpt && EnableCommGEP)
    addPass(createHexagonCommonGEP());
}
test/CodeGen/Hexagon/Atomics.ll (new file, 71 lines)
@@ -0,0 +1,71 @@
; RUN: llc < %s -march=hexagon

@si = common global i32 0, align 4
@sll = common global i64 0, align 8

define void @test_op_ignore() nounwind {
entry:
  %t00 = atomicrmw add i32* @si, i32 1 monotonic
  %t01 = atomicrmw add i64* @sll, i64 1 monotonic
  %t10 = atomicrmw sub i32* @si, i32 1 monotonic
  %t11 = atomicrmw sub i64* @sll, i64 1 monotonic
  %t20 = atomicrmw or i32* @si, i32 1 monotonic
  %t21 = atomicrmw or i64* @sll, i64 1 monotonic
  %t30 = atomicrmw xor i32* @si, i32 1 monotonic
  %t31 = atomicrmw xor i64* @sll, i64 1 monotonic
  %t40 = atomicrmw and i32* @si, i32 1 monotonic
  %t41 = atomicrmw and i64* @sll, i64 1 monotonic
  %t50 = atomicrmw nand i32* @si, i32 1 monotonic
  %t51 = atomicrmw nand i64* @sll, i64 1 monotonic
  br label %return

return:                                           ; preds = %entry
  ret void
}
define void @test_fetch_and_op() nounwind {
entry:
  %t00 = atomicrmw add i32* @si, i32 11 monotonic
  store i32 %t00, i32* @si, align 4
  %t01 = atomicrmw add i64* @sll, i64 11 monotonic
  store i64 %t01, i64* @sll, align 8
  %t10 = atomicrmw sub i32* @si, i32 11 monotonic
  store i32 %t10, i32* @si, align 4
  %t11 = atomicrmw sub i64* @sll, i64 11 monotonic
  store i64 %t11, i64* @sll, align 8
  %t20 = atomicrmw or i32* @si, i32 11 monotonic
  store i32 %t20, i32* @si, align 4
  %t21 = atomicrmw or i64* @sll, i64 11 monotonic
  store i64 %t21, i64* @sll, align 8
  %t30 = atomicrmw xor i32* @si, i32 11 monotonic
  store i32 %t30, i32* @si, align 4
  %t31 = atomicrmw xor i64* @sll, i64 11 monotonic
  store i64 %t31, i64* @sll, align 8
  %t40 = atomicrmw and i32* @si, i32 11 monotonic
  store i32 %t40, i32* @si, align 4
  %t41 = atomicrmw and i64* @sll, i64 11 monotonic
  store i64 %t41, i64* @sll, align 8
  %t50 = atomicrmw nand i32* @si, i32 11 monotonic
  store i32 %t50, i32* @si, align 4
  %t51 = atomicrmw nand i64* @sll, i64 11 monotonic
  store i64 %t51, i64* @sll, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}
define void @test_lock() nounwind {
entry:
  %t00 = atomicrmw xchg i32* @si, i32 1 monotonic
  store i32 %t00, i32* @si, align 4
  %t01 = atomicrmw xchg i64* @sll, i64 1 monotonic
  store i64 %t01, i64* @sll, align 8
  fence seq_cst
  store volatile i32 0, i32* @si, align 4
  store volatile i64 0, i64* @sll, align 8
  br label %return

return:                                           ; preds = %entry
  ret void
}