[mlir][llvm] Add atomic support to the LoadOp.
This revision adds atomic support to the LoadOp. It chooses to print the atomic keywords together with the syncscope and ordering arguments, which simplifies parsing and printing; the LLVM IR printer, by contrast, places the atomic keyword at the beginning of the instruction. The ordering attribute is used to check whether a load is atomic. The revision also implements verifiers to ensure the constraints that apply to atomic load operations are checked.

Reviewed By: Dinistro

Differential Revision: https://reviews.llvm.org/D144112
parent 55a18bfe9b
commit 6f4af64b74
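For illustration, a minimal sketch of the resulting syntax, with a hypothetical `%ptr` of type `!llvm.ptr`. Unlike LLVM IR, which prints `load atomic` before the operands, the MLIR form keeps the atomic tokens together after the address:

```mlir
// Atomic load; the ordering follows the `atomic` keyword.
%0 = llvm.load %ptr atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> i64

// Atomic load with an explicit synchronization scope.
%1 = llvm.load %ptr atomic syncscope("singlethread") acquire
    {alignment = 4 : i64} : !llvm.ptr -> i32
```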
@@ -187,6 +187,9 @@ class MemoryOpBase {
         inst->setSyncScopeID(llvmContext.getOrInsertSyncScopeID(*$syncscope));
     }
   }];
+  code setOrderingCode = [{
+    inst->setAtomic(convertAtomicOrderingToLLVM($ordering));
+  }];
   code setNonTemporalMetadataCode = [{
     if ($nontemporal) {
       llvm::MDNode *metadata = llvm::MDNode::get(
@@ -350,17 +353,48 @@ def LLVM_LoadOp : LLVM_Op<"load">, MemoryOpBase {
                    OptionalAttr<SymbolRefArrayAttr>:$alias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$noalias_scopes,
                    OptionalAttr<SymbolRefArrayAttr>:$tbaa,
-                   OptionalAttr<I64Attr>:$alignment, UnitAttr:$volatile_,
-                   UnitAttr:$nontemporal);
+                   OptionalAttr<I64Attr>:$alignment,
+                   UnitAttr:$volatile_,
+                   UnitAttr:$nontemporal,
+                   DefaultValuedAttr<AtomicOrdering,
+                                     "AtomicOrdering::not_atomic">:$ordering,
+                   OptionalAttr<StrAttr>:$syncscope);
   let results = (outs LLVM_LoadableType:$res);
   string llvmInstName = "Load";
+  let description = [{
+    The `load` operation is used to read from memory. A load may be marked as
+    atomic, volatile, and/or nontemporal, and takes a number of optional
+    attributes that specify aliasing information.
+
+    An atomic load only supports a limited set of pointer, integer, and
+    floating point types, and requires an explicit alignment.
+
+    Examples:
+    ```mlir
+    // A volatile load of a float variable.
+    %0 = llvm.load volatile %ptr : !llvm.ptr -> f32
+
+    // A nontemporal load of a float variable.
+    %0 = llvm.load %ptr {nontemporal} : !llvm.ptr -> f32
+
+    // An atomic load of an integer variable.
+    %0 = llvm.load %ptr atomic monotonic {alignment = 8 : i64}
+        : !llvm.ptr -> i64
+    ```
+
+    See the following link for more details:
+    https://llvm.org/docs/LangRef.html#load-instruction
+  }];
   let assemblyFormat = [{
-    (`volatile` $volatile_^)? $addr attr-dict `:`
-        custom<LoadType>(type($addr), type($res))
+    (`volatile` $volatile_^)? $addr
+    (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+    attr-dict `:` custom<LoadType>(type($addr), type($res))
   }];
   string llvmBuilder = [{
     auto *inst = builder.CreateLoad($_resultType, $addr, $volatile_);
-  }] # setAlignmentCode
+  }] # setOrderingCode
+     # setSyncScopeCode
+     # setAlignmentCode
      # setNonTemporalMetadataCode
      # setAccessGroupsMetadataCode
      # setAliasScopeMetadataCode
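Read as a grammar, the new format makes the `volatile` keyword and the whole `atomic` group independently optional, with `syncscope(...)` optional inside the latter. A sketch of the four resulting spellings (value and attribute choices are illustrative only):

```mlir
%0 = llvm.load %ptr : !llvm.ptr -> f32            // plain load
%1 = llvm.load volatile %ptr : !llvm.ptr -> f32   // volatile load
// Atomic load; requires an ordering and, per the verifier, an alignment.
%2 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
// Volatile atomic load with an explicit sync scope.
%3 = llvm.load volatile %ptr atomic syncscope("singlethread") monotonic
    {alignment = 4 : i64} : !llvm.ptr -> f32
```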
@@ -373,22 +407,19 @@ def LLVM_LoadOp : LLVM_Op<"load">, MemoryOpBase {
     unsigned alignment = loadInst->getAlign().value();
     $res = $_builder.create<LLVM::LoadOp>($_location, $_resultType, $addr,
         alignment, loadInst->isVolatile(),
-        loadInst->hasMetadata(llvm::LLVMContext::MD_nontemporal));
+        loadInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
+        convertAtomicOrderingFromLLVM(loadInst->getOrdering()),
+        getLLVMSyncScope(loadInst));
   }];
   let builders = [
     OpBuilder<(ins "Value":$addr, CArg<"unsigned", "0">:$alignment,
-      CArg<"bool", "false">:$isVolatile, CArg<"bool", "false">:$isNonTemporal),
-    [{
-      auto type = addr.getType().cast<LLVMPointerType>().getElementType();
-      assert(type && "must provide explicit element type to the constructor "
-                     "when the pointer type is opaque");
-      build($_builder, $_state, type, addr, alignment, isVolatile, isNonTemporal);
-    }]>,
+      CArg<"bool", "false">:$isVolatile, CArg<"bool", "false">:$isNonTemporal)>,
     OpBuilder<(ins "Type":$type, "Value":$addr,
       CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
-      CArg<"bool", "false">:$isNonTemporal)>
+      CArg<"bool", "false">:$isNonTemporal,
+      CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
+      CArg<"StringRef", "StringRef()">:$syncscope)>
   ];
   let hasCustomAssemblyFormat = 1;
   let hasVerifier = 1;
 }
@@ -804,15 +804,65 @@ LogicalResult verifyMemOpMetadata(OpTy memOp) {
   return success();
 }
 
-LogicalResult LoadOp::verify() { return verifyMemOpMetadata(*this); }
+/// Returns true if the given type is supported by atomic operations. All
+/// integer and float types with limited bit width are supported. Additionally,
+/// depending on the operation pointers may be supported as well.
+static bool isTypeCompatibleWithAtomicOp(Type type,
+                                         bool isPointerTypeAllowed) {
+  if (type.isa<LLVMPointerType>())
+    return isPointerTypeAllowed;
+
+  std::optional<unsigned> bitWidth = std::nullopt;
+  if (auto floatType = type.dyn_cast<FloatType>()) {
+    if (!isCompatibleFloatingPointType(type))
+      return false;
+    bitWidth = floatType.getWidth();
+  }
+  if (auto integerType = type.dyn_cast<IntegerType>())
+    bitWidth = integerType.getWidth();
+  // The type is neither an integer, float, or pointer type.
+  if (!bitWidth)
+    return false;
+  return *bitWidth == 8 || *bitWidth == 16 || *bitWidth == 32 ||
+         *bitWidth == 64;
+}
+
+LogicalResult LoadOp::verify() {
+  if (getOrdering() != AtomicOrdering::not_atomic) {
+    if (!isTypeCompatibleWithAtomicOp(getResult().getType(),
+                                      /*isPointerTypeAllowed=*/true))
+      return emitOpError("unsupported type ")
+             << getResult().getType() << " for atomic access";
+    if (getOrdering() == AtomicOrdering::release ||
+        getOrdering() == AtomicOrdering::acq_rel)
+      return emitOpError("unsupported ordering '")
+             << stringifyAtomicOrdering(getOrdering()) << "'";
+    if (!getAlignment())
+      return emitOpError("expected alignment for atomic access");
+  } else if (getSyncscope()) {
+    return emitOpError("expected syncscope to be null for non-atomic access");
+  }
+  return verifyMemOpMetadata(*this);
+}
+
+void LoadOp::build(OpBuilder &builder, OperationState &state, Value addr,
+                   unsigned alignment, bool isVolatile, bool isNonTemporal) {
+  auto type = addr.getType().cast<LLVMPointerType>().getElementType();
+  assert(type && "must provide explicit element type to the constructor "
+                 "when the pointer type is opaque");
+  build(builder, state, type, addr, alignment, isVolatile, isNonTemporal);
+}
+
 void LoadOp::build(OpBuilder &builder, OperationState &state, Type type,
                    Value addr, unsigned alignment, bool isVolatile,
-                   bool isNonTemporal) {
-  build(builder, state, type, addr, /*access_groups=*/nullptr,
-        /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr,
+                   bool isNonTemporal, AtomicOrdering ordering,
+                   StringRef syncscope) {
+  build(builder, state, type, addr,
+        /*access_groups=*/nullptr,
+        /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr,
+        /*tbaa=*/nullptr,
        alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
-        isNonTemporal);
+        isNonTemporal, ordering,
+        syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
 }
 
 // Extract the pointee type from the LLVM pointer type wrapped in MLIR. Return
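Concretely, the new verifier admits pointer results plus integer and float results of width 8, 16, 32, or 64, rejects the release and acq_rel orderings, and insists on an explicit alignment. A sketch of accepted and rejected forms, assuming a value `%p` of type `!llvm.ptr`:

```mlir
// Accepted: 64-bit integer result with explicit alignment.
%0 = llvm.load %p atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> i64
// Rejected: i1 is not a supported atomic bit width.
// %1 = llvm.load %p atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> i1
// Rejected: 'release' is not a valid ordering for atomic loads.
// %2 = llvm.load %p atomic release {alignment = 4 : i64} : !llvm.ptr -> f32
// Rejected: atomic access requires an explicit alignment attribute.
// %3 = llvm.load %p atomic monotonic : !llvm.ptr -> f32
```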
@@ -2266,12 +2316,7 @@ LogicalResult AtomicRMWOp::verify() {
     if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
       return emitOpError("expected LLVM IR floating point type");
   } else if (getBinOp() == AtomicBinOp::xchg) {
-    auto intType = valType.dyn_cast<IntegerType>();
-    unsigned intBitWidth = intType ? intType.getWidth() : 0;
-    if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
-        intBitWidth != 64 && !valType.isa<BFloat16Type>() &&
-        !valType.isa<Float16Type>() && !valType.isa<Float32Type>() &&
-        !valType.isa<Float64Type>())
+    if (!isTypeCompatibleWithAtomicOp(valType, /*isPointerTypeAllowed=*/false))
       return emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
   } else {
     auto intType = valType.dyn_cast<IntegerType>();
@@ -2320,12 +2365,8 @@ LogicalResult AtomicCmpXchgOp::verify() {
   if (!ptrType.isOpaque() && valType != ptrType.getElementType())
     return emitOpError("expected LLVM IR element type for operand #0 to "
                        "match type for all other operands");
-  auto intType = valType.dyn_cast<IntegerType>();
-  unsigned intBitWidth = intType ? intType.getWidth() : 0;
-  if (!valType.isa<LLVMPointerType>() && intBitWidth != 8 &&
-      intBitWidth != 16 && intBitWidth != 32 && intBitWidth != 64 &&
-      !valType.isa<BFloat16Type>() && !valType.isa<Float16Type>() &&
-      !valType.isa<Float32Type>() && !valType.isa<Float64Type>())
+  if (!isTypeCompatibleWithAtomicOp(valType,
+                                    /*isPointerTypeAllowed=*/true))
     return emitOpError("unexpected LLVM IR type");
   if (getSuccessOrdering() < AtomicOrdering::monotonic ||
       getFailureOrdering() < AtomicOrdering::monotonic)
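Both atomic ops now share the same helper and differ only in the pointer allowance: `xchg` passes `/*isPointerTypeAllowed=*/false`, while `cmpxchg` passes `true`. A hedged sketch of conforming ops; the `llvm.atomicrmw` form matches the round-trip test below, while the `llvm.cmpxchg` spelling is assumed from the dialect of the same era:

```mlir
// xchg: integer/float widths of 8, 16, 32, or 64 are allowed; pointers are not.
%old = llvm.atomicrmw xchg %ptr, %val monotonic : !llvm.ptr, i32
// cmpxchg: pointer values are additionally allowed.
%pair = llvm.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !llvm.ptr, i32
```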
@@ -153,6 +153,41 @@ func.func @load_non_ptr_type(%foo : f32) {
 
 // -----
 
+func.func @load_syncscope(%ptr : !llvm.ptr) {
+  // expected-error@below {{expected syncscope to be null for non-atomic access}}
+  %1 = "llvm.load"(%ptr) {syncscope = "singlethread"} : (!llvm.ptr) -> (f32)
+}
+
+// -----
+
+func.func @load_unsupported_ordering(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported ordering 'release'}}
+  %1 = llvm.load %ptr atomic release {alignment = 4 : i64} : !llvm.ptr -> f32
+}
+
+// -----
+
+func.func @load_unsupported_type(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'f80' for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> f80
+}
+
+// -----
+
+func.func @load_unsupported_type(%ptr : !llvm.ptr) {
+  // expected-error@below {{unsupported type 'i1' for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> i1
+}
+
+// -----
+
+func.func @load_unaligned_atomic(%ptr : !llvm.ptr) {
+  // expected-error@below {{expected alignment for atomic access}}
+  %1 = llvm.load %ptr atomic monotonic : !llvm.ptr -> f32
+}
+
+// -----
+
 func.func @store_non_llvm_type(%foo : memref<f32>, %bar : f32) {
   // expected-error@+1 {{expected LLVM pointer type}}
   llvm.store %bar, %foo : memref<f32>
@@ -339,6 +339,15 @@ func.func @null() {
   llvm.return
 }
 
+// CHECK-LABEL: @atomic_load
+func.func @atomic_load(%ptr : !llvm.ptr) {
+  // CHECK: llvm.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  %0 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK: llvm.load volatile %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+  %1 = llvm.load volatile %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+  llvm.return
+}
+
 // CHECK-LABEL: @atomicrmw
 func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
   // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
@@ -368,6 +368,18 @@ define void @load_store(ptr %ptr) {
 
 ; // -----
 
+; CHECK-LABEL: @atomic_load
+; CHECK-SAME:  %[[PTR:[a-zA-Z0-9]+]]
+define void @atomic_load(ptr %ptr) {
+  ; CHECK:  %[[V1:[0-9]+]] = llvm.load %[[PTR]] atomic acquire {alignment = 8 : i64} : !llvm.ptr -> f64
+  ; CHECK:  %[[V2:[0-9]+]] = llvm.load volatile %[[PTR]] atomic syncscope("singlethreaded") acquire {alignment = 16 : i64} : !llvm.ptr -> f64
+  %1 = load atomic double, ptr %ptr acquire, align 8
+  %2 = load atomic volatile double, ptr %ptr syncscope("singlethreaded") acquire, align 16
+  ret void
+}
+
+; // -----
+
 ; CHECK-LABEL: @atomic_rmw
 ; CHECK-SAME:  %[[PTR1:[a-zA-Z0-9]+]]
 ; CHECK-SAME:  %[[VAL1:[a-zA-Z0-9]+]]
@@ -1261,7 +1261,7 @@ llvm.func @indexconstantsplat() -> vector<3xi32> {
 // CHECK-LABEL: @indexconstantarray
 llvm.func @indexconstantarray() -> vector<3xi32> {
   %1 = llvm.mlir.constant(dense<[0, 1, 2]> : vector<3xindex>) : vector<3xi32>
-  // CHECK: ret <3 x i32> <i32 0, i32 1, i32 2>
+  // CHECK: ret <3 x i32> <i32 0, i32 1, i32 2>
   llvm.return %1 : vector<3xi32>
 }
@@ -1780,6 +1780,18 @@ llvm.func @nontemporal_store_and_load() {
 
 // -----
 
+llvm.func @atomic_load(%ptr : !llvm.ptr) {
+  // CHECK: load atomic
+  // CHECK-SAME: monotonic, align 4
+  %1 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  // CHECK: load atomic
+  // CHECK-SAME: syncscope("singlethread") monotonic, align 4
+  %2 = llvm.load %ptr atomic syncscope("singlethread") monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+  llvm.return
+}
+
+// -----
+
 // Check that the translation does not crash in absence of a data layout.
 module {
   // CHECK: declare void @module_default_layout