[mlir] replace llvm.mlir.cast with unrealized_conversion_cast
The dialect-specific cast between builtin (ex-standard) types and LLVM
dialect types was introduced long before the built-in support for
unrealized_conversion_cast. It serves a similar purpose, but is restricted
to compatible builtin and LLVM dialect types, which may hamper progressive
lowering and composition with types from other dialects. Replace
llvm.mlir.cast with unrealized_conversion_cast, and drop the operation that
became unnecessary. Also make unrealized_conversion_cast legal by default
in LLVMConversionTarget, as the majority of conversions using it are
partial conversions that actually want the casts to persist in the IR. The
standard-to-llvm conversion, which is still expected to run last, cleans up
the remaining casts.

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D105880
This commit is contained in:
parent f57f8f7ccc
commit 881dc34f73
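As an illustrative sketch (not part of the patch itself), the same
index-to-i64 materialization before and after this change, taken from the
test updates below:

    // Before: the dialect-specific cast, restricted to compatible
    // builtin <-> LLVM dialect type pairs.
    %s = llvm.mlir.cast %c : index to i64
    // After: the builtin cast, which composes with types from any dialect.
    %s = unrealized_conversion_cast %c : index to i64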
@@ -1221,25 +1221,6 @@ def LLVM_ConstantOp
   let verifier = [{ return ::verify(*this); }];
 }
 
-def LLVM_DialectCastOp : LLVM_Op<"mlir.cast", [NoSideEffect]> {
-  let summary = "Type cast between LLVM dialect and Standard.";
-  let description = [{
-    llvm.mlir.cast op casts between Standard and LLVM dialects. It only changes
-    the dialect, but does not change compile-time or runtime semantics.
-
-    Notice that index type is not supported, as it's Standard-specific.
-
-    Example:
-      llvm.mlir.cast %v : f16 to llvm.half
-      llvm.mlir.cast %v : llvm.float to f32
-      llvm.mlir.cast %v : !llvm<"<2 x f32>"> to vector<2xf32>
-  }];
-
-  let arguments = (ins AnyType:$in);
-  let results = (outs AnyType:$res);
-  let assemblyFormat = "$in attr-dict `:` type($in) `to` type($res)";
-  let verifier = "return ::verify(*this);";
-}
-
 // Operations that correspond to LLVM intrinsics. With MLIR operation set being
 // extendable, there is no reason to introduce a hard boundary between "core"
 // operations and intrinsics. However, we systematically prefix them with
@@ -1003,7 +1003,7 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
                                                        converter, ctx);
 
   ConversionTarget target(*ctx);
-  target.addLegalOp<ConstantOp>();
+  target.addLegalOp<ConstantOp, UnrealizedConversionCastOp>();
   target.addLegalDialect<LLVM::LLVMDialect>();
 
   // All operations from Async dialect must be lowered to the runtime API and
@@ -327,7 +327,6 @@ void ConvertComplexToLLVMPass::runOnOperation() {
 
   LLVMConversionTarget target(getContext());
   target.addLegalOp<ModuleOp, FuncOp>();
-  target.addLegalOp<LLVM::DialectCastOp>();
   target.addIllegalDialect<complex::ComplexDialect>();
   if (failed(applyPartialConversion(module, target, std::move(patterns))))
     signalPassFailure();
@@ -316,6 +316,7 @@ void GpuToLLVMConversionPass::runOnOperation() {
   LLVMConversionTarget target(getContext());
 
   target.addIllegalDialect<gpu::GPUDialect>();
+  target.addIllegalOp<UnrealizedConversionCastOp>();
 
   populateVectorToLLVMConversionPatterns(converter, patterns);
   populateMemRefToLLVMConversionPatterns(converter, patterns);
@@ -14,5 +14,5 @@ using namespace mlir;
 mlir::LLVMConversionTarget::LLVMConversionTarget(MLIRContext &ctx)
     : ConversionTarget(ctx) {
   this->addLegalDialect<LLVM::LLVMDialect>();
-  this->addIllegalOp<LLVM::DialectCastOp>();
+  this->addLegalOp<UnrealizedConversionCastOp>();
 }
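Since the target constructor above now marks the builtin cast legal, a
partial conversion (complex-to-llvm, for instance) may leave casts behind
for a later pass to eliminate. A hypothetical intermediate state, sketched
from the updated tests further down:

    // The cast persists in the IR until the final standard-to-llvm run
    // folds it away or lowers it.
    %s = unrealized_conversion_cast %cplx : complex<f32> to !llvm.struct<(f32, f32)>
    %re = llvm.extractvalue %s[0] : !llvm.struct<(f32, f32)>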
@@ -58,6 +58,16 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
   addArgumentMaterialization([&](OpBuilder &builder, MemRefType resultType,
                                  ValueRange inputs,
                                  Location loc) -> Optional<Value> {
+    // Explicit "this" is necessary here because otherwise "options" resolves to
+    // the argument of the parent function (constructor), which is a reference
+    // and not a copy. This can lead to UB when the lambda is actually called.
+    if (this->options.useBarePtrCallConv) {
+      if (!resultType.hasStaticShape())
+        return llvm::None;
+      Value v = MemRefDescriptor::fromStaticShape(builder, loc, *this,
+                                                  resultType, inputs[0]);
+      return v;
+    }
     if (inputs.size() == 1)
       return llvm::None;
     return MemRefDescriptor::pack(builder, loc, *this, resultType, inputs);
@@ -69,20 +79,18 @@ LLVMTypeConverter::LLVMTypeConverter(MLIRContext *ctx,
                                Location loc) -> Optional<Value> {
     if (inputs.size() != 1)
       return llvm::None;
-    // FIXME: These should check LLVM::DialectCastOp can actually be constructed
-    // from the input and result.
-    return builder.create<LLVM::DialectCastOp>(loc, resultType, inputs[0])
-        .getResult();
+    return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+        .getResult(0);
   });
   addTargetMaterialization([&](OpBuilder &builder, Type resultType,
                                ValueRange inputs,
                                Location loc) -> Optional<Value> {
     if (inputs.size() != 1)
       return llvm::None;
-    // FIXME: These should check LLVM::DialectCastOp can actually be constructed
-    // from the input and result.
-    return builder.create<LLVM::DialectCastOp>(loc, resultType, inputs[0])
-        .getResult();
+    return builder.create<UnrealizedConversionCastOp>(loc, resultType, inputs)
+        .getResult(0);
   });
 }
 
@@ -137,7 +137,7 @@ void ConvertLinalgToLLVMPass::runOnOperation() {
 
   LLVMConversionTarget target(getContext());
   target.addIllegalOp<RangeOp>();
-  target.addLegalOp<ModuleOp, LLVM::DialectCastOp>();
+  target.addLegalOp<ModuleOp>();
   if (failed(applyPartialConversion(module, target, std::move(patterns))))
     signalPassFailure();
 }
@@ -201,7 +201,6 @@ struct ConvertMathToLLVMPass
     LLVMTypeConverter converter(&getContext());
     populateMathToLLVMConversionPatterns(converter, patterns);
     LLVMConversionTarget target(getContext());
-    target.addLegalOp<LLVM::DialectCastOp>();
     if (failed(
             applyPartialConversion(getFunction(), target, std::move(patterns))))
       signalPassFailure();
@@ -1172,9 +1172,11 @@ struct SubViewOpLowering : public ConvertOpToLLVMPattern<memref::SubViewOp> {
       } else {
         Value pos = rewriter.create<LLVM::ConstantOp>(
             loc, llvmIndexType, rewriter.getI64IntegerAttr(i));
-        size = rewriter.create<LLVM::DialectCastOp>(
-            loc, llvmIndexType,
-            rewriter.create<memref::DimOp>(loc, subViewOp.source(), pos));
+        Value dim =
+            rewriter.create<memref::DimOp>(loc, subViewOp.source(), pos);
+        auto cast = rewriter.create<UnrealizedConversionCastOp>(
+            loc, llvmIndexType, dim);
+        size = cast.getResult(0);
       }
       stride = rewriter.create<LLVM::ConstantOp>(
           loc, llvmIndexType, rewriter.getI64IntegerAttr(1));
@@ -1432,7 +1434,7 @@ struct MemRefToLLVMPass : public ConvertMemRefToLLVMBase<MemRefToLLVMPass> {
     RewritePatternSet patterns(&getContext());
     populateMemRefToLLVMConversionPatterns(typeConverter, patterns);
     LLVMConversionTarget target(getContext());
-    target.addLegalOp<LLVM::DialectCastOp, FuncOp>();
+    target.addLegalOp<FuncOp>();
     if (failed(applyPartialConversion(op, target, std::move(patterns))))
       signalPassFailure();
   }
@@ -101,9 +101,9 @@ class LegalizeDataOpForLLVMTranslation : public ConvertOpToLLVMPattern<Op> {
             originalDataOperand.getType().dyn_cast<MemRefType>()) {
       Type structType = converter->convertType(memRefType);
       Value memRefDescriptor = builder
-                                   .create<LLVM::DialectCastOp>(
+                                   .create<UnrealizedConversionCastOp>(
                                        loc, structType, originalDataOperand)
-                                   .getResult();
+                                   .getResult(0);
 
       // Calculate the size of the memref and get the pointer to the allocated
       // buffer.
@@ -164,6 +164,7 @@ void ConvertOpenACCToLLVMPass::runOnOperation() {
 
   ConversionTarget target(*context);
   target.addLegalDialect<LLVM::LLVMDialect>();
+  target.addLegalOp<UnrealizedConversionCastOp>();
 
   auto allDataOperandsAreConverted = [](ValueRange operands) {
     for (Value operand : operands) {
@@ -309,56 +309,9 @@ struct BarePtrFuncOpConversion : public FuncOpConversionBase {
   LogicalResult
   matchAndRewrite(FuncOp funcOp, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    // Store the type of memref-typed arguments before the conversion so that we
-    // can promote them to MemRef descriptor at the beginning of the function.
-    SmallVector<Type, 8> oldArgTypes =
-        llvm::to_vector<8>(funcOp.getType().getInputs());
-
     auto newFuncOp = convertFuncOpToLLVMFuncOp(funcOp, rewriter);
     if (!newFuncOp)
       return failure();
-    if (newFuncOp.getBody().empty()) {
-      rewriter.eraseOp(funcOp);
-      return success();
-    }
-
-    // Promote bare pointers from memref arguments to memref descriptors at the
-    // beginning of the function so that all the memrefs in the function have a
-    // uniform representation.
-    Block *entryBlock = &newFuncOp.getBody().front();
-    auto blockArgs = entryBlock->getArguments();
-    assert(blockArgs.size() == oldArgTypes.size() &&
-           "The number of arguments and types doesn't match");
-
-    OpBuilder::InsertionGuard guard(rewriter);
-    rewriter.setInsertionPointToStart(entryBlock);
-    for (auto it : llvm::zip(blockArgs, oldArgTypes)) {
-      BlockArgument arg = std::get<0>(it);
-      Type argTy = std::get<1>(it);
-
-      // Unranked memrefs are not supported in the bare pointer calling
-      // convention. We should have bailed out before in the presence of
-      // unranked memrefs.
-      assert(!argTy.isa<UnrankedMemRefType>() &&
-             "Unranked memref is not supported");
-      auto memrefTy = argTy.dyn_cast<MemRefType>();
-      if (!memrefTy)
-        continue;
-
-      // Replace barePtr with a placeholder (undef), promote barePtr to a ranked
-      // or unranked memref descriptor and replace placeholder with the last
-      // instruction of the memref descriptor.
-      // TODO: The placeholder is needed to avoid replacing barePtr uses in the
-      // MemRef descriptor instructions. We may want to have a utility in the
-      // rewriter to properly handle this use case.
-      Location loc = funcOp.getLoc();
-      auto placeholder = rewriter.create<LLVM::UndefOp>(loc, memrefTy);
-      rewriter.replaceUsesOfBlockArgument(arg, placeholder);
-
-      Value desc = MemRefDescriptor::fromStaticShape(
-          rewriter, loc, *getTypeConverter(), memrefTy, arg);
-      rewriter.replaceOp(placeholder, {desc});
-    }
-
     rewriter.eraseOp(funcOp);
     return success();
@@ -562,20 +515,31 @@ struct CallIndirectOpLowering : public CallOpInterfaceLowering<CallIndirectOp> {
   using Super::Super;
 };
 
-struct DialectCastOpLowering
-    : public ConvertOpToLLVMPattern<LLVM::DialectCastOp> {
-  using ConvertOpToLLVMPattern<LLVM::DialectCastOp>::ConvertOpToLLVMPattern;
+struct UnrealizedConversionCastOpLowering
+    : public ConvertOpToLLVMPattern<UnrealizedConversionCastOp> {
+  using ConvertOpToLLVMPattern<
+      UnrealizedConversionCastOp>::ConvertOpToLLVMPattern;
 
   LogicalResult
-  matchAndRewrite(LLVM::DialectCastOp castOp, ArrayRef<Value> operands,
+  matchAndRewrite(UnrealizedConversionCastOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    LLVM::DialectCastOp::Adaptor transformed(operands);
-    if (transformed.in().getType() !=
-        typeConverter->convertType(castOp.getType())) {
-      return failure();
+    UnrealizedConversionCastOp::Adaptor transformed(operands);
+    SmallVector<Type> convertedTypes;
+    if (succeeded(typeConverter->convertTypes(op.outputs().getTypes(),
+                                              convertedTypes)) &&
+        convertedTypes == transformed.inputs().getTypes()) {
+      rewriter.replaceOp(op, transformed.inputs());
+      return success();
     }
-    rewriter.replaceOp(castOp, transformed.in());
-    return success();
+
+    convertedTypes.clear();
+    if (succeeded(typeConverter->convertTypes(transformed.inputs().getTypes(),
+                                              convertedTypes)) &&
+        convertedTypes == op.outputs().getType()) {
+      rewriter.replaceOp(op, transformed.inputs());
+      return success();
+    }
+    return failure();
   }
 };
 
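For illustration, the replacement lowering above erases a cast whose
converted result types already match its converted operand types; assuming
index lowers to i64, a sketch:

    // During the final standard-to-llvm conversion, this cast matches the
    // first check in the pattern and is replaced by its operand %x.
    %y = unrealized_conversion_cast %x : i64 to index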
@@ -1118,7 +1082,6 @@ void mlir::populateStdToLLVMConversionPatterns(LLVMTypeConverter &converter,
       CondBranchOpLowering,
       CopySignOpLowering,
       ConstantOpLowering,
-      DialectCastOpLowering,
       DivFOpLowering,
       FloorFOpLowering,
       FmaFOpLowering,
@@ -1153,6 +1116,7 @@ void mlir::populateStdToLLVMConversionPatterns(LLVMTypeConverter &converter,
       UnsignedRemIOpLowering,
       UnsignedShiftRightOpLowering,
       XOrOpLowering,
+      UnrealizedConversionCastOpLowering,
       ZeroExtendIOpLowering>(converter);
   // clang-format on
 }
@@ -1205,8 +1169,10 @@ struct LLVMLoweringPass : public ConvertStandardToLLVMBase<LLVMLoweringPass> {
     populateStdToLLVMConversionPatterns(typeConverter, patterns);
 
     LLVMConversionTarget target(getContext());
+    target.addIllegalOp<UnrealizedConversionCastOp>();
     if (failed(applyPartialConversion(m, target, std::move(patterns))))
       signalPassFailure();
+
     m->setAttr(LLVM::LLVMDialect::getDataLayoutAttrName(),
                StringAttr::get(m.getContext(), this->dataLayout));
   }
@@ -78,7 +78,6 @@ void LowerVectorToLLVMPass::runOnOperation() {
 
   // Architecture specific augmentations.
   LLVMConversionTarget target(getContext());
-  target.addLegalOp<LLVM::DialectCastOp>();
   target.addLegalDialect<memref::MemRefDialect>();
   target.addLegalDialect<StandardOpsDialect>();
   target.addLegalOp<UnrealizedConversionCastOp>();
@@ -1323,193 +1323,6 @@ static void printGlobalOp(OpAsmPrinter &p, GlobalOp op) {
   p.printRegion(initializer, /*printEntryBlockArgs=*/false);
 }
 
-//===----------------------------------------------------------------------===//
-// Verifier for LLVM::DialectCastOp.
-//===----------------------------------------------------------------------===//
-
-/// Checks if `llvmType` is dialect cast-compatible with `index` type. Does not
-/// report the error, the user is expected to produce an appropriate message.
-// TODO: make the size depend on data layout rather than on the conversion
-// pass option, and pull that information here.
-static LogicalResult verifyCastWithIndex(Type llvmType) {
-  return success(llvmType.isa<IntegerType>());
-}
-
-/// Checks if `llvmType` is dialect cast-compatible with built-in `type` and
-/// reports errors to the location of `op`. `isElement` indicates whether the
-/// verification is performed for types that are element types inside a
-/// container; we don't want casts from X to X at the top level, but c1<X> to
-/// c2<X> may be fine.
-static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type,
-                                bool isElement = false) {
-  // Equal element types are directly compatible.
-  if (isElement && llvmType == type)
-    return success();
-
-  // Index is compatible with any integer.
-  if (type.isIndex()) {
-    if (succeeded(verifyCastWithIndex(llvmType)))
-      return success();
-
-    return op.emitOpError("invalid cast between index and non-integer type");
-  }
-
-  if (type.isa<IntegerType>()) {
-    auto llvmIntegerType = llvmType.dyn_cast<IntegerType>();
-    if (!llvmIntegerType)
-      return op->emitOpError("invalid cast between integer and non-integer");
-    if (llvmIntegerType.getWidth() != type.getIntOrFloatBitWidth())
-      return op.emitOpError("invalid cast changing integer width");
-    return success();
-  }
-
-  // Vectors are compatible if they are 1D non-scalable, and their element types
-  // are compatible. nD vectors are compatible with (n-1)D arrays containing 1D
-  // vector.
-  if (auto vectorType = type.dyn_cast<VectorType>()) {
-    if (vectorType == llvmType && !isElement)
-      return op.emitOpError("vector types should not be casted");
-
-    if (vectorType.getRank() == 1) {
-      auto llvmVectorType = llvmType.dyn_cast<VectorType>();
-      if (!llvmVectorType || llvmVectorType.getRank() != 1)
-        return op.emitOpError("invalid cast for vector types");
-
-      return verifyCast(op, llvmVectorType.getElementType(),
-                        vectorType.getElementType(), /*isElement=*/true);
-    }
-
-    auto arrayType = llvmType.dyn_cast<LLVM::LLVMArrayType>();
-    if (!arrayType ||
-        arrayType.getNumElements() != vectorType.getShape().front())
-      return op.emitOpError("invalid cast for vector, expected array");
-    return verifyCast(op, arrayType.getElementType(),
-                      VectorType::get(vectorType.getShape().drop_front(),
-                                      vectorType.getElementType()),
-                      /*isElement=*/true);
-  }
-
-  if (auto memrefType = type.dyn_cast<MemRefType>()) {
-    // Bare pointer convention: statically-shaped memref is compatible with an
-    // LLVM pointer to the element type.
-    if (auto ptrType = llvmType.dyn_cast<LLVMPointerType>()) {
-      if (!memrefType.hasStaticShape())
-        return op->emitOpError(
-            "unexpected bare pointer for dynamically shaped memref");
-      if (memrefType.getMemorySpaceAsInt() != ptrType.getAddressSpace())
-        return op->emitError("invalid conversion between memref and pointer in "
-                             "different memory spaces");
-
-      return verifyCast(op, ptrType.getElementType(),
-                        memrefType.getElementType(), /*isElement=*/true);
-    }
-
-    // Otherwise, memrefs are convertible to a descriptor, which is a structure
-    // type.
-    auto structType = llvmType.dyn_cast<LLVMStructType>();
-    if (!structType)
-      return op->emitOpError("invalid cast between a memref and a type other "
-                             "than pointer or memref descriptor");
-
-    unsigned expectedNumElements = memrefType.getRank() == 0 ? 3 : 5;
-    if (structType.getBody().size() != expectedNumElements) {
-      return op->emitOpError() << "expected memref descriptor with "
-                               << expectedNumElements << " elements";
-    }
-
-    // The first two elements are pointers to the element type.
-    auto allocatedPtr = structType.getBody()[0].dyn_cast<LLVMPointerType>();
-    if (!allocatedPtr ||
-        allocatedPtr.getAddressSpace() != memrefType.getMemorySpaceAsInt())
-      return op->emitOpError("expected first element of a memref descriptor to "
-                             "be a pointer in the address space of the memref");
-    if (failed(verifyCast(op, allocatedPtr.getElementType(),
-                          memrefType.getElementType(), /*isElement=*/true)))
-      return failure();
-
-    auto alignedPtr = structType.getBody()[1].dyn_cast<LLVMPointerType>();
-    if (!alignedPtr ||
-        alignedPtr.getAddressSpace() != memrefType.getMemorySpaceAsInt())
-      return op->emitOpError(
-          "expected second element of a memref descriptor to "
-          "be a pointer in the address space of the memref");
-    if (failed(verifyCast(op, alignedPtr.getElementType(),
-                          memrefType.getElementType(), /*isElement=*/true)))
-      return failure();
-
-    // The second element (offset) is an equivalent of index.
-    if (failed(verifyCastWithIndex(structType.getBody()[2])))
-      return op->emitOpError("expected third element of a memref descriptor to "
-                             "be index-compatible integers");
-
-    // 0D memrefs don't have sizes/strides.
-    if (memrefType.getRank() == 0)
-      return success();
-
-    // Sizes and strides are rank-sized arrays of `index` equivalents.
-    auto sizes = structType.getBody()[3].dyn_cast<LLVMArrayType>();
-    if (!sizes || failed(verifyCastWithIndex(sizes.getElementType())) ||
-        sizes.getNumElements() != memrefType.getRank())
-      return op->emitOpError(
-          "expected fourth element of a memref descriptor "
-          "to be an array of <rank> index-compatible integers");
-
-    auto strides = structType.getBody()[4].dyn_cast<LLVMArrayType>();
-    if (!strides || failed(verifyCastWithIndex(strides.getElementType())) ||
-        strides.getNumElements() != memrefType.getRank())
-      return op->emitOpError(
-          "expected fifth element of a memref descriptor "
-          "to be an array of <rank> index-compatible integers");
-
-    return success();
-  }
-
-  // Unranked memrefs are compatible with their descriptors.
-  if (auto unrankedMemrefType = type.dyn_cast<UnrankedMemRefType>()) {
-    auto structType = llvmType.dyn_cast<LLVMStructType>();
-    if (!structType || structType.getBody().size() != 2)
-      return op->emitOpError(
-          "expected descriptor to be a struct with two elements");
-
-    if (failed(verifyCastWithIndex(structType.getBody()[0])))
-      return op->emitOpError("expected first element of a memref descriptor to "
-                             "be an index-compatible integer");
-
-    auto ptrType = structType.getBody()[1].dyn_cast<LLVMPointerType>();
-    auto ptrElementType =
-        ptrType ? ptrType.getElementType().dyn_cast<IntegerType>() : nullptr;
-    if (!ptrElementType || ptrElementType.getWidth() != 8)
-      return op->emitOpError("expected second element of a memref descriptor "
-                             "to be an !llvm.ptr<i8>");
-
-    return success();
-  }
-
-  // Complex types are compatible with the two-element structs.
-  if (auto complexType = type.dyn_cast<ComplexType>()) {
-    auto structType = llvmType.dyn_cast<LLVMStructType>();
-    if (!structType || structType.getBody().size() != 2 ||
-        structType.getBody()[0] != structType.getBody()[1] ||
-        structType.getBody()[0] != complexType.getElementType())
-      return op->emitOpError("expected 'complex' to map to two-element struct "
-                             "with identical element types");
-    return success();
-  }
-
-  // Everything else is not supported.
-  return op->emitError("unsupported cast");
-}
-
-static LogicalResult verify(DialectCastOp op) {
-  if (isCompatibleType(op.getType()))
-    return verifyCast(op, op.getType(), op.in().getType());
-
-  if (!isCompatibleType(op.in().getType()))
-    return op->emitOpError("expected one LLVM type and one built-in type");
-
-  return verifyCast(op, op.in().getType(), op.getType());
-}
-
 // Parses one of the keywords provided in the list `keywords` and returns the
 // position of the parsed keyword in the list. If none of the keywords from the
 // list is parsed, returns -1.
@@ -284,13 +284,19 @@ LogicalResult
 UnrealizedConversionCastOp::fold(ArrayRef<Attribute> attrOperands,
                                  SmallVectorImpl<OpFoldResult> &foldResults) {
   OperandRange operands = inputs();
+  ResultRange results = outputs();
+
+  if (operands.getType() == results.getType()) {
+    foldResults.append(operands.begin(), operands.end());
+    return success();
+  }
+
   if (operands.empty())
     return failure();
 
   // Check that the input is a cast with results that all feed into this
   // operation, and operand types that directly match the result types of this
   // operation.
-  ResultRange results = outputs();
   Value firstInput = operands.front();
   auto inputOp = firstInput.getDefiningOp<UnrealizedConversionCastOp>();
   if (!inputOp || inputOp.getResults() != operands ||
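The new early exit makes identity casts fold to their operands; an
illustrative sketch:

    // Operand and result types match, so the fold replaces all uses of
    // %w with %v and the cast disappears.
    %w = unrealized_conversion_cast %v : i64 to i64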
@@ -21,7 +21,7 @@ func @create_value() {
 // CHECK-LABEL: @create_group
 func @create_group() {
   // CHECK: %[[C:.*]] = constant 1 : index
-  // CHECK: %[[S:.*]] = llvm.mlir.cast %[[C]] : index to i64
+  // CHECK: %[[S:.*]] = unrealized_conversion_cast %[[C]] : index to i64
   %c = constant 1 : index
   // CHECK: %[[GROUP:.*]] = call @mlirAsyncRuntimeCreateGroup(%[[S]])
   %0 = async.runtime.create_group %c: !async.group
@@ -12,9 +12,9 @@ func @complex_create(%real: f32, %imag: f32) -> complex<f32> {
 
 // CHECK-LABEL: func @complex_extract
 // CHECK-SAME: (%[[CPLX:.*]]: complex<f32>)
-// CHECK-NEXT: %[[CAST0:.*]] = llvm.mlir.cast %[[CPLX]] : complex<f32> to !llvm.struct<(f32, f32)>
+// CHECK-NEXT: %[[CAST0:.*]] = unrealized_conversion_cast %[[CPLX]] : complex<f32> to !llvm.struct<(f32, f32)>
 // CHECK-NEXT: %[[REAL:.*]] = llvm.extractvalue %[[CAST0]][0] : !llvm.struct<(f32, f32)>
-// CHECK-NEXT: %[[CAST1:.*]] = llvm.mlir.cast %[[CPLX]] : complex<f32> to !llvm.struct<(f32, f32)>
+// CHECK-NEXT: %[[CAST1:.*]] = unrealized_conversion_cast %[[CPLX]] : complex<f32> to !llvm.struct<(f32, f32)>
 // CHECK-NEXT: %[[IMAG:.*]] = llvm.extractvalue %[[CAST1]][1] : !llvm.struct<(f32, f32)>
 func @complex_extract(%cplx: complex<f32>) {
   %real1 = complex.re %cplx : complex<f32>
@@ -70,8 +70,8 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %div = complex.div %lhs, %rhs : complex<f32>
   return %div : complex<f32>
 }
-// CHECK: %[[CASTED_LHS:.*]] = llvm.mlir.cast %[[LHS]] : complex<f32> to ![[C_TY:.*>]]
-// CHECK: %[[CASTED_RHS:.*]] = llvm.mlir.cast %[[RHS]] : complex<f32> to ![[C_TY]]
+// CHECK: %[[CASTED_LHS:.*]] = unrealized_conversion_cast %[[LHS]] : complex<f32> to ![[C_TY:.*>]]
+// CHECK: %[[CASTED_RHS:.*]] = unrealized_conversion_cast %[[RHS]] : complex<f32> to ![[C_TY]]
 
 // CHECK: %[[LHS_RE:.*]] = llvm.extractvalue %[[CASTED_LHS]][0] : ![[C_TY]]
 // CHECK: %[[LHS_IM:.*]] = llvm.extractvalue %[[CASTED_LHS]][1] : ![[C_TY]]
@@ -97,7 +97,7 @@ func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 // CHECK: %[[IMAG:.*]] = llvm.fdiv %[[IMAG_TMP_2]], %[[SQ_NORM]] : f32
 // CHECK: %[[RESULT_2:.*]] = llvm.insertvalue %[[IMAG]], %[[RESULT_1]][1] : ![[C_TY]]
 //
-// CHECK: %[[CASTED_RESULT:.*]] = llvm.mlir.cast %[[RESULT_2]] : ![[C_TY]] to complex<f32>
+// CHECK: %[[CASTED_RESULT:.*]] = unrealized_conversion_cast %[[RESULT_2]] : ![[C_TY]] to complex<f32>
 // CHECK: return %[[CASTED_RESULT]] : complex<f32>
 
 // CHECK-LABEL: func @complex_mul
@@ -106,8 +106,8 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
   %mul = complex.mul %lhs, %rhs : complex<f32>
   return %mul : complex<f32>
 }
-// CHECK: %[[CASTED_LHS:.*]] = llvm.mlir.cast %[[LHS]] : complex<f32> to ![[C_TY:.*>]]
-// CHECK: %[[CASTED_RHS:.*]] = llvm.mlir.cast %[[RHS]] : complex<f32> to ![[C_TY]]
+// CHECK: %[[CASTED_LHS:.*]] = unrealized_conversion_cast %[[LHS]] : complex<f32> to ![[C_TY:.*>]]
+// CHECK: %[[CASTED_RHS:.*]] = unrealized_conversion_cast %[[RHS]] : complex<f32> to ![[C_TY]]
 
 // CHECK: %[[LHS_RE:.*]] = llvm.extractvalue %[[CASTED_LHS]][0] : ![[C_TY]]
 // CHECK: %[[LHS_IM:.*]] = llvm.extractvalue %[[CASTED_LHS]][1] : ![[C_TY]]
@@ -126,7 +126,7 @@ func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
 // CHECK: %[[RESULT_1:.*]] = llvm.insertvalue %[[REAL]], %[[RESULT_0]][0]
 // CHECK: %[[RESULT_2:.*]] = llvm.insertvalue %[[IMAG]], %[[RESULT_1]][1]
 
-// CHECK: %[[CASTED_RESULT:.*]] = llvm.mlir.cast %[[RESULT_2]] : ![[C_TY]] to complex<f32>
+// CHECK: %[[CASTED_RESULT:.*]] = unrealized_conversion_cast %[[RESULT_2]] : ![[C_TY]] to complex<f32>
 // CHECK: return %[[CASTED_RESULT]] : complex<f32>
 
 // CHECK-LABEL: func @complex_abs
@@ -135,7 +135,7 @@ func @complex_abs(%arg: complex<f32>) -> f32 {
   %abs = complex.abs %arg: complex<f32>
   return %abs : f32
 }
-// CHECK: %[[CASTED_ARG:.*]] = llvm.mlir.cast %[[ARG]] : complex<f32> to ![[C_TY:.*>]]
+// CHECK: %[[CASTED_ARG:.*]] = unrealized_conversion_cast %[[ARG]] : complex<f32> to ![[C_TY:.*>]]
 // CHECK: %[[REAL:.*]] = llvm.extractvalue %[[CASTED_ARG]][0] : ![[C_TY]]
 // CHECK: %[[IMAG:.*]] = llvm.extractvalue %[[CASTED_ARG]][1] : ![[C_TY]]
 // CHECK-DAG: %[[REAL_SQ:.*]] = llvm.fmul %[[REAL]], %[[REAL]] : f32
@@ -4,8 +4,8 @@
 // CHECK-LABEL: func @mixed_alloc(
 // CHECK: %[[Marg:.*]]: index, %[[Narg:.*]]: index)
 func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
-// CHECK: %[[M:.*]] = llvm.mlir.cast %[[Marg]]
-// CHECK: %[[N:.*]] = llvm.mlir.cast %[[Narg]]
+// CHECK: %[[M:.*]] = unrealized_conversion_cast %[[Marg]]
+// CHECK: %[[N:.*]] = unrealized_conversion_cast %[[Narg]]
 // CHECK: %[[c42:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64
@@ -46,8 +46,8 @@ func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
 // CHECK-LABEL: func @dynamic_alloc(
 // CHECK: %[[Marg:.*]]: index, %[[Narg:.*]]: index)
 func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
-// CHECK: %[[M:.*]] = llvm.mlir.cast %[[Marg]]
-// CHECK: %[[N:.*]] = llvm.mlir.cast %[[Narg]]
+// CHECK: %[[M:.*]] = unrealized_conversion_cast %[[Marg]]
+// CHECK: %[[N:.*]] = unrealized_conversion_cast %[[Narg]]
 // CHECK-NEXT: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
@@ -73,8 +73,8 @@ func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 // CHECK-LABEL: func @dynamic_alloca
 // CHECK: %[[Marg:.*]]: index, %[[Narg:.*]]: index)
 func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
-// CHECK: %[[M:.*]] = llvm.mlir.cast %[[Marg]]
-// CHECK: %[[N:.*]] = llvm.mlir.cast %[[Narg]]
+// CHECK: %[[M:.*]] = unrealized_conversion_cast %[[Marg]]
+// CHECK: %[[N:.*]] = unrealized_conversion_cast %[[Narg]]
 // CHECK-NEXT: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : i64
 // CHECK-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
@@ -167,8 +167,8 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 // CHECK-LABEL: func @mixed_load(
 // CHECK: %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
 func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
-// CHECK: %[[I:.*]] = llvm.mlir.cast %[[Iarg]]
-// CHECK: %[[J:.*]] = llvm.mlir.cast %[[Jarg]]
+// CHECK: %[[I:.*]] = unrealized_conversion_cast %[[Iarg]]
+// CHECK: %[[J:.*]] = unrealized_conversion_cast %[[Jarg]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
@@ -184,8 +184,8 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
 // CHECK-LABEL: func @dynamic_load(
 // CHECK: %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
 func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
-// CHECK: %[[I:.*]] = llvm.mlir.cast %[[Iarg]]
-// CHECK: %[[J:.*]] = llvm.mlir.cast %[[Jarg]]
+// CHECK: %[[I:.*]] = unrealized_conversion_cast %[[Iarg]]
+// CHECK: %[[J:.*]] = unrealized_conversion_cast %[[Jarg]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
@@ -201,8 +201,8 @@ func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 // CHECK-LABEL: func @prefetch
 // CHECK: %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index)
 func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
-// CHECK: %[[I:.*]] = llvm.mlir.cast %[[Iarg]]
-// CHECK: %[[J:.*]] = llvm.mlir.cast %[[Jarg]]
+// CHECK: %[[I:.*]] = unrealized_conversion_cast %[[Iarg]]
+// CHECK: %[[J:.*]] = unrealized_conversion_cast %[[Jarg]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
@@ -231,8 +231,8 @@ func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 // CHECK-LABEL: func @dynamic_store
 // CHECK: %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index
 func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
-// CHECK: %[[I:.*]] = llvm.mlir.cast %[[Iarg]]
-// CHECK: %[[J:.*]] = llvm.mlir.cast %[[Jarg]]
+// CHECK: %[[I:.*]] = unrealized_conversion_cast %[[Iarg]]
+// CHECK: %[[J:.*]] = unrealized_conversion_cast %[[Jarg]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
@@ -248,8 +248,8 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
 // CHECK-LABEL: func @mixed_store
 // CHECK: %{{.*}}, %[[Iarg:.*]]: index, %[[Jarg:.*]]: index
 func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
-// CHECK: %[[I:.*]] = llvm.mlir.cast %[[Iarg]]
-// CHECK: %[[J:.*]] = llvm.mlir.cast %[[Jarg]]
+// CHECK: %[[I:.*]] = unrealized_conversion_cast %[[Iarg]]
+// CHECK: %[[J:.*]] = unrealized_conversion_cast %[[Jarg]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
@@ -381,7 +381,7 @@ func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
 // CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %{{.*}}[3] : ![[DESCR_TY:.*]]
 // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr<array<2 x i64>>
 // CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.ptr<array<2 x i64>>
-// CHECK-DAG: %[[IDX:.*]] = llvm.mlir.cast %[[IDXarg]]
+// CHECK-DAG: %[[IDX:.*]] = unrealized_conversion_cast %[[IDXarg]]
 // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr<array<2 x i64>>, i64, i64) -> !llvm.ptr<i64>
 // CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr<i64>
   %result = memref.dim %arg, %idx : memref<3x?xf32>
@@ -397,7 +397,7 @@ func @memref_reinterpret_cast_ranked_to_static_shape(%input : memref<2x3xf32>) {
            : memref<2x3xf32> to memref<6x1xf32>
   return
 }
-// CHECK: [[INPUT:%.*]] = llvm.mlir.cast %{{.*}} :
+// CHECK: [[INPUT:%.*]] = unrealized_conversion_cast %{{.*}} :
 // CHECK: to [[TY:!.*]]
 // CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY]]
 // CHECK: [[BASE_PTR:%.*]] = llvm.extractvalue [[INPUT]][0] : [[TY]]
@@ -433,12 +433,12 @@ func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
 // CHECK-SAME: ([[OFFSETarg:%[a-z,0-9]+]]: index,
 // CHECK-SAME: [[SIZE_0arg:%[a-z,0-9]+]]: index, [[SIZE_1arg:%[a-z,0-9]+]]: index,
 // CHECK-SAME: [[STRIDE_0arg:%[a-z,0-9]+]]: index, [[STRIDE_1arg:%[a-z,0-9]+]]: index,
-// CHECK: [[INPUT:%.*]] = llvm.mlir.cast
-// CHECK: [[OFFSET:%.*]] = llvm.mlir.cast [[OFFSETarg]]
-// CHECK: [[SIZE_0:%.*]] = llvm.mlir.cast [[SIZE_0arg]]
-// CHECK: [[SIZE_1:%.*]] = llvm.mlir.cast [[SIZE_1arg]]
-// CHECK: [[STRIDE_0:%.*]] = llvm.mlir.cast [[STRIDE_0arg]]
-// CHECK: [[STRIDE_1:%.*]] = llvm.mlir.cast [[STRIDE_1arg]]
+// CHECK: [[INPUT:%.*]] = unrealized_conversion_cast
+// CHECK: [[OFFSET:%.*]] = unrealized_conversion_cast [[OFFSETarg]]
+// CHECK: [[SIZE_0:%.*]] = unrealized_conversion_cast [[SIZE_0arg]]
+// CHECK: [[SIZE_1:%.*]] = unrealized_conversion_cast [[SIZE_1arg]]
+// CHECK: [[STRIDE_0:%.*]] = unrealized_conversion_cast [[STRIDE_0arg]]
+// CHECK: [[STRIDE_1:%.*]] = unrealized_conversion_cast [[STRIDE_1arg]]
 // CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY:!.*]]
 // CHECK: [[DESCRIPTOR:%.*]] = llvm.extractvalue [[INPUT]][1] : !llvm.struct<(i64, ptr<i8>)>
 // CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
@@ -464,8 +464,8 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
            : (memref<2x3xf32>, memref<?xindex>) -> memref<*xf32>
   return
 }
-// CHECK: [[INPUT:%.*]] = llvm.mlir.cast %{{.*}} to [[INPUT_TY:!.*]]
-// CHECK: [[SHAPE:%.*]] = llvm.mlir.cast %{{.*}} to [[SHAPE_TY:!.*]]
+// CHECK: [[INPUT:%.*]] = unrealized_conversion_cast %{{.*}} to [[INPUT_TY:!.*]]
+// CHECK: [[SHAPE:%.*]] = unrealized_conversion_cast %{{.*}} to [[SHAPE_TY:!.*]]
 // CHECK: [[RANK:%.*]] = llvm.extractvalue [[SHAPE]][3, 0] : [[SHAPE_TY]]
 // CHECK: [[UNRANKED_OUT_O:%.*]] = llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
 // CHECK: [[UNRANKED_OUT_1:%.*]] = llvm.insertvalue [[RANK]], [[UNRANKED_OUT_O]][0] : !llvm.struct<(i64, ptr<i8>)>
@@ -13,7 +13,7 @@ func @zero_d_alloc() -> memref<f32> {
 // CHECK: llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
 // CHECK: llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
-// CHECK: llvm.mlir.cast %{{.*}}
+// CHECK: unrealized_conversion_cast %{{.*}}
 
   %0 = memref.alloc() : memref<f32>
   return %0 : memref<f32>
@@ -23,7 +23,7 @@ func @zero_d_alloc() -> memref<f32> {
 
 // CHECK-LABEL: func @zero_d_dealloc
 func @zero_d_dealloc(%arg0: memref<f32>) {
-// CHECK: llvm.mlir.cast
+// CHECK: unrealized_conversion_cast
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK: llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
@@ -128,8 +128,8 @@ func @zero_d_load(%arg0: memref<f32>) -> f32 {
 // CHECK: %[[I:.*]]: index,
 // CHECK: %[[J:.*]]: index)
 func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
-// CHECK: %[[II:.*]] = llvm.mlir.cast %[[I]]
-// CHECK: %[[JJ:.*]] = llvm.mlir.cast %[[J]]
+// CHECK: %[[II:.*]] = unrealized_conversion_cast %[[I]]
+// CHECK: %[[JJ:.*]] = unrealized_conversion_cast %[[J]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK: %[[offI:.*]] = llvm.mul %[[II]], %[[st0]] : i64
@@ -156,8 +156,8 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // CHECK: %[[MEMREF:.*]]: memref<10x42xf32>,
 // CHECK-SAME: %[[I:.*]]: index, %[[J:.*]]: index,
 func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
-// CHECK: %[[II:.*]] = llvm.mlir.cast %[[I]]
-// CHECK: %[[JJ:.*]] = llvm.mlir.cast %[[J]]
+// CHECK: %[[II:.*]] = unrealized_conversion_cast %[[I]]
+// CHECK: %[[JJ:.*]] = unrealized_conversion_cast %[[J]]
 // CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK: %[[offI:.*]] = llvm.mul %[[II]], %[[st0]] : i64
@@ -200,7 +200,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
   %c1 = constant 1 : index
   %0 = memref.alloc(%c1) : memref<? x vector<2xf32>>
   // CHECK: %[[CST_S:.*]] = constant 1 : index
-  // CHECK: %[[CST:.*]] = llvm.mlir.cast
+  // CHECK: %[[CST:.*]] = unrealized_conversion_cast
   // CHECK: llvm.mlir.null
   // CHECK: llvm.getelementptr %{{.*}}[[CST]]
   // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr<{{.*}}> to i32
@@ -10,9 +10,9 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = memref.alloc() : memref<2048xi8>
 
   // Test two dynamic sizes.
-  // CHECK: %[[ARG2:.*]] = llvm.mlir.cast %[[ARG2F:.*]]
-  // CHECK: %[[ARG0:.*]] = llvm.mlir.cast %[[ARG0F:.*]]
-  // CHECK: %[[ARG1:.*]] = llvm.mlir.cast %[[ARG1F:.*]]
+  // CHECK: %[[ARG2:.*]] = unrealized_conversion_cast %[[ARG2F:.*]]
+  // CHECK: %[[ARG0:.*]] = unrealized_conversion_cast %[[ARG0F:.*]]
+  // CHECK: %[[ARG1:.*]] = unrealized_conversion_cast %[[ARG1F:.*]]
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
@@ -29,8 +29,8 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %1 = memref.view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
 
   // Test one dynamic size.
-  // CHECK: %[[ARG2:.*]] = llvm.mlir.cast %[[ARG2F:.*]]
-  // CHECK: %[[ARG1:.*]] = llvm.mlir.cast %[[ARG1F:.*]]
+  // CHECK: %[[ARG2:.*]] = unrealized_conversion_cast %[[ARG2F:.*]]
+  // CHECK: %[[ARG1:.*]] = unrealized_conversion_cast %[[ARG1F:.*]]
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_2:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
@@ -48,7 +48,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %3 = memref.view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
 
   // Test static sizes.
-  // CHECK: %[[ARG2:.*]] = llvm.mlir.cast %[[ARG2F:.*]]
+  // CHECK: %[[ARG2:.*]] = unrealized_conversion_cast %[[ARG2F:.*]]
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_3:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
@@ -71,7 +71,7 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
   %6 = memref.alloc() : memref<2048xi8, 4>
 
-  // CHECK: %[[ARG2:.*]] = llvm.mlir.cast %[[ARG2F:.*]]
+  // CHECK: %[[ARG2:.*]] = unrealized_conversion_cast %[[ARG2F:.*]]
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_4:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr<i8, 4>, i64) -> !llvm.ptr<i8, 4>
@@ -105,21 +105,21 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
 // CHECK32: %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32: %[[ARG2f:.*]]: index)
 func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
+  // CHECK: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
 
-  // CHECK: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0c:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1c:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0c:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1c:.*]] = llvm.mlir.cast %[[ARG1f]]
+  // CHECK: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0c:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1c:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0c:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1c:.*]] = unrealized_conversion_cast %[[ARG1f]]
 
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
@@ -178,21 +178,21 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
 // CHECK32: %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32: %[[ARG2f:.*]]: index)
 func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1], 3>, %arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
+  // CHECK: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
 
-  // CHECK: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0c:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1c:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0c:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1c:.*]] = llvm.mlir.cast %[[ARG1f]]
+  // CHECK: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0c:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1c:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0c:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1c:.*]] = unrealized_conversion_cast %[[ARG1f]]
 
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32, 3> to !llvm.ptr<f32, 3>
@@ -251,17 +251,17 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
 // CHECK32-SAME: %[[ARG1f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME: %[[ARG2f:[a-zA-Z0-9]*]]: index
 func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
+  // CHECK: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
 
-  // CHECK: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
+  // CHECK: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
 
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
@@ -324,17 +324,17 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
 // CHECK32-SAME: %[[ARG1f:[a-zA-Z0-9]*]]: index
 // CHECK32-SAME: %[[ARG2f:[a-zA-Z0-9]*]]: index
 func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
+  // CHECK: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
 
-  // CHECK: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0a:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1a:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG0b:.*]] = llvm.mlir.cast %[[ARG0f]]
-  // CHECK32: %[[ARG1b:.*]] = llvm.mlir.cast %[[ARG1f]]
+  // CHECK: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0a:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1a:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG0b:.*]] = unrealized_conversion_cast %[[ARG0f]]
+  // CHECK32: %[[ARG1b:.*]] = unrealized_conversion_cast %[[ARG1f]]
 
   // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
@@ -386,8 +386,8 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
 // CHECK32-LABEL: func @subview_const_stride_and_offset(
 func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>) {
   // The last "insertvalue" that populates the memref descriptor from the function arguments.
-  // CHECK: %[[MEMREF:.*]] = llvm.mlir.cast
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast
+  // CHECK: %[[MEMREF:.*]] = unrealized_conversion_cast
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast
 
   // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
@@ -425,10 +425,10 @@ func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides:
 // CHECK32: %[[ARG1f:[a-zA-Z0-9]*]]: index,
 // CHECK32: %[[ARG2f:.*]]: index)
 func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index, %arg1 : index, %arg2 : index) {
-  // CHECK32: %[[MEMREF:.*]] = llvm.mlir.cast %[[MEM]]
-  // CHECK32: %[[ARG1:.*]] = llvm.mlir.cast %[[ARG1f]]
-  // CHECK32: %[[ARG2:.*]] = llvm.mlir.cast %[[ARG2f]]
-  // CHECK32: %[[ARG0:.*]] = llvm.mlir.cast %[[ARG0f]]
+  // CHECK32: %[[MEMREF:.*]] = unrealized_conversion_cast %[[MEM]]
+  // CHECK32: %[[ARG1:.*]] = unrealized_conversion_cast %[[ARG1f]]
+  // CHECK32: %[[ARG2:.*]] = unrealized_conversion_cast %[[ARG2f]]
+  // CHECK32: %[[ARG0:.*]] = unrealized_conversion_cast %[[ARG0f]]
 
   // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
@@ -567,7 +567,7 @@ func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
   %dim = memref.dim %unranked, %c0 : memref<*xi32>
   return %dim : index
 }
-// CHECK: %[[UNRANKED_DESC:.*]] = llvm.mlir.cast
+// CHECK: %[[UNRANKED_DESC:.*]] = unrealized_conversion_cast
 
 // CHECK: %[[RANKED_DESC:.*]] = llvm.extractvalue %[[UNRANKED_DESC]][1]
 // CHECK-SAME: : !llvm.struct<(i64, ptr<i8>)>
@@ -30,7 +30,7 @@ func @bitcast_index_to_i8_vector(%input: vector<16xindex>) -> vector<128xi8> {
 
 // CHECK-LABEL: @bitcast_index_to_i8_vector
 // CHECK-SAME: %[[input:.*]]: vector<16xindex>
-// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[input]] : vector<16xindex> to vector<16xi64>
+// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[input]] : vector<16xindex> to vector<16xi64>
 // CHECK: llvm.bitcast %[[T0]] : vector<16xi64> to vector<128xi8>
 
 // -----
@@ -96,11 +96,11 @@ func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
 // CHECK-LABEL: @broadcast_vec2d_from_vec1d(
 // CHECK-SAME: %[[A:.*]]: vector<2xf32>)
 // CHECK: %[[T0:.*]] = constant dense<0.000000e+00> : vector<3x2xf32>
-// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
+// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
 // CHECK: %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][0] : !llvm.array<3 x vector<2xf32>>
 // CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vector<2xf32>>
 // CHECK: %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][2] : !llvm.array<3 x vector<2xf32>>
-// CHECK: %[[T5:.*]] = llvm.mlir.cast %[[T4]] : !llvm.array<3 x vector<2xf32>> to vector<3x2xf32>
+// CHECK: %[[T5:.*]] = unrealized_conversion_cast %[[T4]] : !llvm.array<3 x vector<2xf32>> to vector<3x2xf32>
 // CHECK: return %[[T5]] : vector<3x2xf32>
 
 // -----
@ -112,11 +112,11 @@ func @broadcast_vec2d_from_index_vec1d(%arg0: vector<2xindex>) -> vector<3x2xind
|
||||
// CHECK-LABEL: @broadcast_vec2d_from_index_vec1d(
|
||||
// CHECK-SAME: %[[A:.*]]: vector<2xindex>)
|
||||
// CHECK: %[[T0:.*]] = constant dense<0> : vector<3x2xindex>
|
||||
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[A]] : vector<2xindex> to vector<2xi64>
|
||||
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[T0]] : vector<3x2xindex> to !llvm.array<3 x vector<2xi64>>
|
||||
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[A]] : vector<2xindex> to vector<2xi64>
|
||||
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[T0]] : vector<3x2xindex> to !llvm.array<3 x vector<2xi64>>
|
||||
// CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<3 x vector<2xi64>>
|
||||
|
||||
// CHECK: %[[T4:.*]] = llvm.mlir.cast %{{.*}} : !llvm.array<3 x vector<2xi64>> to vector<3x2xindex>
|
||||
// CHECK: %[[T4:.*]] = unrealized_conversion_cast %{{.*}} : !llvm.array<3 x vector<2xi64>> to vector<3x2xindex>
|
||||
// CHECK: return %[[T4]] : vector<3x2xindex>
|
||||
|
||||
// -----
|
||||
@ -130,18 +130,18 @@ func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> {
|
||||
// CHECK: %[[T0:.*]] = constant dense<0.000000e+00> : vector<3x2xf32>
|
||||
// CHECK: %[[T1:.*]] = constant dense<0.000000e+00> : vector<4x3x2xf32>
|
||||
|
||||
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[T0]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][0] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][1] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T5:.*]] = llvm.insertvalue %[[A]], %[[T4]][2] : !llvm.array<3 x vector<2xf32>>
|
||||
|
||||
// CHECK: %[[T6:.*]] = llvm.mlir.cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T6:.*]] = unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T5]], %[[T6]][0] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T5]], %[[T7]][1] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T9:.*]] = llvm.insertvalue %[[T5]], %[[T8]][2] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T10:.*]] = llvm.insertvalue %[[T5]], %[[T9]][3] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
|
||||
// CHECK: %[[T11:.*]] = llvm.mlir.cast %[[T10]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: %[[T11:.*]] = unrealized_conversion_cast %[[T10]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: return %[[T11]] : vector<4x3x2xf32>
|
||||
|
||||
// -----
|
||||
@ -153,16 +153,16 @@ func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf32> {
|
||||
// CHECK-LABEL: @broadcast_vec3d_from_vec2d(
|
||||
// CHECK-SAME: %[[A:.*]]: vector<3x2xf32>)
|
||||
// CHECK: %[[T0:.*]] = constant dense<0.000000e+00> : vector<4x3x2xf32>
|
||||
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[T0]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[T0]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][0] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T4:.*]] = llvm.mlir.cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T4:.*]] = unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T3]][1] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T6:.*]] = llvm.mlir.cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T6:.*]] = unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T5]][2] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T8:.*]] = llvm.mlir.cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T8:.*]] = unrealized_conversion_cast %[[A]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T9:.*]] = llvm.insertvalue %[[T8]], %[[T7]][3] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T10:.*]] = llvm.mlir.cast %[[T9]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: %[[T10:.*]] = unrealized_conversion_cast %[[T9]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: return %[[T10]] : vector<4x3x2xf32>
|
||||
|
||||
|
||||
@ -188,13 +188,13 @@ func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
|
||||
// CHECK-LABEL: @broadcast_stretch_at_start(
|
||||
// CHECK-SAME: %[[A:.*]]: vector<1x4xf32>)
|
||||
// CHECK: %[[T1:.*]] = constant dense<0.000000e+00> : vector<3x4xf32>
|
||||
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>>
|
||||
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>>
|
||||
// CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<1 x vector<4xf32>>
|
||||
// CHECK: %[[T4:.*]] = llvm.mlir.cast %[[T1]] : vector<3x4xf32> to !llvm.array<3 x vector<4xf32>>
|
||||
// CHECK: %[[T4:.*]] = unrealized_conversion_cast %[[T1]] : vector<3x4xf32> to !llvm.array<3 x vector<4xf32>>
|
||||
// CHECK: %[[T5:.*]] = llvm.insertvalue %[[T3]], %[[T4]][0] : !llvm.array<3 x vector<4xf32>>
|
||||
// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T3]], %[[T5]][1] : !llvm.array<3 x vector<4xf32>>
|
||||
// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T3]], %[[T6]][2] : !llvm.array<3 x vector<4xf32>>
|
||||
// CHECK: %[[T8:.*]] = llvm.mlir.cast %[[T7]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32>
|
||||
// CHECK: %[[T8:.*]] = unrealized_conversion_cast %[[T7]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32>
|
||||
// CHECK: return %[[T8]] : vector<3x4xf32>
|
||||
|
||||
// -----
|
||||
@ -206,32 +206,32 @@ func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
|
||||
// CHECK-LABEL: @broadcast_stretch_at_end(
|
||||
// CHECK-SAME: %[[A:.*]]: vector<4x1xf32>)
|
||||
// CHECK: %[[T1:.*]] = constant dense<0.000000e+00> : vector<4x3xf32>
|
||||
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T3:.*]] = llvm.extractvalue %[[T2]][0] : !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i64) : i64
|
||||
// CHECK: %[[T5:.*]] = llvm.extractelement %[[T3]]{{\[}}%[[T4]] : i64] : vector<1xf32>
|
||||
// CHECK: %[[T6:.*]] = splat %[[T5]] : vector<3xf32>
|
||||
// CHECK: %[[T7:.*]] = llvm.mlir.cast %[[T1]] : vector<4x3xf32> to !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T7:.*]] = unrealized_conversion_cast %[[T1]] : vector<4x3xf32> to !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T6]], %[[T7]][0] : !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T9:.*]] = llvm.mlir.cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T9:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T10:.*]] = llvm.extractvalue %[[T9]][1] : !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T11:.*]] = llvm.mlir.constant(0 : i64) : i64
|
||||
// CHECK: %[[T12:.*]] = llvm.extractelement %[[T10]]{{\[}}%[[T11]] : i64] : vector<1xf32>
|
||||
// CHECK: %[[T13:.*]] = splat %[[T12]] : vector<3xf32>
|
||||
// CHECK: %[[T14:.*]] = llvm.insertvalue %[[T13]], %[[T8]][1] : !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T15:.*]] = llvm.mlir.cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T15:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T16:.*]] = llvm.extractvalue %[[T15]][2] : !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T17:.*]] = llvm.mlir.constant(0 : i64) : i64
|
||||
// CHECK: %[[T18:.*]] = llvm.extractelement %[[T16]]{{\[}}%[[T17]] : i64] : vector<1xf32>
|
||||
// CHECK: %[[T19:.*]] = splat %[[T18]] : vector<3xf32>
|
||||
// CHECK: %[[T20:.*]] = llvm.insertvalue %[[T19]], %[[T14]][2] : !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T21:.*]] = llvm.mlir.cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T21:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1xf32> to !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T22:.*]] = llvm.extractvalue %[[T21]][3] : !llvm.array<4 x vector<1xf32>>
|
||||
// CHECK: %[[T23:.*]] = llvm.mlir.constant(0 : i64) : i64
|
||||
// CHECK: %[[T24:.*]] = llvm.extractelement %[[T22]]{{\[}}%[[T23]] : i64] : vector<1xf32>
|
||||
// CHECK: %[[T25:.*]] = splat %[[T24]] : vector<3xf32>
|
||||
// CHECK: %[[T26:.*]] = llvm.insertvalue %[[T25]], %[[T20]][3] : !llvm.array<4 x vector<3xf32>>
|
||||
// CHECK: %[[T27:.*]] = llvm.mlir.cast %[[T26]] : !llvm.array<4 x vector<3xf32>> to vector<4x3xf32>
|
||||
// CHECK: %[[T27:.*]] = unrealized_conversion_cast %[[T26]] : !llvm.array<4 x vector<3xf32>> to vector<4x3xf32>
|
||||
// CHECK: return %[[T27]] : vector<4x3xf32>
|
||||
|
||||
// -----
|
||||
@ -244,36 +244,36 @@ func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32>
|
||||
// CHECK-SAME: %[[A:.*]]: vector<4x1x2xf32>) -> vector<4x3x2xf32> {
|
||||
// CHECK: %[[T1:.*]] = constant dense<0.000000e+00> : vector<4x3x2xf32>
|
||||
// CHECK: %[[T2:.*]] = constant dense<0.000000e+00> : vector<3x2xf32>
|
||||
// CHECK: %[[T3:.*]] = llvm.mlir.cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T3:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T4:.*]] = llvm.extractvalue %[[T3]][0, 0] : !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T5:.*]] = llvm.mlir.cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T5:.*]] = unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T4]], %[[T6]][1] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T4]], %[[T7]][2] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T9:.*]] = llvm.mlir.cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T9:.*]] = unrealized_conversion_cast %[[T1]] : vector<4x3x2xf32> to !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T10:.*]] = llvm.insertvalue %[[T8]], %[[T9]][0] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T11:.*]] = llvm.mlir.cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T11:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T12:.*]] = llvm.extractvalue %[[T11]][1, 0] : !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T13:.*]] = llvm.mlir.cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T13:.*]] = unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T14:.*]] = llvm.insertvalue %[[T12]], %[[T13]][0] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T15:.*]] = llvm.insertvalue %[[T12]], %[[T14]][1] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T16:.*]] = llvm.insertvalue %[[T12]], %[[T15]][2] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T17:.*]] = llvm.insertvalue %[[T16]], %[[T10]][1] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T18:.*]] = llvm.mlir.cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T18:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T19:.*]] = llvm.extractvalue %[[T18]][2, 0] : !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T20:.*]] = llvm.mlir.cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T20:.*]] = unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T21:.*]] = llvm.insertvalue %[[T19]], %[[T20]][0] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T22:.*]] = llvm.insertvalue %[[T19]], %[[T21]][1] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T23:.*]] = llvm.insertvalue %[[T19]], %[[T22]][2] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T24:.*]] = llvm.insertvalue %[[T23]], %[[T17]][2] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T25:.*]] = llvm.mlir.cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T25:.*]] = unrealized_conversion_cast %[[A]] : vector<4x1x2xf32> to !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T26:.*]] = llvm.extractvalue %[[T25]][3, 0] : !llvm.array<4 x array<1 x vector<2xf32>>>
|
||||
// CHECK: %[[T27:.*]] = llvm.mlir.cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T27:.*]] = unrealized_conversion_cast %[[T2]] : vector<3x2xf32> to !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T28:.*]] = llvm.insertvalue %[[T26]], %[[T27]][0] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T29:.*]] = llvm.insertvalue %[[T26]], %[[T28]][1] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T30:.*]] = llvm.insertvalue %[[T26]], %[[T29]][2] : !llvm.array<3 x vector<2xf32>>
|
||||
// CHECK: %[[T31:.*]] = llvm.insertvalue %[[T30]], %[[T24]][3] : !llvm.array<4 x array<3 x vector<2xf32>>>
|
||||
// CHECK: %[[T32:.*]] = llvm.mlir.cast %[[T31]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: %[[T32:.*]] = unrealized_conversion_cast %[[T31]] : !llvm.array<4 x array<3 x vector<2xf32>>> to vector<4x3x2xf32>
|
||||
// CHECK: return %[[T32]] : vector<4x3x2xf32>
|
||||
|
||||
// -----
|
||||
@ -290,14 +290,14 @@ func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32
// CHECK: %[[T4:.*]] = llvm.extractelement %[[A]]{{\[}}%[[T3]] : i64] : vector<2xf32>
// CHECK: %[[T5:.*]] = splat %[[T4]] : vector<3xf32>
// CHECK: %[[T6:.*]] = mulf %[[T5]], %[[B]] : vector<3xf32>
// CHECK: %[[T7:.*]] = llvm.mlir.cast %[[T2]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T7:.*]] = unrealized_conversion_cast %[[T2]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T8:.*]] = llvm.insertvalue %[[T6]], %[[T7]][0] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[T10:.*]] = llvm.extractelement %[[A]]{{\[}}%[[T9]] : i64] : vector<2xf32>
// CHECK: %[[T11:.*]] = splat %[[T10]] : vector<3xf32>
// CHECK: %[[T12:.*]] = mulf %[[T11]], %[[B]] : vector<3xf32>
// CHECK: %[[T13:.*]] = llvm.insertvalue %[[T12]], %[[T8]][1] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T14:.*]] = llvm.mlir.cast %[[T13]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
// CHECK: %[[T14:.*]] = unrealized_conversion_cast %[[T13]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
// CHECK: return %[[T14]] : vector<2x3xf32>

// -----
@ -310,14 +310,14 @@ func @outerproduct_index(%arg0: vector<2xindex>, %arg1: vector<3xindex>) -> vect
// CHECK-SAME: %[[A:.*]]: vector<2xindex>,
// CHECK-SAME: %[[B:.*]]: vector<3xindex>)
// CHECK: %[[T0:.*]] = constant dense<0> : vector<2x3xindex>
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[A]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[A]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T2:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[T3:.*]] = llvm.extractelement %[[T1]]{{\[}}%[[T2]] : i64] : vector<2xi64>
// CHECK: %[[T4:.*]] = llvm.mlir.cast %[[T3]] : i64 to index
// CHECK: %[[T4:.*]] = unrealized_conversion_cast %[[T3]] : i64 to index
// CHECK: %[[T5:.*]] = splat %[[T4]] : vector<3xindex>
// CHECK: %[[T6:.*]] = muli %[[T5]], %[[B]] : vector<3xindex>
// CHECK: %[[T7:.*]] = llvm.mlir.cast %[[T6]] : vector<3xindex> to vector<3xi64>
// CHECK: %[[T8:.*]] = llvm.mlir.cast %[[T0]] : vector<2x3xindex> to !llvm.array<2 x vector<3xi64>>
// CHECK: %[[T7:.*]] = unrealized_conversion_cast %[[T6]] : vector<3xindex> to vector<3xi64>
// CHECK: %[[T8:.*]] = unrealized_conversion_cast %[[T0]] : vector<2x3xindex> to !llvm.array<2 x vector<3xi64>>
// CHECK: %{{.*}} = llvm.insertvalue %[[T7]], %[[T8]][0] : !llvm.array<2 x vector<3xi64>>

// -----
@ -334,19 +334,19 @@ func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector
// CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[T5:.*]] = llvm.extractelement %[[A]]{{\[}}%[[T4]] : i64] : vector<2xf32>
// CHECK: %[[T6:.*]] = splat %[[T5]] : vector<3xf32>
// CHECK: %[[T7:.*]] = llvm.mlir.cast %[[C]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T7:.*]] = unrealized_conversion_cast %[[C]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T8:.*]] = llvm.extractvalue %[[T7]][0] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T9:.*]] = "llvm.intr.fmuladd"(%[[T6]], %[[B]], %[[T8]]) : (vector<3xf32>, vector<3xf32>, vector<3xf32>) -> vector<3xf32>
// CHECK: %[[T10:.*]] = llvm.mlir.cast %[[T3]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T10:.*]] = unrealized_conversion_cast %[[T3]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T11:.*]] = llvm.insertvalue %[[T9]], %[[T10]][0] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T12:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[T13:.*]] = llvm.extractelement %[[A]]{{\[}}%[[T12]] : i64] : vector<2xf32>
// CHECK: %[[T14:.*]] = splat %[[T13]] : vector<3xf32>
// CHECK: %[[T15:.*]] = llvm.mlir.cast %[[C]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T15:.*]] = unrealized_conversion_cast %[[C]] : vector<2x3xf32> to !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T16:.*]] = llvm.extractvalue %[[T15]][1] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T17:.*]] = "llvm.intr.fmuladd"(%[[T14]], %[[B]], %[[T16]]) : (vector<3xf32>, vector<3xf32>, vector<3xf32>) -> vector<3xf32>
// CHECK: %[[T18:.*]] = llvm.insertvalue %[[T17]], %[[T11]][1] : !llvm.array<2 x vector<3xf32>>
// CHECK: %[[T19:.*]] = llvm.mlir.cast %[[T18]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
// CHECK: %[[T19:.*]] = unrealized_conversion_cast %[[T18]] : !llvm.array<2 x vector<3xf32>> to vector<2x3xf32>
// CHECK: return %[[T19]] : vector<2x3xf32>

// -----
@ -370,10 +370,10 @@ func @shuffle_1D_index_direct(%arg0: vector<2xindex>, %arg1: vector<2xindex>) ->
// CHECK-LABEL: @shuffle_1D_index_direct(
// CHECK-SAME: %[[A:.*]]: vector<2xindex>,
// CHECK-SAME: %[[B:.*]]: vector<2xindex>)
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[B]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[B]] : vector<2xindex> to vector<2xi64>
// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T1]] [0, 1] : vector<2xi64>, vector<2xi64>
// CHECK: %[[T3:.*]] = llvm.mlir.cast %[[T2]] : vector<2xi64> to vector<2xindex>
// CHECK: %[[T3:.*]] = unrealized_conversion_cast %[[T2]] : vector<2xi64> to vector<2xindex>
// CHECK: return %[[T3]] : vector<2xindex>

// -----
@ -417,8 +417,8 @@ func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
// CHECK-LABEL: @shuffle_2D(
// CHECK-SAME: %[[A:.*]]: vector<1x4xf32>,
// CHECK-SAME: %[[B:.*]]: vector<2x4xf32>)
// CHECK: %[[VAL_0:.*]] = llvm.mlir.cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>>
// CHECK: %[[VAL_1:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[VAL_0:.*]] = unrealized_conversion_cast %[[A]] : vector<1x4xf32> to !llvm.array<1 x vector<4xf32>>
// CHECK: %[[VAL_1:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[u0:.*]] = llvm.mlir.undef : !llvm.array<3 x vector<4xf32>>
// CHECK: %[[e1:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[i1:.*]] = llvm.insertvalue %[[e1]], %[[u0]][0] : !llvm.array<3 x vector<4xf32>>
@ -426,7 +426,7 @@ func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
// CHECK: %[[i2:.*]] = llvm.insertvalue %[[e2]], %[[i1]][1] : !llvm.array<3 x vector<4xf32>>
// CHECK: %[[e3:.*]] = llvm.extractvalue %[[VAL_1]][1] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[i3:.*]] = llvm.insertvalue %[[e3]], %[[i2]][2] : !llvm.array<3 x vector<4xf32>>
// CHECK: %[[VAL_3:.*]] = llvm.mlir.cast %[[i3]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32>
// CHECK: %[[VAL_3:.*]] = unrealized_conversion_cast %[[i3]] : !llvm.array<3 x vector<4xf32>> to vector<3x4xf32>
// CHECK: return %[[VAL_3]] : vector<3x4xf32>

// -----
@ -461,10 +461,10 @@ func @extract_index_element_from_vec_1d(%arg0: vector<16xindex>) -> index {
}
// CHECK-LABEL: @extract_index_element_from_vec_1d(
// CHECK-SAME: %[[A:.*]]: vector<16xindex>)
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T1:.*]] = llvm.mlir.constant(15 : i64) : i64
// CHECK: %[[T2:.*]] = llvm.extractelement %[[T0]][%[[T1]] : i64] : vector<16xi64>
// CHECK: %[[T3:.*]] = llvm.mlir.cast %[[T2]] : i64 to index
// CHECK: %[[T3:.*]] = unrealized_conversion_cast %[[T2]] : i64 to index
// CHECK: return %[[T3]] : index

// -----
@ -533,11 +533,11 @@ func @insert_index_element_into_vec_1d(%arg0: index, %arg1: vector<4xindex>) ->
// CHECK-LABEL: @insert_index_element_into_vec_1d(
// CHECK-SAME: %[[A:.*]]: index,
// CHECK-SAME: %[[B:.*]]: vector<4xindex>)
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : index to i64
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[B]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : index to i64
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[B]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T3:.*]] = llvm.mlir.constant(3 : i64) : i64
// CHECK: %[[T4:.*]] = llvm.insertelement %[[T0]], %[[T1]][%[[T3]] : i64] : vector<4xi64>
// CHECK: %[[T5:.*]] = llvm.mlir.cast %[[T4]] : vector<4xi64> to vector<4xindex>
// CHECK: %[[T5:.*]] = unrealized_conversion_cast %[[T4]] : vector<4xi64> to vector<4xindex>
// CHECK: return %[[T5]] : vector<4xindex>

// -----
@ -598,9 +598,9 @@ func @vector_index_type_cast(%arg0: memref<8x8x8xindex>) -> memref<vector<8x8x8x
}
// CHECK-LABEL: @vector_index_type_cast(
// CHECK-SAME: %[[A:.*]]: memref<8x8x8xindex>)
// CHECK: %{{.*}} = llvm.mlir.cast %[[A]] : memref<8x8x8xindex> to !llvm.struct<(ptr<i64>, ptr<i64>, i64, array<3 x i64>, array<3 x i64>)>
// CHECK: %{{.*}} = unrealized_conversion_cast %[[A]] : memref<8x8x8xindex> to !llvm.struct<(ptr<i64>, ptr<i64>, i64, array<3 x i64>, array<3 x i64>)>

// CHECK: %{{.*}} = llvm.mlir.cast %{{.*}} : !llvm.struct<(ptr<array<8 x array<8 x vector<8xi64>>>>, ptr<array<8 x array<8 x vector<8xi64>>>>, i64)> to memref<vector<8x8x8xindex>>
// CHECK: %{{.*}} = unrealized_conversion_cast %{{.*}} : !llvm.struct<(ptr<array<8 x array<8 x vector<8xi64>>>>, ptr<array<8 x array<8 x vector<8xi64>>>>, i64)> to memref<vector<8x8x8xindex>>

// -----

@ -654,7 +654,7 @@ func @vector_print_scalar_si4(%arg0: si4) {
}
// CHECK-LABEL: @vector_print_scalar_si4(
// CHECK-SAME: %[[A:.*]]: si4)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : si4 to i4
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : si4 to i4
// CHECK: %[[S:.*]] = sexti %[[C]] : i4 to i64
// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
@ -667,7 +667,7 @@ func @vector_print_scalar_ui4(%arg0: ui4) {
}
// CHECK-LABEL: @vector_print_scalar_ui4(
// CHECK-SAME: %[[A:.*]]: ui4)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : ui4 to i4
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : ui4 to i4
// CHECK: %[[S:.*]] = zexti %[[C]] : i4 to i64
// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
@ -692,7 +692,7 @@ func @vector_print_scalar_ui32(%arg0: ui32) {
}
// CHECK-LABEL: @vector_print_scalar_ui32(
// CHECK-SAME: %[[A:.*]]: ui32)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : ui32 to i32
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : ui32 to i32
// CHECK: %[[S:.*]] = zexti %[[C]] : i32 to i64
// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()

@ -716,7 +716,7 @@ func @vector_print_scalar_si40(%arg0: si40) {
}
// CHECK-LABEL: @vector_print_scalar_si40(
// CHECK-SAME: %[[A:.*]]: si40)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : si40 to i40
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : si40 to i40
// CHECK: %[[S:.*]] = sexti %[[C]] : i40 to i64
// CHECK: llvm.call @printI64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
@ -729,7 +729,7 @@ func @vector_print_scalar_ui40(%arg0: ui40) {
}
// CHECK-LABEL: @vector_print_scalar_ui40(
// CHECK-SAME: %[[A:.*]]: ui40)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : ui40 to i40
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : ui40 to i40
// CHECK: %[[S:.*]] = zexti %[[C]] : i40 to i64
// CHECK: llvm.call @printU64(%[[S]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()
@ -753,7 +753,7 @@ func @vector_print_scalar_ui64(%arg0: ui64) {
}
// CHECK-LABEL: @vector_print_scalar_ui64(
// CHECK-SAME: %[[A:.*]]: ui64)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : ui64 to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : ui64 to i64
// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()

@ -765,7 +765,7 @@ func @vector_print_scalar_index(%arg0: index) {
}
// CHECK-LABEL: @vector_print_scalar_index(
// CHECK-SAME: %[[A:.*]]: index)
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[A]] : index to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[A]] : index to i64
// CHECK: llvm.call @printU64(%[[C]]) : (i64) -> ()
// CHECK: llvm.call @printNewline() : () -> ()

@ -799,7 +799,7 @@ func @vector_print_vector(%arg0: vector<2x2xf32>) {
}
// CHECK-LABEL: @vector_print_vector(
// CHECK-SAME: %[[A:.*]]: vector<2x2xf32>)
// CHECK: %[[VAL_1:.*]] = llvm.mlir.cast %[[A]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>>
// CHECK: %[[VAL_1:.*]] = unrealized_conversion_cast %[[A]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>>
// CHECK: llvm.call @printOpen() : () -> ()
// CHECK: %[[x0:.*]] = llvm.extractvalue %[[VAL_1]][0] : !llvm.array<2 x vector<2xf32>>
// CHECK: llvm.call @printOpen() : () -> ()
@ -844,10 +844,10 @@ func @extract_strided_index_slice1(%arg0: vector<4xindex>) -> vector<2xindex> {
}
// CHECK-LABEL: @extract_strided_index_slice1(
// CHECK-SAME: %[[A:.*]]: vector<4xindex>)
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[A]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[A]] : vector<4xindex> to vector<4xi64>
// CHECK: %[[T2:.*]] = llvm.shufflevector %[[T0]], %[[T1]] [2, 3] : vector<4xi64>, vector<4xi64>
// CHECK: %[[T3:.*]] = llvm.mlir.cast %[[T2]] : vector<2xi64> to vector<2xindex>
// CHECK: %[[T3:.*]] = unrealized_conversion_cast %[[T2]] : vector<2xi64> to vector<2xindex>
// CHECK: return %[[T3]] : vector<2xindex>

// -----
@ -858,13 +858,13 @@ func @extract_strided_slice2(%arg0: vector<4x8xf32>) -> vector<2x8xf32> {
}
// CHECK-LABEL: @extract_strided_slice2(
// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>)
// CHECK: %[[A:.*]] = llvm.mlir.cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[A:.*]] = unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vector<8xf32>>
// CHECK: %[[T1:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<2 x vector<8xf32>>
// CHECK: %[[T3:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T2]][1] : !llvm.array<2 x vector<8xf32>>
// CHECK: %[[T5:.*]] = llvm.mlir.cast %[[T4]] : !llvm.array<2 x vector<8xf32>> to vector<2x8xf32>
// CHECK: %[[T5:.*]] = unrealized_conversion_cast %[[T4]] : !llvm.array<2 x vector<8xf32>> to vector<2x8xf32>
// CHECK: return %[[T5]]

// -----
@ -877,16 +877,16 @@ func @extract_strided_slice3(%arg0: vector<4x8xf32>) -> vector<2x2xf32> {
// CHECK-SAME: %[[ARG:.*]]: vector<4x8xf32>)
// CHECK: %[[VAL_1:.*]] = constant 0.000000e+00 : f32
// CHECK: %[[VAL_2:.*]] = splat %[[VAL_1]] : vector<2x2xf32>
// CHECK: %[[A:.*]] = llvm.mlir.cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[A:.*]] = unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : vector<8xf32>, vector<8xf32>
// CHECK: %[[VAL_6:.*]] = llvm.mlir.cast %[[VAL_2]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>>
// CHECK: %[[VAL_6:.*]] = unrealized_conversion_cast %[[VAL_2]] : vector<2x2xf32> to !llvm.array<2 x vector<2xf32>>
// CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[VAL_6]][0] : !llvm.array<2 x vector<2xf32>>
// CHECK: %[[A:.*]] = llvm.mlir.cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[A:.*]] = unrealized_conversion_cast %[[ARG]] : vector<4x8xf32> to !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : vector<8xf32>, vector<8xf32>
// CHECK: %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vector<2xf32>>
// CHECK: %[[VAL_12:.*]] = llvm.mlir.cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32>
// CHECK: %[[VAL_12:.*]] = unrealized_conversion_cast %[[T7]] : !llvm.array<2 x vector<2xf32>> to vector<2x2xf32>
// CHECK: return %[[VAL_12]] : vector<2x2xf32>

// -----
@ -919,42 +919,42 @@ func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<
//
// Subvector vector<2xf32> @0 into vector<4xf32> @2
// CHECK: llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vector<2xf32>>
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vector<4xf32>>
// Element @0 -> element @2
// CHECK-NEXT: constant 0 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<2xf32>
// CHECK-NEXT: constant 2 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32>
// Element @1 -> element @3
// CHECK-NEXT: constant 1 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<2xf32>
// CHECK-NEXT: constant 3 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32>
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x vector<4xf32>>
//
// Subvector vector<2xf32> @1 into vector<4xf32> @3
// CHECK: llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vector<2xf32>>
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : vector<4x4xf32> to !llvm.array<4 x vector<4xf32>>
// CHECK-NEXT: llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vector<4xf32>>
// Element @0 -> element @2
// CHECK-NEXT: constant 0 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<2xf32>
// CHECK-NEXT: constant 2 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32>
// Element @1 -> element @3
// CHECK-NEXT: constant 1 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.extractelement {{.*}}[{{.*}} : i64] : vector<2xf32>
// CHECK-NEXT: constant 3 : index
// CHECK-NEXT: llvm.mlir.cast %{{.*}} : index to i64
// CHECK-NEXT: unrealized_conversion_cast %{{.*}} : index to i64
// CHECK-NEXT: llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : vector<4xf32>
// CHECK-NEXT: llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x vector<4xf32>>

@ -968,69 +968,69 @@ func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -
// CHECK-LABEL: @insert_strided_slice3(
// CHECK-SAME: %[[A:.*]]: vector<2x4xf32>,
// CHECK-SAME: %[[B:.*]]: vector<16x4x8xf32>)
// CHECK: %[[s2:.*]] = llvm.mlir.cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s2:.*]] = unrealized_conversion_cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s3:.*]] = llvm.extractvalue %[[s2]][0] : !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s4:.*]] = llvm.mlir.cast %[[A]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s4:.*]] = unrealized_conversion_cast %[[A]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s5:.*]] = llvm.extractvalue %[[s4]][0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s6:.*]] = llvm.mlir.cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s6:.*]] = unrealized_conversion_cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s7:.*]] = llvm.extractvalue %[[s6]][0, 0] : !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s8:.*]] = constant 0 : index
// CHECK: %[[s9:.*]] = llvm.mlir.cast %[[s8]] : index to i64
// CHECK: %[[s9:.*]] = unrealized_conversion_cast %[[s8]] : index to i64
// CHECK: %[[s10:.*]] = llvm.extractelement %[[s5]]{{\[}}%[[s9]] : i64] : vector<4xf32>
// CHECK: %[[s11:.*]] = constant 2 : index
// CHECK: %[[s12:.*]] = llvm.mlir.cast %[[s11]] : index to i64
// CHECK: %[[s12:.*]] = unrealized_conversion_cast %[[s11]] : index to i64
// CHECK: %[[s13:.*]] = llvm.insertelement %[[s10]], %[[s7]]{{\[}}%[[s12]] : i64] : vector<8xf32>
// CHECK: %[[s14:.*]] = constant 1 : index
// CHECK: %[[s15:.*]] = llvm.mlir.cast %[[s14]] : index to i64
// CHECK: %[[s15:.*]] = unrealized_conversion_cast %[[s14]] : index to i64
// CHECK: %[[s16:.*]] = llvm.extractelement %[[s5]]{{\[}}%[[s15]] : i64] : vector<4xf32>
// CHECK: %[[s17:.*]] = constant 3 : index
// CHECK: %[[s18:.*]] = llvm.mlir.cast %[[s17]] : index to i64
// CHECK: %[[s18:.*]] = unrealized_conversion_cast %[[s17]] : index to i64
// CHECK: %[[s19:.*]] = llvm.insertelement %[[s16]], %[[s13]]{{\[}}%[[s18]] : i64] : vector<8xf32>
// CHECK: %[[s20:.*]] = constant 2 : index
// CHECK: %[[s21:.*]] = llvm.mlir.cast %[[s20]] : index to i64
// CHECK: %[[s21:.*]] = unrealized_conversion_cast %[[s20]] : index to i64
// CHECK: %[[s22:.*]] = llvm.extractelement %[[s5]]{{\[}}%[[s21]] : i64] : vector<4xf32>
// CHECK: %[[s23:.*]] = constant 4 : index
// CHECK: %[[s24:.*]] = llvm.mlir.cast %[[s23]] : index to i64
// CHECK: %[[s24:.*]] = unrealized_conversion_cast %[[s23]] : index to i64
// CHECK: %[[s25:.*]] = llvm.insertelement %[[s22]], %[[s19]]{{\[}}%[[s24]] : i64] : vector<8xf32>
// CHECK: %[[s26:.*]] = constant 3 : index
// CHECK: %[[s27:.*]] = llvm.mlir.cast %[[s26]] : index to i64
// CHECK: %[[s27:.*]] = unrealized_conversion_cast %[[s26]] : index to i64
// CHECK: %[[s28:.*]] = llvm.extractelement %[[s5]]{{\[}}%[[s27]] : i64] : vector<4xf32>
// CHECK: %[[s29:.*]] = constant 5 : index
// CHECK: %[[s30:.*]] = llvm.mlir.cast %[[s29]] : index to i64
// CHECK: %[[s30:.*]] = unrealized_conversion_cast %[[s29]] : index to i64
// CHECK: %[[s31:.*]] = llvm.insertelement %[[s28]], %[[s25]]{{\[}}%[[s30]] : i64] : vector<8xf32>
// CHECK: %[[s32:.*]] = llvm.insertvalue %[[s31]], %[[s3]][0] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[s33:.*]] = llvm.mlir.cast %[[A]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s33:.*]] = unrealized_conversion_cast %[[A]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s34:.*]] = llvm.extractvalue %[[s33]][1] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[s35:.*]] = llvm.mlir.cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s35:.*]] = unrealized_conversion_cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s36:.*]] = llvm.extractvalue %[[s35]][0, 1] : !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s37:.*]] = constant 0 : index
// CHECK: %[[s38:.*]] = llvm.mlir.cast %[[s37]] : index to i64
// CHECK: %[[s38:.*]] = unrealized_conversion_cast %[[s37]] : index to i64
// CHECK: %[[s39:.*]] = llvm.extractelement %[[s34]]{{\[}}%[[s38]] : i64] : vector<4xf32>
// CHECK: %[[s40:.*]] = constant 2 : index
// CHECK: %[[s41:.*]] = llvm.mlir.cast %[[s40]] : index to i64
// CHECK: %[[s41:.*]] = unrealized_conversion_cast %[[s40]] : index to i64
// CHECK: %[[s42:.*]] = llvm.insertelement %[[s39]], %[[s36]]{{\[}}%[[s41]] : i64] : vector<8xf32>
// CHECK: %[[s43:.*]] = constant 1 : index
// CHECK: %[[s44:.*]] = llvm.mlir.cast %[[s43]] : index to i64
// CHECK: %[[s44:.*]] = unrealized_conversion_cast %[[s43]] : index to i64
// CHECK: %[[s45:.*]] = llvm.extractelement %[[s34]]{{\[}}%[[s44]] : i64] : vector<4xf32>
// CHECK: %[[s46:.*]] = constant 3 : index
// CHECK: %[[s47:.*]] = llvm.mlir.cast %[[s46]] : index to i64
// CHECK: %[[s47:.*]] = unrealized_conversion_cast %[[s46]] : index to i64
// CHECK: %[[s48:.*]] = llvm.insertelement %[[s45]], %[[s42]]{{\[}}%[[s47]] : i64] : vector<8xf32>
// CHECK: %[[s49:.*]] = constant 2 : index
// CHECK: %[[s50:.*]] = llvm.mlir.cast %[[s49]] : index to i64
// CHECK: %[[s50:.*]] = unrealized_conversion_cast %[[s49]] : index to i64
// CHECK: %[[s51:.*]] = llvm.extractelement %[[s34]]{{\[}}%[[s50]] : i64] : vector<4xf32>
// CHECK: %[[s52:.*]] = constant 4 : index
// CHECK: %[[s53:.*]] = llvm.mlir.cast %[[s52]] : index to i64
// CHECK: %[[s53:.*]] = unrealized_conversion_cast %[[s52]] : index to i64
// CHECK: %[[s54:.*]] = llvm.insertelement %[[s51]], %[[s48]]{{\[}}%[[s53]] : i64] : vector<8xf32>
// CHECK: %[[s55:.*]] = constant 3 : index
// CHECK: %[[s56:.*]] = llvm.mlir.cast %[[s55]] : index to i64
// CHECK: %[[s56:.*]] = unrealized_conversion_cast %[[s55]] : index to i64
// CHECK: %[[s57:.*]] = llvm.extractelement %[[s34]]{{\[}}%[[s56]] : i64] : vector<4xf32>
// CHECK: %[[s58:.*]] = constant 5 : index
// CHECK: %[[s59:.*]] = llvm.mlir.cast %[[s58]] : index to i64
// CHECK: %[[s59:.*]] = unrealized_conversion_cast %[[s58]] : index to i64
// CHECK: %[[s60:.*]] = llvm.insertelement %[[s57]], %[[s54]]{{\[}}%[[s59]] : i64] : vector<8xf32>
// CHECK: %[[s61:.*]] = llvm.insertvalue %[[s60]], %[[s32]][1] : !llvm.array<4 x vector<8xf32>>
// CHECK: %[[s62:.*]] = llvm.mlir.cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s62:.*]] = unrealized_conversion_cast %[[B]] : vector<16x4x8xf32> to !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s63:.*]] = llvm.insertvalue %[[s61]], %[[s62]][0] : !llvm.array<16 x array<4 x vector<8xf32>>>
// CHECK: %[[s64:.*]] = llvm.mlir.cast %[[s63]] : !llvm.array<16 x array<4 x vector<8xf32>>> to vector<16x4x8xf32>
// CHECK: %[[s64:.*]] = unrealized_conversion_cast %[[s63]] : !llvm.array<16 x array<4 x vector<8xf32>>> to vector<16x4x8xf32>
// CHECK: return %[[s64]] : vector<16x4x8xf32>

// -----
@ -1043,20 +1043,20 @@ func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>) -> (vector<8xf32>, vect
// CHECK-SAME: (vector<8xf32>, vector<8xf32>, vector<8xf32>) -> vector<8xf32>
%0 = vector.fma %a, %a, %a : vector<8xf32>

// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b00:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b01:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b02:.*]] = llvm.extractvalue %[[BL]][0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[B0:.*]] = "llvm.intr.fmuladd"(%[[b00]], %[[b01]], %[[b02]]) :
// CHECK-SAME: (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
// CHECK: llvm.insertvalue %[[B0]], {{.*}}[0] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b10:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b11:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = llvm.mlir.cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[BL:.*]] = unrealized_conversion_cast %[[B]] : vector<2x4xf32> to !llvm.array<2 x vector<4xf32>>
// CHECK: %[[b12:.*]] = llvm.extractvalue %[[BL]][1] : !llvm.array<2 x vector<4xf32>>
// CHECK: %[[B1:.*]] = "llvm.intr.fmuladd"(%[[b10]], %[[b11]], %[[b12]]) :
// CHECK-SAME: (vector<4xf32>, vector<4xf32>, vector<4xf32>) -> vector<4xf32>
@ -1146,9 +1146,9 @@ func @reduce_index(%arg0: vector<16xindex>) -> index {
}
// CHECK-LABEL: @reduce_index(
// CHECK-SAME: %[[A:.*]]: vector<16xindex>)
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T1:.*]] = "llvm.intr.vector.reduce.add"(%[[T0]])
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[T1]] : i64 to index
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[T1]] : i64 to index
// CHECK: return %[[T2]] : index

// 4x16 16x3 4x3
@ -1265,7 +1265,7 @@ func @transfer_read_index_1d(%A : memref<?xindex>, %base: index) -> vector<17xin
// CHECK-LABEL: func @transfer_read_index_1d
// CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: index) -> vector<17xindex>
// CHECK: %[[C7:.*]] = constant 7
// CHECK: %{{.*}} = llvm.mlir.cast %[[C7]] : index to i64
// CHECK: %{{.*}} = unrealized_conversion_cast %[[C7]] : index to i64

// CHECK: %[[loaded:.*]] = llvm.intr.masked.load %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} :
// CHECK-SAME: (!llvm.ptr<vector<17xi64>>, vector<17xi1>, vector<17xi64>) -> vector<17xi64>
@ -1422,10 +1422,10 @@ func @genbool_2d() -> vector<4x4xi1> {
// CHECK-LABEL: func @genbool_2d
// CHECK: %[[VAL_0:.*]] = constant dense<[true, true, false, false]> : vector<4xi1>
// CHECK: %[[VAL_1:.*]] = constant dense<false> : vector<4x4xi1>
// CHECK: %[[VAL_2:.*]] = llvm.mlir.cast %[[VAL_1]] : vector<4x4xi1> to !llvm.array<4 x vector<4xi1>>
// CHECK: %[[VAL_2:.*]] = unrealized_conversion_cast %[[VAL_1]] : vector<4x4xi1> to !llvm.array<4 x vector<4xi1>>
// CHECK: %[[VAL_3:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_2]][0] : !llvm.array<4 x vector<4xi1>>
// CHECK: %[[VAL_4:.*]] = llvm.insertvalue %[[VAL_0]], %[[VAL_3]][1] : !llvm.array<4 x vector<4xi1>>
// CHECK: %[[VAL_5:.*]] = llvm.mlir.cast %[[VAL_4]] : !llvm.array<4 x vector<4xi1>> to vector<4x4xi1>
// CHECK: %[[VAL_5:.*]] = unrealized_conversion_cast %[[VAL_4]] : !llvm.array<4 x vector<4xi1>> to vector<4x4xi1>
// CHECK: return %[[VAL_5]] : vector<4x4xi1>

// -----
@ -1452,11 +1452,11 @@ func @flat_transpose_index(%arg0: vector<16xindex>) -> vector<16xindex> {
}
// CHECK-LABEL: func @flat_transpose_index
// CHECK-SAME: %[[A:.*]]: vector<16xindex>
// CHECK: %[[T0:.*]] = llvm.mlir.cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T0:.*]] = unrealized_conversion_cast %[[A]] : vector<16xindex> to vector<16xi64>
// CHECK: %[[T1:.*]] = llvm.intr.matrix.transpose %[[T0]]
// CHECK-SAME: {columns = 4 : i32, rows = 4 : i32} :
// CHECK-SAME: vector<16xi64> into vector<16xi64>
// CHECK: %[[T2:.*]] = llvm.mlir.cast %[[T1]] : vector<16xi64> to vector<16xindex>
// CHECK: %[[T2:.*]] = unrealized_conversion_cast %[[T1]] : vector<16xi64> to vector<16xindex>
// CHECK: return %[[T2]] : vector<16xindex>

// -----
@ -1482,7 +1482,7 @@ func @vector_load_op_index(%memref : memref<200x100xindex>, %i : index, %j : ind
}
// CHECK-LABEL: func @vector_load_op_index
// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr<vector<8xi64>>
// CHECK: %[[T1:.*]] = llvm.mlir.cast %[[T0]] : vector<8xi64> to vector<8xindex>
// CHECK: %[[T1:.*]] = unrealized_conversion_cast %[[T0]] : vector<8xi64> to vector<8xindex>
// CHECK: return %[[T1]] : vector<8xindex>

// -----
@ -1554,7 +1554,7 @@ func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<1

// CHECK-LABEL: func @masked_load_op
// CHECK: %[[CO:.*]] = constant 0 : index
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[CO]] : index to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[CO]] : index to i64
// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK: %[[B:.*]] = llvm.bitcast %[[P]] : !llvm.ptr<f32> to !llvm.ptr<vector<16xf32>>
// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[B]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr<vector<16xf32>>, vector<16xi1>, vector<16xf32>) -> vector<16xf32>
@ -1580,7 +1580,7 @@ func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<

// CHECK-LABEL: func @masked_store_op
// CHECK: %[[CO:.*]] = constant 0 : index
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[CO]] : index to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[CO]] : index to i64
// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK: %[[B:.*]] = llvm.bitcast %[[P]] : !llvm.ptr<f32> to !llvm.ptr<vector<16xf32>>
// CHECK: llvm.intr.masked.store %{{.*}}, %[[B]], %{{.*}} {alignment = 4 : i32} : vector<16xf32>, vector<16xi1> into !llvm.ptr<vector<16xf32>>
@ -1619,7 +1619,7 @@ func @gather_op_index(%arg0: memref<?xindex>, %arg1: vector<3xindex>, %arg2: vec
// CHECK-LABEL: func @gather_op_index
// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%{{.*}}] : (!llvm.ptr<i64>, vector<3xi64>) -> !llvm.vec<3 x ptr<i64>>
// CHECK: %[[G:.*]] = llvm.intr.masked.gather %{{.*}}, %{{.*}}, %{{.*}} {alignment = 8 : i32} : (!llvm.vec<3 x ptr<i64>>, vector<3xi1>, vector<3xi64>) -> vector<3xi64>
// CHECK: %{{.*}} = llvm.mlir.cast %[[G]] : vector<3xi64> to vector<3xindex>
// CHECK: %{{.*}} = unrealized_conversion_cast %[[G]] : vector<3xi64> to vector<3xindex>

// -----

@ -1709,7 +1709,7 @@ func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<1

// CHECK-LABEL: func @expand_load_op
// CHECK: %[[CO:.*]] = constant 0 : index
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[CO]] : index to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[CO]] : index to i64
// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK: %[[E:.*]] = "llvm.intr.masked.expandload"(%[[P]], %{{.*}}, %{{.*}}) : (!llvm.ptr<f32>, vector<11xi1>, vector<11xf32>) -> vector<11xf32>
// CHECK: return %[[E]] : vector<11xf32>
@ -1734,7 +1734,7 @@ func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vecto

// CHECK-LABEL: func @compress_store_op
// CHECK: %[[CO:.*]] = constant 0 : index
// CHECK: %[[C:.*]] = llvm.mlir.cast %[[CO]] : index to i64
// CHECK: %[[C:.*]] = unrealized_conversion_cast %[[CO]] : index to i64
// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[%[[C]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %[[P]], %{{.*}}) : (vector<11xf32>, !llvm.ptr<f32>, vector<11xi1>) -> ()

@ -9,15 +9,15 @@ func @memcopy(%src : memref<?xf32>, %dst : memref<?xf32>, %size : index) {

// CHECK: scf.for [[LOOPIDX:%arg[0-9]+]] = {{.*}}
scf.for %i0 = %c0 to %size step %step {
// CHECK: [[SRCMRS:%[0-9]+]] = llvm.mlir.cast [[SRC]] : memref<?xf32> to !llvm.struct<(ptr<f32>
// CHECK: [[SRCIDX:%[0-9]+]] = llvm.mlir.cast [[LOOPIDX]] : index to i64
// CHECK: [[SRCMRS:%[0-9]+]] = unrealized_conversion_cast [[SRC]] : memref<?xf32> to !llvm.struct<(ptr<f32>
// CHECK: [[SRCIDX:%[0-9]+]] = unrealized_conversion_cast [[LOOPIDX]] : index to i64
// CHECK: [[SRCMEM:%[0-9]+]] = llvm.extractvalue [[SRCMRS]][1] : !llvm.struct<(ptr<f32>
// CHECK-NEXT: [[SRCPTR:%[0-9]+]] = llvm.getelementptr [[SRCMEM]]{{.}}[[SRCIDX]]{{.}} : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-NEXT: [[SRCVPTR:%[0-9]+]] = llvm.bitcast [[SRCPTR]] : !llvm.ptr<f32> to !llvm.ptr<vec<? x 4 x f32>>
// CHECK-NEXT: [[LDVAL:%[0-9]+]] = llvm.load [[SRCVPTR]] : !llvm.ptr<vec<? x 4 x f32>>
%0 = arm_sve.load %src[%i0] : !arm_sve.vector<4xf32> from memref<?xf32>
// CHECK: [[DSTMRS:%[0-9]+]] = llvm.mlir.cast [[DST]] : memref<?xf32> to !llvm.struct<(ptr<f32>
// CHECK: [[DSTIDX:%[0-9]+]] = llvm.mlir.cast [[LOOPIDX]] : index to i64
// CHECK: [[DSTMRS:%[0-9]+]] = unrealized_conversion_cast [[DST]] : memref<?xf32> to !llvm.struct<(ptr<f32>
// CHECK: [[DSTIDX:%[0-9]+]] = unrealized_conversion_cast [[LOOPIDX]] : index to i64
// CHECK: [[DSTMEM:%[0-9]+]] = llvm.extractvalue [[DSTMRS]][1] : !llvm.struct<(ptr<f32>
// CHECK-NEXT: [[DSTPTR:%[0-9]+]] = llvm.getelementptr [[DSTMEM]]{{.}}[[DSTIDX]]{{.}} : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
// CHECK-NEXT: [[DSTVPTR:%[0-9]+]] = llvm.bitcast [[DSTPTR]] : !llvm.ptr<f32> to !llvm.ptr<vec<? x 4 x f32>>

@ -1,267 +0,0 @@
|
||||
// RUN: mlir-opt -split-input-file -verify-diagnostics %s
|
||||
|
||||
// These are the supported cases, just make sure they don't trigger errors, op
|
||||
// syntax is tested elsewhere.
|
||||
|
||||
func @mlir_dialect_cast(%0: index, %1: vector<2x2x2xf32>,
|
||||
%6: vector<42xf32>, %7: memref<42xf32>,
|
||||
%8: memref<?xf32>, %9: memref<f32>,
|
||||
%10: memref<*xf32>) {
|
||||
llvm.mlir.cast %0 : index to i64
|
||||
llvm.mlir.cast %0 : index to i32
|
||||
llvm.mlir.cast %1 : vector<2x2x2xf32> to !llvm.array<2 x array<2 x vector<2xf32>>>
|
||||
llvm.mlir.cast %7 : memref<42xf32> to !llvm.ptr<f32>
|
||||
llvm.mlir.cast %7 : memref<42xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)>
|
||||
llvm.mlir.cast %8 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)>
|
||||
llvm.mlir.cast %9 : memref<f32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
|
||||
llvm.mlir.cast %10 : memref<*xf32> to !llvm.struct<(i64, ptr<i8>)>
|
||||
return
|
||||
}
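With llvm.mlir.cast removed, the conversions this deleted function exercised are written with the builtin cast instead; a brief sketch reusing two of the cases above (illustrative only, not part of the new test suite):

%a = unrealized_conversion_cast %0 : index to i64
%b = unrealized_conversion_cast %7 : memref<42xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)>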

// -----

func @mlir_dialect_cast_index_non_integer(%0 : index) {
  // expected-error@+1 {{invalid cast between index and non-integer type}}
  %1 = llvm.mlir.cast %0 : index to f32
}

// -----

// Cast verifier is symmetric, so we only check the symmetry once by having an
// std->llvm and llvm->std test. Everything else is std->llvm.

func @mlir_dialect_cast_index_non_integer_symmetry(%0: f32) {
  // expected-error@+1 {{invalid cast between index and non-integer type}}
  llvm.mlir.cast %0 : f32 to index
}
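As the comment above notes, the verifier rejected both directions of the same invalid cast; side by side (illustrative names, diagnostics paraphrased in comments):

// std -> llvm direction:
%a = llvm.mlir.cast %i : index to f32   // invalid cast between index and non-integer type
// llvm -> std direction:
%b = llvm.mlir.cast %f : f32 to index   // same diagnostic, mirrored operands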

// -----

func @mlir_dialect_cast_f16(%0 : f16) {
  // expected-error@+1 {{unsupported cast}}
  llvm.mlir.cast %0 : f16 to f32
}

// -----

func @mlir_dialect_cast_bf16(%0 : bf16) {
  // expected-error@+1 {{unsupported cast}}
  llvm.mlir.cast %0 : bf16 to f16
}

// -----

func @mlir_dialect_cast_f32(%0 : f32) {
  // expected-error@+1 {{unsupported cast}}
  llvm.mlir.cast %0 : f32 to bf16
}

// -----

func @mlir_dialect_cast_f64(%0 : f64) {
  // expected-error@+1 {{unsupported cast}}
  llvm.mlir.cast %0 : f64 to f32
}

// -----

func @mlir_dialect_cast_integer_non_integer(%0 : i16) {
  // expected-error@+1 {{invalid cast between integer and non-integer}}
  llvm.mlir.cast %0 : i16 to f16
}

// -----

func @mlir_dialect_cast_scalable_vector(%0 : vector<2xf32>) {
  // expected-error@+1 {{invalid cast for vector types}}
  llvm.mlir.cast %0 : vector<2xf32> to !llvm.vec<?x2xf32>
}

// -----

func @mlir_dialect_cast_vector_to_self(%0 : vector<2xf32>) {
  // expected-error@+1 {{vector types should not be casted}}
  llvm.mlir.cast %0 : vector<2xf32> to vector<2xf32>
}

// -----

func @mlir_dialect_cast_nd_vector(%0 : vector<2x2xf32>) {
  // expected-error@+1 {{invalid cast for vector, expected array}}
  llvm.mlir.cast %0 : vector<2x2xf32> to !llvm.struct<()>
}

// -----

func @mlir_dialect_cast_dynamic_memref_bare_ptr(%0 : memref<?xf32>) {
  // expected-error@+1 {{unexpected bare pointer for dynamically shaped memref}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.ptr<f32>
}

// -----

func @mlir_dialect_cast_memref_bare_ptr_space(%0 : memref<4xf32, 4>) {
  // expected-error@+1 {{invalid conversion between memref and pointer in different memory spaces}}
  llvm.mlir.cast %0 : memref<4xf32, 4> to !llvm.ptr<f32, 3>
}

// -----

func @mlir_dialect_cast_memref_no_descriptor(%0 : memref<?xf32>) {
  // expected-error@+1 {{invalid cast between a memref and a type other than pointer or memref descriptor}}
  llvm.mlir.cast %0 : memref<?xf32> to f32
}

// -----

func @mlir_dialect_cast_memref_descriptor_wrong_num_elements(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected memref descriptor with 5 elements}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<()>
}

// -----

func @mlir_dialect_cast_0d_memref_descriptor_wrong_num_elements(%0 : memref<f32>) {
  // expected-error@+1 {{expected memref descriptor with 3 elements}}
  llvm.mlir.cast %0 : memref<f32> to !llvm.struct<()>
}

// -----

func @mlir_dialect_cast_memref_descriptor_allocated(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected first element of a memref descriptor to be a pointer in the address space of the memref}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(f32, f32, f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_allocated_wrong_space(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected first element of a memref descriptor to be a pointer in the address space of the memref}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32, 2>, f32, f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_aligned(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected second element of a memref descriptor to be a pointer in the address space of the memref}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, f32, f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_aligned_wrong_space(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected second element of a memref descriptor to be a pointer in the address space of the memref}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32, 2>, f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_offset(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected third element of a memref descriptor to be index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_sizes(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, f32, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_sizes_wrong_type(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<10xf32>, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_sizes_wrong_rank(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<10xi64>, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_strides(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, f32)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_strides_wrong_type(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<10xf32>)>
}

// -----

func @mlir_dialect_cast_memref_descriptor_strides_wrong_rank(%0 : memref<?xf32>) {
  // expected-error@+1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<10xi64>)>
}

// -----

func @mlir_dialect_cast_tensor(%0 : tensor<?xf32>) {
  // expected-error@+1 {{unsupported cast}}
  llvm.mlir.cast %0 : tensor<?xf32> to f32
}

// -----

func @mlir_dialect_cast_unranked_memref(%0: memref<*xf32>) {
  // expected-error@+1 {{expected descriptor to be a struct with two elements}}
  llvm.mlir.cast %0 : memref<*xf32> to !llvm.ptr<f32>
}

// -----

func @mlir_dialect_cast_unranked_memref(%0: memref<*xf32>) {
  // expected-error@+1 {{expected descriptor to be a struct with two elements}}
  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<()>
}

// -----

func @mlir_dialect_cast_unranked_rank(%0: memref<*xf32>) {
  // expected-error@+1 {{expected first element of a memref descriptor to be an index-compatible integer}}
  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(f32, f32)>
}

// -----

func @mlir_dialect_cast_unranked_rank(%0: memref<*xf32>) {
  // expected-error@+1 {{expected second element of a memref descriptor to be an !llvm.ptr<i8>}}
  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(i64, f32)>
}

// -----

func @mlir_dialect_cast_complex_non_struct(%0: complex<f32>) {
  // expected-error@+1 {{expected 'complex' to map to two-element struct with identical element types}}
  llvm.mlir.cast %0 : complex<f32> to f32
}

// -----

func @mlir_dialect_cast_complex_bad_size(%0: complex<f32>) {
  // expected-error@+1 {{expected 'complex' to map to two-element struct with identical element types}}
  llvm.mlir.cast %0 : complex<f32> to !llvm.struct<(f32, f32, f32)>
}

// -----

func @mlir_dialect_cast_complex_mismatching_type_struct(%0: complex<f32>) {
  // expected-error@+1 {{expected 'complex' to map to two-element struct with identical element types}}
  llvm.mlir.cast %0 : complex<f32> to !llvm.struct<(f32, f64)>
}

// -----

func @mlir_dialect_cast_complex_mismatching_element(%0: complex<f32>) {
  // expected-error@+1 {{expected 'complex' to map to two-element struct with identical element types}}
  llvm.mlir.cast %0 : complex<f32> to !llvm.struct<(f64, f64)>
}