From 560564f51c626cf89920f13b6cea96684bac5848 Mon Sep 17 00:00:00 2001
From: Jakub Kuderski
Date: Wed, 20 Dec 2023 00:14:43 -0500
Subject: [PATCH] [mlir][vector][gpu] Align minf/maxf reduction kind names with
 arith (#75901)

This is to avoid confusion when dealing with reduction/combining kinds.
For example, see a recent PR comment:
https://github.com/llvm/llvm-project/pull/75846#discussion_r1430722175.

Previously, they were picked to mostly mirror the names of the llvm vector
reduction intrinsics:
https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fmin-intrinsic.
In isolation, it was not clear if `<maxf>` has `arith.maxnumf` or
`arith.maximumf` semantics. The new reduction kind names map 1:1 to arith
ops, which makes it easier to tell/look up their semantics (see the short
before/after example appended after the diff).

Because both the vector and the gpu dialect depend on the arith dialect,
it's more natural to align names with those in arith than with the
lowering to llvm intrinsics.

Issue: https://github.com/llvm/llvm-project/issues/72354
---
 mlir/include/mlir/Dialect/GPU/IR/GPUOps.td    | 12 +++++-----
 .../Dialect/Vector/IR/VectorAttributes.td     |  8 +++----
 .../mlir/Dialect/Vector/IR/VectorOps.td       | 23 ++++++++++---------
 .../GPUToNVVM/LowerGpuOpsToNVVMOps.cpp        |  4 ++--
 mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp |  4 ++--
 .../VectorToLLVM/ConvertVectorToLLVM.cpp      |  8 +++----
 .../VectorToSPIRV/VectorToSPIRV.cpp           |  4 ++--
 mlir/lib/Dialect/GPU/IR/GPUDialect.cpp        |  3 ++-
 .../GPU/Transforms/AllReduceLowering.cpp      |  4 ++--
 .../Linalg/Transforms/Vectorization.cpp       |  4 ++--
 mlir/lib/Dialect/Vector/IR/VectorOps.cpp      |  8 +++----
 .../Vector/Transforms/LowerVectorContract.cpp |  2 +-
 .../Vector/Transforms/LowerVectorScan.cpp     |  4 ++--
 .../Conversion/GPUToSPIRV/reductions.mlir     | 16 ++++++-------
 .../vector-reduction-to-llvm.mlir             |  4 ++--
 .../VectorToLLVM/vector-to-llvm.mlir          |  8 +++----
 mlir/test/Dialect/GPU/all-reduce-maxf.mlir    |  2 +-
 mlir/test/Dialect/GPU/invalid.mlir            | 18 +++++++--------
 .../Vector/break-down-vector-reduction.mlir   |  4 ++--
 mlir/test/Dialect/Vector/ops.mlir             | 20 ++++++++--------
 ...act-to-outerproduct-matvec-transforms.mlir | 18 +++++++--------
 ...vector-multi-reduction-outer-lowering.mlir |  6 ++---
 .../CPU/test-reductions-f32-reassoc.mlir      |  4 ++--
 .../Vector/CPU/test-reductions-f32.mlir       |  4 ++--
 .../CPU/test-reductions-f64-reassoc.mlir      |  4 ++--
 .../Vector/CPU/test-reductions-f64.mlir       |  4 ++--
 26 files changed, 101 insertions(+), 99 deletions(-)

diff --git a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
index 2e1a5f5cc78a..2e21cd77d2d8 100644
--- a/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
+++ b/mlir/include/mlir/Dialect/GPU/IR/GPUOps.td
@@ -937,11 +937,11 @@ def GPU_AllReduceOpMul : I32EnumAttrCase<"MUL", 1, "mul">;
 def GPU_AllReduceOpMinUI : I32EnumAttrCase<"MINUI", 2, "minui">;
 def GPU_AllReduceOpMinSI : I32EnumAttrCase<"MINSI", 3, "minsi">;
 // Follows the `arith.minnumf` semantics.
-def GPU_AllReduceOpMinF : I32EnumAttrCase<"MINF", 4, "minf">;
+def GPU_AllReduceOpMinnumF : I32EnumAttrCase<"MINNUMF", 4, "minnumf">;
 def GPU_AllReduceOpMaxUI : I32EnumAttrCase<"MAXUI", 5, "maxui">;
 def GPU_AllReduceOpMaxSI : I32EnumAttrCase<"MAXSI", 6, "maxsi">;
 // Follows the `arith.maxnumf` semantics.
-def GPU_AllReduceOpMaxF : I32EnumAttrCase<"MAXF", 7, "maxf">; +def GPU_AllReduceOpMaxnumF : I32EnumAttrCase<"MAXNUMF", 7, "maxnumf">; def GPU_AllReduceOpAnd : I32EnumAttrCase<"AND", 8, "and">; def GPU_AllReduceOpOr : I32EnumAttrCase<"OR", 9, "or">; def GPU_AllReduceOpXor : I32EnumAttrCase<"XOR", 10, "xor">; @@ -957,10 +957,10 @@ def GPU_AllReduceOperation : I32EnumAttr<"AllReduceOperation", GPU_AllReduceOpMul, GPU_AllReduceOpMinUI, GPU_AllReduceOpMinSI, - GPU_AllReduceOpMinF, + GPU_AllReduceOpMinnumF, GPU_AllReduceOpMaxUI, GPU_AllReduceOpMaxSI, - GPU_AllReduceOpMaxF, + GPU_AllReduceOpMaxnumF, GPU_AllReduceOpAnd, GPU_AllReduceOpOr, GPU_AllReduceOpXor, @@ -999,7 +999,7 @@ def GPU_AllReduceOp : GPU_Op<"all_reduce", accumulation as code region. The reduction operation must be one of: * Integer types: `add`, `mul`, `minui`, `minsi`, `maxui`, `maxsi`, `and`, `or`, `xor` - * Floating point types: `add`, `mul`, `minf`, `maxf`, `minimumf`, + * Floating point types: `add`, `mul`, `minnumf`, `maxnumf`, `minimumf`, `maximumf` If `uniform` flag is set either none or all work items of a workgroup @@ -1039,7 +1039,7 @@ def GPU_SubgroupReduceOp : GPU_Op<"subgroup_reduce", [SameOperandsAndResultType] of: * Integer types: `add`, `mul`, `minui`, `minsi`, `maxui`, `maxsi`, `and`, `or`, `xor` - * Floating point types: `add`, `mul`, `minf`, `maxf`, `minimumf`, + * Floating point types: `add`, `mul`, `minnumf`, `maxnumf`, `minimumf`, `maximumf` }]; diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorAttributes.td b/mlir/include/mlir/Dialect/Vector/IR/VectorAttributes.td index f8f85b0d09d9..0f08f61d7b25 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorAttributes.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorAttributes.td @@ -21,10 +21,10 @@ def COMBINING_KIND_ADD : I32BitEnumAttrCaseBit<"ADD", 0, "add">; def COMBINING_KIND_MUL : I32BitEnumAttrCaseBit<"MUL", 1, "mul">; def COMBINING_KIND_MINUI : I32BitEnumAttrCaseBit<"MINUI", 2, "minui">; def COMBINING_KIND_MINSI : I32BitEnumAttrCaseBit<"MINSI", 3, "minsi">; -def COMBINING_KIND_MINF : I32BitEnumAttrCaseBit<"MINF", 4, "minf">; +def COMBINING_KIND_MINNUMF : I32BitEnumAttrCaseBit<"MINNUMF", 4, "minnumf">; def COMBINING_KIND_MAXUI : I32BitEnumAttrCaseBit<"MAXUI", 5, "maxui">; def COMBINING_KIND_MAXSI : I32BitEnumAttrCaseBit<"MAXSI", 6, "maxsi">; -def COMBINING_KIND_MAXF : I32BitEnumAttrCaseBit<"MAXF", 7, "maxf">; +def COMBINING_KIND_MAXNUMF : I32BitEnumAttrCaseBit<"MAXNUMF", 7, "maxnumf">; def COMBINING_KIND_AND : I32BitEnumAttrCaseBit<"AND", 8, "and">; def COMBINING_KIND_OR : I32BitEnumAttrCaseBit<"OR", 9, "or">; def COMBINING_KIND_XOR : I32BitEnumAttrCaseBit<"XOR", 10, "xor">; @@ -35,8 +35,8 @@ def CombiningKind : I32BitEnumAttr< "CombiningKind", "Kind of combining function for contractions and reductions", [COMBINING_KIND_ADD, COMBINING_KIND_MUL, COMBINING_KIND_MINUI, - COMBINING_KIND_MINSI, COMBINING_KIND_MINF, COMBINING_KIND_MAXUI, - COMBINING_KIND_MAXSI, COMBINING_KIND_MAXF, COMBINING_KIND_AND, + COMBINING_KIND_MINSI, COMBINING_KIND_MINNUMF, COMBINING_KIND_MAXUI, + COMBINING_KIND_MAXSI, COMBINING_KIND_MAXNUMF, COMBINING_KIND_AND, COMBINING_KIND_OR, COMBINING_KIND_XOR, COMBINING_KIND_MAXIMUMF, COMBINING_KIND_MINIMUMF]> { let cppNamespace = "::mlir::vector"; diff --git a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td index afc9d532f6e3..423118f79e73 100644 --- a/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td +++ b/mlir/include/mlir/Dialect/Vector/IR/VectorOps.td @@ -87,8 +87,8 @@ def 
Vector_ContractionOp : An optional kind attribute may be used to specify the combining function between the intermediate result and accumulator argument of rank K. This attribute can take the values `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui` - /`and`/`or`/`xor` for integers, and `add`/`mul`/`minf`/`maxf`/`minimumf` - /`maximumf` for floats. The default is `add`. + /`and`/`or`/`xor` for integers, and `add`/`mul`/`minnumf`/`maxnumf` + /`minimumf`/`maximumf` for floats. The default is `add`. Example: @@ -150,7 +150,7 @@ def Vector_ContractionOp : #contraction_trait = { indexing_maps = #contraction_accesses, iterator_types = ["reduction"], - kind = #vector.kind + kind = #vector.kind } %6 = vector.contract #contraction_trait %0, %1, %2 : vector<10xf32>, vector<10xf32> into f32 @@ -234,8 +234,8 @@ def Vector_ReductionOp : let description = [{ Reduces an 1-D vector "horizontally" into a scalar using the given operation: `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for - integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`/`maximumf` for floats. - Reductions also allow an optional fused accumulator. + integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf`/`maximumf` for + floats. Reductions also allow an optional fused accumulator. Note that these operations are restricted to 1-D vectors to remain close to the corresponding LLVM intrinsics: @@ -292,7 +292,7 @@ def Vector_MultiDimReductionOp : let description = [{ Reduces an n-D vector into an (n-k)-D vector (or a scalar when k == n) using the given operation: `add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui` - /`and`/`or`/`xor` for integers, and `add`/`mul`/`minf`/`maxf`/`minimumf` + /`and`/`or`/`xor` for integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf` /`maximumf` for floats. Takes an initial accumulator operand. @@ -942,7 +942,8 @@ def Vector_OuterProductOp : An optional kind attribute may be specified to be: `add`/`mul`/`minsi` /`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for integers, and `add`/`mul` - /`minf`/`maxf`/`minimumf`/`maximumf` for floats. The default is `add`. + /`minnumf`/`maxnumf`/`minimumf`/`maximumf` for floats. The default is + `add`. Example: @@ -954,7 +955,7 @@ def Vector_OuterProductOp : vector<4xf32>, vector<8xf32>, vector<4x8xf32> return %3: vector<4x8xf32> - %4 = vector.outerproduct %0, %1, %2 {kind = #vector.kind}: + %4 = vector.outerproduct %0, %1, %2 {kind = #vector.kind}: vector<4xf32>, vector<8xf32>, vector<4x8xf32> return %3: vector<4x8xf32> @@ -2769,9 +2770,9 @@ def Vector_ScanOp : Performs an inclusive/exclusive scan on an n-D vector along a single dimension returning an n-D result vector using the given operation (`add`/`mul`/`minsi`/`minui`/`maxsi`/`maxui`/`and`/`or`/`xor` for - integers, and `add`/`mul`/`minf`/`maxf`/`minimumf`/`maximumf` for floats), - and a specified value for the initial value. The operator returns the - result of scan as well as the result of the last reduction in the scan. + integers, and `add`/`mul`/`minnumf`/`maxnumf`/`minimumf`/`maximumf` for + floats), and a specified value for the initial value. The operator returns + the result of scan as well as the result of the last reduction in the scan. 
Example: diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp index 0e978ca0a642..e60fe5cbd760 100644 --- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -72,13 +72,13 @@ convertReduxKind(gpu::AllReduceOperation mode) { return NVVM::ReduxKind::MIN; case gpu::AllReduceOperation::MINUI: return std::nullopt; - case gpu::AllReduceOperation::MINF: + case gpu::AllReduceOperation::MINNUMF: return NVVM::ReduxKind::MIN; case gpu::AllReduceOperation::MAXSI: return NVVM::ReduxKind::MAX; case gpu::AllReduceOperation::MAXUI: return std::nullopt; - case gpu::AllReduceOperation::MAXF: + case gpu::AllReduceOperation::MAXNUMF: return NVVM::ReduxKind::MAX; case gpu::AllReduceOperation::AND: return NVVM::ReduxKind::AND; diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp index 5a88ab351866..d383c16949f0 100644 --- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp +++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp @@ -529,7 +529,7 @@ static std::optional createGroupReduceOp(OpBuilder &builder, {ReduceType::MINSI, ElemType::Integer, &createGroupReduceOpImpl}, - {ReduceType::MINF, ElemType::Float, + {ReduceType::MINNUMF, ElemType::Float, &createGroupReduceOpImpl}, {ReduceType::MAXUI, ElemType::Integer, @@ -538,7 +538,7 @@ static std::optional createGroupReduceOp(OpBuilder &builder, {ReduceType::MAXSI, ElemType::Integer, &createGroupReduceOpImpl}, - {ReduceType::MAXF, ElemType::Float, + {ReduceType::MAXNUMF, ElemType::Float, &createGroupReduceOpImpl}, {ReduceType::MINIMUMF, ElemType::Float, diff --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp index cd5df0be740b..ebf7d9b65fa1 100644 --- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp +++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp @@ -818,10 +818,10 @@ public: result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc, fmf); - } else if (kind == vector::CombiningKind::MINF) { + } else if (kind == vector::CombiningKind::MINNUMF) { result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc, fmf); - } else if (kind == vector::CombiningKind::MAXF) { + } else if (kind == vector::CombiningKind::MAXNUMF) { result = createFPReductionComparisonOpLowering( rewriter, loc, llvmType, operand, acc, fmf); } else @@ -938,12 +938,12 @@ public: ReductionNeutralZero>( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; - case vector::CombiningKind::MINF: + case vector::CombiningKind::MINNUMF: result = lowerPredicatedReductionWithStartValue( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); break; - case vector::CombiningKind::MAXF: + case vector::CombiningKind::MAXNUMF: result = lowerPredicatedReductionWithStartValue( rewriter, loc, llvmType, operand, acc, maskOp.getMask()); diff --git a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp index e48f29a4f170..868a3521e7a0 100644 --- a/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp +++ b/mlir/lib/Conversion/VectorToSPIRV/VectorToSPIRV.cpp @@ -478,8 +478,8 @@ struct VectorReductionFloatMinMax final INT_OR_FLOAT_CASE(MAXIMUMF, SPIRVFMaxOp); INT_OR_FLOAT_CASE(MINIMUMF, SPIRVFMinOp); - INT_OR_FLOAT_CASE(MAXF, SPIRVFMaxOp); - INT_OR_FLOAT_CASE(MINF, SPIRVFMinOp); + INT_OR_FLOAT_CASE(MAXNUMF, SPIRVFMaxOp); + 
INT_OR_FLOAT_CASE(MINNUMF, SPIRVFMinOp); default: return rewriter.notifyMatchFailure(reduceOp, "not handled here"); diff --git a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp index d31903ea2011..7c3330f4c238 100644 --- a/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp +++ b/mlir/lib/Dialect/GPU/IR/GPUDialect.cpp @@ -492,7 +492,8 @@ static LogicalResult verifyReduceOpAndType(gpu::AllReduceOperation opName, Type resType) { using Kind = gpu::AllReduceOperation; if (llvm::is_contained( - {Kind::MINF, Kind::MAXF, Kind::MINIMUMF, Kind::MAXIMUMF}, opName)) { + {Kind::MINNUMF, Kind::MAXNUMF, Kind::MINIMUMF, Kind::MAXIMUMF}, + opName)) { if (!isa(resType)) return failure(); } diff --git a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp index a9f903e696df..608d801ee9bb 100644 --- a/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp +++ b/mlir/lib/Dialect/GPU/Transforms/AllReduceLowering.cpp @@ -38,10 +38,10 @@ convertReductionKind(gpu::AllReduceOperation mode) { MAP_CASE(MUL); MAP_CASE(MINUI); MAP_CASE(MINSI); - MAP_CASE(MINF); + MAP_CASE(MINNUMF); MAP_CASE(MAXSI); MAP_CASE(MAXUI); - MAP_CASE(MAXF); + MAP_CASE(MAXNUMF); MAP_CASE(AND); MAP_CASE(OR); MAP_CASE(XOR); diff --git a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp index d956fd4fdd9b..be813df8e782 100644 --- a/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp +++ b/mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp @@ -2426,11 +2426,11 @@ bool isCastOfBlockArgument(Operation *op) { bool isSupportedPoolKind(vector::CombiningKind kind) { switch (kind) { case vector::CombiningKind::ADD: - case vector::CombiningKind::MAXF: + case vector::CombiningKind::MAXNUMF: case vector::CombiningKind::MAXIMUMF: case vector::CombiningKind::MAXSI: case vector::CombiningKind::MAXUI: - case vector::CombiningKind::MINF: + case vector::CombiningKind::MINNUMF: case vector::CombiningKind::MINIMUMF: case vector::CombiningKind::MINSI: case vector::CombiningKind::MINUI: diff --git a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp index 9f3e13c90a62..1d3200bf5c82 100644 --- a/mlir/lib/Dialect/Vector/IR/VectorOps.cpp +++ b/mlir/lib/Dialect/Vector/IR/VectorOps.cpp @@ -140,8 +140,8 @@ static bool isSupportedCombiningKind(CombiningKind combiningKind, case CombiningKind::OR: case CombiningKind::XOR: return elementType.isIntOrIndex(); - case CombiningKind::MINF: - case CombiningKind::MAXF: + case CombiningKind::MINNUMF: + case CombiningKind::MAXNUMF: case CombiningKind::MINIMUMF: case CombiningKind::MAXIMUMF: return llvm::isa(elementType); @@ -6233,7 +6233,7 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc, assert(t1.isIntOrIndex() && tAcc.isIntOrIndex() && "expected int values"); result = b.createOrFold(loc, v1, acc); break; - case CombiningKind::MAXF: + case CombiningKind::MAXNUMF: assert(llvm::isa(t1) && llvm::isa(tAcc) && "expected float values"); result = b.createOrFold(loc, v1, acc, fastmath); @@ -6243,7 +6243,7 @@ Value mlir::vector::makeArithReduction(OpBuilder &b, Location loc, "expected float values"); result = b.createOrFold(loc, v1, acc, fastmath); break; - case CombiningKind::MINF: + case CombiningKind::MINNUMF: assert(llvm::isa(t1) && llvm::isa(tAcc) && "expected float values"); result = b.createOrFold(loc, v1, acc, fastmath); diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp 
b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp index 41ff0c18fe62..6ff4c26763d2 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorContract.cpp @@ -139,7 +139,7 @@ createContractArithOp(Location loc, Value x, Value y, Value acc, Value mul; if (isInt) { - if (kind == CombiningKind::MINF || kind == CombiningKind::MAXF || + if (kind == CombiningKind::MINNUMF || kind == CombiningKind::MAXNUMF || kind == CombiningKind::MINIMUMF || kind == CombiningKind::MAXIMUMF) // Only valid for floating point types. return std::nullopt; diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp index c3ae7e74693c..c4e8ddcb13c3 100644 --- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp +++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorScan.cpp @@ -45,9 +45,9 @@ static bool isValidKind(bool isInt, vector::CombiningKind kind) { enum class KindType { FLOAT, INT, INVALID }; KindType type{KindType::INVALID}; switch (kind) { - case CombiningKind::MINF: + case CombiningKind::MINNUMF: case CombiningKind::MINIMUMF: - case CombiningKind::MAXF: + case CombiningKind::MAXNUMF: case CombiningKind::MAXIMUMF: type = KindType::FLOAT; break; diff --git a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir index 636078181cae..af58f4173136 100644 --- a/mlir/test/Conversion/GPUToSPIRV/reductions.mlir +++ b/mlir/test/Conversion/GPUToSPIRV/reductions.mlir @@ -331,7 +331,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupFMin %[[ARG]] : f32 - %reduced = gpu.all_reduce minf %arg uniform {} : (f32) -> (f32) + %reduced = gpu.all_reduce minnumf %arg uniform {} : (f32) -> (f32) gpu.return } } @@ -351,7 +351,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %[[ARG]] : f32 - %reduced = gpu.all_reduce minf %arg {} : (f32) -> (f32) + %reduced = gpu.all_reduce minnumf %arg {} : (f32) -> (f32) gpu.return } } @@ -414,7 +414,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupFMin %[[ARG]] : f32 - %reduced = gpu.subgroup_reduce minf %arg uniform : (f32) -> (f32) + %reduced = gpu.subgroup_reduce minnumf %arg uniform : (f32) -> (f32) gpu.return } } @@ -434,7 +434,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupNonUniformFMin "Subgroup" "Reduce" %[[ARG]] : f32 - %reduced = gpu.subgroup_reduce minf %arg : (f32) -> (f32) + %reduced = gpu.subgroup_reduce minnumf %arg : (f32) -> (f32) gpu.return } } @@ -498,7 +498,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupFMax %[[ARG]] : f32 - %reduced = gpu.all_reduce maxf %arg uniform {} : (f32) -> (f32) + %reduced = gpu.all_reduce maxnumf %arg uniform {} : (f32) -> (f32) gpu.return } } @@ -518,7 +518,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %[[ARG]] : f32 - %reduced = gpu.all_reduce maxf %arg {} : (f32) -> (f32) 
+ %reduced = gpu.all_reduce maxnumf %arg {} : (f32) -> (f32) gpu.return } } @@ -582,7 +582,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupFMax %[[ARG]] : f32 - %reduced = gpu.subgroup_reduce maxf %arg uniform : (f32) -> (f32) + %reduced = gpu.subgroup_reduce maxnumf %arg uniform : (f32) -> (f32) gpu.return } } @@ -602,7 +602,7 @@ gpu.module @kernels { gpu.func @test(%arg : f32) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi} { // CHECK: %{{.*}} = spirv.GroupNonUniformFMax "Subgroup" "Reduce" %[[ARG]] : f32 - %reduced = gpu.subgroup_reduce maxf %arg : (f32) -> (f32) + %reduced = gpu.subgroup_reduce maxnumf %arg : (f32) -> (f32) gpu.return } } diff --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir index 22463f57f24c..f98a05f8d17e 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir @@ -97,7 +97,7 @@ func.func @masked_reduce_mul_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) - // ----- func.func @masked_reduce_minf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 { - %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 + %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 return %0 : f32 } @@ -111,7 +111,7 @@ func.func @masked_reduce_minf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) // ----- func.func @masked_reduce_maxf_f32(%arg0: vector<16xf32>, %mask : vector<16xi1>) -> f32 { - %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 + %0 = vector.mask %mask { vector.reduction , %arg0 : vector<16xf32> into f32 } : vector<16xi1> -> f32 return %0 : f32 } diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir index 7353d16d79ce..d80392ebd87b 100644 --- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir +++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir @@ -443,7 +443,7 @@ func.func @masked_float_mul_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v // ----- func.func @masked_float_max_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: vector<2xf32>, %m: vector<2xi1>) -> vector<2xf32> { - %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> + %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> return %0 : vector<2xf32> } @@ -456,7 +456,7 @@ func.func @masked_float_max_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: v // ----- func.func @masked_float_min_outerprod(%arg0: vector<2xf32>, %arg1: f32, %arg2: vector<2xf32>, %m: vector<2xi1>) -> vector<2xf32> { - %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> + %0 = vector.mask %m { vector.outerproduct %arg0, %arg1, %arg2 {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> return %0 : vector<2xf32> } @@ -1379,7 +1379,7 @@ func.func @reduce_fminimum_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { // ----- func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction , %arg0, %arg1 : 
vector<16xf32> into f32 + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 return %0 : f32 } // CHECK-LABEL: @reduce_fmax_f32( @@ -1391,7 +1391,7 @@ func.func @reduce_fmax_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { // ----- func.func @reduce_fmin_f32(%arg0: vector<16xf32>, %arg1: f32) -> f32 { - %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 + %0 = vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 return %0 : f32 } // CHECK-LABEL: @reduce_fmin_f32( diff --git a/mlir/test/Dialect/GPU/all-reduce-maxf.mlir b/mlir/test/Dialect/GPU/all-reduce-maxf.mlir index b502e587637c..a7d61fdfbd16 100644 --- a/mlir/test/Dialect/GPU/all-reduce-maxf.mlir +++ b/mlir/test/Dialect/GPU/all-reduce-maxf.mlir @@ -175,7 +175,7 @@ gpu.module @kernels { // CHECK: cf.br ^bb42 // CHECK: ^bb42: // CHECK: gpu.barrier - %sum = gpu.all_reduce maxf %arg0 uniform {} : (f32) -> (f32) + %sum = gpu.all_reduce maxnumf %arg0 uniform {} : (f32) -> (f32) gpu.return } diff --git a/mlir/test/Dialect/GPU/invalid.mlir b/mlir/test/Dialect/GPU/invalid.mlir index 17faccbd091a..d8a40f89f80a 100644 --- a/mlir/test/Dialect/GPU/invalid.mlir +++ b/mlir/test/Dialect/GPU/invalid.mlir @@ -301,17 +301,17 @@ func.func @reduce_invalid_op_type_xor(%arg0 : f32) { // ----- -func.func @reduce_invalid_op_type_minf(%arg0 : i32) { - // expected-error@+1 {{`minf` reduction operation is not compatible with type 'i32'}} - %res = gpu.all_reduce minf %arg0 {} : (i32) -> (i32) +func.func @reduce_invalid_op_type_minnumf(%arg0 : i32) { + // expected-error@+1 {{`minnumf` reduction operation is not compatible with type 'i32'}} + %res = gpu.all_reduce minnumf %arg0 {} : (i32) -> (i32) return } // ----- -func.func @reduce_invalid_op_type_maxf(%arg0 : i32) { - // expected-error@+1 {{`maxf` reduction operation is not compatible with type 'i32'}} - %res = gpu.all_reduce maxf %arg0 {} : (i32) -> (i32) +func.func @reduce_invalid_op_type_maxnumf(%arg0 : i32) { + // expected-error@+1 {{`maxnumf` reduction operation is not compatible with type 'i32'}} + %res = gpu.all_reduce maxnumf %arg0 {} : (i32) -> (i32) return } @@ -349,9 +349,9 @@ func.func @subgroup_reduce_invalid_op_type_and(%arg0 : f32) { // ----- -func.func @subgroup_reduce_invalid_op_type_maxf(%arg0 : i32) { - // expected-error@+1 {{`maxf` reduction operation is not compatible with type 'i32'}} - %res = gpu.subgroup_reduce maxf %arg0 : (i32) -> (i32) +func.func @subgroup_reduce_invalid_op_type_maxnumf(%arg0 : i32) { + // expected-error@+1 {{`maxnumf` reduction operation is not compatible with type 'i32'}} + %res = gpu.subgroup_reduce maxnumf %arg0 : (i32) -> (i32) return } diff --git a/mlir/test/Dialect/Vector/break-down-vector-reduction.mlir b/mlir/test/Dialect/Vector/break-down-vector-reduction.mlir index 34234591b79c..8ef5383c9812 100644 --- a/mlir/test/Dialect/Vector/break-down-vector-reduction.mlir +++ b/mlir/test/Dialect/Vector/break-down-vector-reduction.mlir @@ -16,8 +16,8 @@ func.func @reduce_2x_f32(%arg0: vector<2xf32>) -> (f32, f32, f32, f32, f32, f32) { %0 = vector.reduction , %arg0 : vector<2xf32> into f32 %1 = vector.reduction , %arg0 : vector<2xf32> into f32 - %2 = vector.reduction , %arg0 : vector<2xf32> into f32 - %3 = vector.reduction , %arg0 : vector<2xf32> into f32 + %2 = vector.reduction , %arg0 : vector<2xf32> into f32 + %3 = vector.reduction , %arg0 : vector<2xf32> into f32 %4 = vector.reduction , %arg0 : vector<2xf32> into f32 %5 = vector.reduction , %arg0 : vector<2xf32> into f32 return %0, %1, %2, %3, %4, %5 : f32, f32, f32, f32, f32, f32 diff --git 
a/mlir/test/Dialect/Vector/ops.mlir b/mlir/test/Dialect/Vector/ops.mlir index 6cfddac94efd..c1ef8f2c30c0 100644 --- a/mlir/test/Dialect/Vector/ops.mlir +++ b/mlir/test/Dialect/Vector/ops.mlir @@ -366,13 +366,13 @@ func.func @contraction_extra_attrs(%arg0: vector<10xf32>, %arg1: vector<10xf32>) #contraction_to_scalar_max_trait = { indexing_maps = #contraction_to_scalar_max_accesses, iterator_types = ["reduction"], - kind = #vector.kind + kind = #vector.kind } // CHECK-LABEL: @contraction_to_scalar_with_max func.func @contraction_to_scalar_with_max(%arg0: vector<10xf32>, %arg1: vector<10xf32>) -> f32 { // CHECK: %[[C0:.*]] = arith.constant 0.000000e+00 : f32 %f0 = arith.constant 0.0: f32 - // CHECK: %[[X:.*]] = vector.contract {indexing_maps = [#{{.*}}, #{{.*}}, #{{.*}}], iterator_types = ["reduction"], kind = #vector.kind} %{{.*}}, %{{.*}}, %[[C0]] : vector<10xf32>, vector<10xf32> into f32 + // CHECK: %[[X:.*]] = vector.contract {indexing_maps = [#{{.*}}, #{{.*}}, #{{.*}}], iterator_types = ["reduction"], kind = #vector.kind} %{{.*}}, %{{.*}}, %[[C0]] : vector<10xf32>, vector<10xf32> into f32 %0 = vector.contract #contraction_to_scalar_max_trait %arg0, %arg1, %f0 : vector<10xf32>, vector<10xf32> into f32 // CHECK: return %[[X]] : f32 @@ -404,7 +404,7 @@ func.func @contraction_to_scalar_with_max(%arg0: vector<10xf32>, %arg1: vector<1 #contraction_trait2 = { indexing_maps = #contraction_accesses1, iterator_types = #iterator_types1, - kind = #vector.kind + kind = #vector.kind } // CHECK-LABEL: @contraction func.func @contraction(%arg0 : vector<7x8x16x15xf32>, %arg1 : vector<8x16x7x5xf32>, @@ -425,7 +425,7 @@ func.func @contraction(%arg0 : vector<7x8x16x15xf32>, %arg1 : vector<8x16x7x5xf3 %3 = vector.contract #contraction_trait1 %arg4, %arg5, %arg3 : vector<7x8x16x15xf16>, vector<8x16x7x5xf16> into vector<8x8x15x5xf32> // Test contraction with "max" instead of "add". 
- // CHECK: vector.contract {indexing_maps = [#{{.*}}, #{{.*}}, #{{.*}}], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction"], kind = #vector.kind} {{.*}}, {{.*}}, {{.*}} : vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x8x15x5xf32> + // CHECK: vector.contract {indexing_maps = [#{{.*}}, #{{.*}}, #{{.*}}], iterator_types = ["parallel", "parallel", "parallel", "parallel", "reduction", "reduction"], kind = #vector.kind} {{.*}}, {{.*}}, {{.*}} : vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x8x15x5xf32> %4 = vector.contract #contraction_trait2 %arg0, %arg1, %arg3 : vector<7x8x16x15xf32>, vector<8x16x7x5xf32> into vector<8x8x15x5xf32> return @@ -606,10 +606,10 @@ func.func @reduce_fp(%arg0: vector<16xf32>, %arg1: f32) -> f32 { vector.reduction , %arg0 : vector<16xf32> into f32 // CHECK: vector.reduction , %{{.*}}, %{{.*}} : vector<16xf32> into f32 vector.reduction , %arg0, %arg1 : vector<16xf32> into f32 - // CHECK: vector.reduction , %{{.*}} : vector<16xf32> into f32 - vector.reduction , %arg0 : vector<16xf32> into f32 - // CHECK: %[[X0:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 - %0 = vector.reduction , %arg0 : vector<16xf32> into f32 + // CHECK: vector.reduction , %{{.*}} : vector<16xf32> into f32 + vector.reduction , %arg0 : vector<16xf32> into f32 + // CHECK: %[[X0:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 + %0 = vector.reduction , %arg0 : vector<16xf32> into f32 // CHECK: vector.reduction , %{{.*}} : vector<16xf32> into f32 vector.reduction , %arg0 : vector<16xf32> into f32 // CHECK: %[[X1:.*]] = vector.reduction , %{{.*}} : vector<16xf32> into f32 @@ -1042,7 +1042,7 @@ func.func @contraction_masked_scalable(%A: vector<3x4xf32>, // CHECK-LABEL: func.func @fastmath( func.func @fastmath(%x: vector<42xf32>) -> f32 { - // CHECK: vector.reduction , %{{.*}} fastmath - %min = vector.reduction , %x fastmath : vector<42xf32> into f32 + // CHECK: vector.reduction , %{{.*}} fastmath + %min = vector.reduction , %x fastmath : vector<42xf32> into f32 return %min: f32 } diff --git a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-matvec-transforms.mlir b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-matvec-transforms.mlir index 8fed1f8fb341..c09a4d569638 100644 --- a/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-matvec-transforms.mlir +++ b/mlir/test/Dialect/Vector/vector-contract-to-outerproduct-matvec-transforms.mlir @@ -25,7 +25,7 @@ #matvecmax_trait = { indexing_maps = #matvec_accesses_1, iterator_types = ["parallel", "reduction"], - kind = #vector.kind + kind = #vector.kind } #matvec_accesses_2 = [ @@ -175,10 +175,10 @@ func.func @masked_matvec_mk_k_m_scalable_parallel_dim(%A: vector<[2]x3xf32>, // CHECK: %[[T3:.*]] = vector.transpose %[[A]], [1, 0] : vector<2x2xf32> to vector<2x2xf32> // CHECK: %[[T4:.*]] = vector.extract %[[T3]][0] : vector<2xf32> from vector<2x2xf32> // CHECK: %[[T5:.*]] = vector.extract %[[X]][0] : f32 from vector<2xf32> -// CHECK: %[[T6:.*]] = vector.outerproduct %[[T4]], %[[T5]], %[[B]] {kind = #vector.kind} : vector<2xf32>, f32 +// CHECK: %[[T6:.*]] = vector.outerproduct %[[T4]], %[[T5]], %[[B]] {kind = #vector.kind} : vector<2xf32>, f32 // CHECK: %[[T7:.*]] = vector.extract %[[T3]][1] : vector<2xf32> from vector<2x2xf32> // CHECK: %[[T8:.*]] = vector.extract %[[X]][1] : f32 from vector<2xf32> -// CHECK: %[[T9:.*]] = vector.outerproduct %[[T7]], %[[T8]], %[[T6]] {kind = #vector.kind} : vector<2xf32>, f32 +// CHECK: %[[T9:.*]] = vector.outerproduct 
%[[T7]], %[[T8]], %[[T6]] {kind = #vector.kind} : vector<2xf32>, f32 func.func @matvec_mk_k_m_max(%A: vector<2x2xf32>, %x: vector<2xf32>, %b: vector<2xf32>) -> vector<2xf32> { @@ -193,13 +193,13 @@ func.func @matvec_mk_k_m_max(%A: vector<2x2xf32>, // CHECK-SAME: %[[IN_MASK:.*]]: vector<2x3xi1>) -> vector<2xf32> // CHECK: %[[T_MASK:.*]] = vector.transpose %[[IN_MASK]], [1, 0] : vector<2x3xi1> to vector<3x2xi1> // CHECK: %[[MASK0:.*]] = vector.extract %[[T_MASK]][0] : vector<2xi1> from vector<3x2xi1> -// CHECK: vector.mask %[[MASK0]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> +// CHECK: vector.mask %[[MASK0]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> // CHECK: %[[MASK1:.*]] = vector.extract %[[T_MASK]][1] : vector<2xi1> from vector<3x2xi1> -// CHECK: vector.mask %[[MASK1]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> +// CHECK: vector.mask %[[MASK1]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> // CHECK: %[[MASK2:.*]] = vector.extract %[[T_MASK]][2] : vector<2xi1> from vector<3x2xi1> -// CHECK: vector.mask %[[MASK2]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> +// CHECK: vector.mask %[[MASK2]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<2xf32>, f32 } : vector<2xi1> -> vector<2xf32> func.func @masked_matvec_mk_k_m_max(%A: vector<2x3xf32>, %x: vector<3xf32>, %b: vector<2xf32>, @@ -216,13 +216,13 @@ func.func @masked_matvec_mk_k_m_max(%A: vector<2x3xf32>, // CHECK-SAME: %[[IN_MASK:.*]]: vector<[2]x3xi1>) -> vector<[2]xf32> // CHECK: %[[T_MASK:.*]] = vector.transpose %[[IN_MASK]], [1, 0] : vector<[2]x3xi1> to vector<3x[2]xi1> // CHECK: %[[MASK0:.*]] = vector.extract %[[T_MASK]][0] : vector<[2]xi1> from vector<3x[2]xi1> -// CHECK: vector.mask %[[MASK0]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> +// CHECK: vector.mask %[[MASK0]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> // CHECK: %[[MASK1:.*]] = vector.extract %[[T_MASK]][1] : vector<[2]xi1> from vector<3x[2]xi1> -// CHECK: vector.mask %[[MASK1]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> +// CHECK: vector.mask %[[MASK1]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> // CHECK: %[[MASK2:.*]] = vector.extract %[[T_MASK]][2] : vector<[2]xi1> from vector<3x[2]xi1> -// CHECK: vector.mask %[[MASK2]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> +// CHECK: vector.mask %[[MASK2]] { vector.outerproduct {{.*}} {kind = #vector.kind} : vector<[2]xf32>, f32 } : vector<[2]xi1> -> vector<[2]xf32> func.func @masked_matvec_mk_k_m_max_scalable_parallel_dim(%A: vector<[2]x3xf32>, %x: vector<3xf32>, %b: vector<[2]xf32>, diff --git a/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir b/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir index 614a97fe4d67..308baa97af9a 100644 --- a/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir +++ b/mlir/test/Dialect/Vector/vector-multi-reduction-outer-lowering.mlir @@ -19,7 +19,7 @@ func.func @vector_multi_reduction(%arg0: 
vector<2x4xf32>, %acc: vector<2xf32>) - // CHECK: return %[[RESULT_VEC]] : vector<2xf32> func.func @vector_multi_reduction_min(%arg0: vector<2x4xf32>, %acc: vector<2xf32>) -> vector<2xf32> { - %0 = vector.multi_reduction , %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32> + %0 = vector.multi_reduction , %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32> return %0 : vector<2xf32> } @@ -37,7 +37,7 @@ func.func @vector_multi_reduction_min(%arg0: vector<2x4xf32>, %acc: vector<2xf32 // CHECK: return %[[RESULT_VEC]] : vector<2xf32> func.func @vector_multi_reduction_max(%arg0: vector<2x4xf32>, %acc: vector<2xf32>) -> vector<2xf32> { - %0 = vector.multi_reduction , %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32> + %0 = vector.multi_reduction , %arg0, %acc [1] : vector<2x4xf32> to vector<2xf32> return %0 : vector<2xf32> } @@ -175,7 +175,7 @@ func.func @vector_multi_reduction_parallel_middle(%arg0: vector<3x4x5xf32>, %acc // `InnerOuterDimReductionConversion` on this function results in an // infinite loop. So just check that some value is returned. func.func @vector_reduction_1D(%arg0 : vector<2xf32>, %acc: f32) -> f32 { - %0 = vector.multi_reduction #vector.kind, %arg0, %acc [0] : vector<2xf32> to f32 + %0 = vector.multi_reduction #vector.kind, %arg0, %acc [0] : vector<2xf32> to f32 return %0 : f32 } // CHECK-LABEL: func @vector_reduction_1D diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir index ce160880a009..298c382eac72 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32-reassoc.mlir @@ -33,10 +33,10 @@ func.func @entry() { %3 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %3 : f32 // CHECK: 3 - %4 = vector.reduction , %v2 : vector<64xf32> into f32 + %4 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %4 : f32 // CHECK: 1 - %5 = vector.reduction , %v2 : vector<64xf32> into f32 + %5 = vector.reduction , %v2 : vector<64xf32> into f32 vector.print %5 : f32 // CHECK: 3 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir index 56d987ba2e22..aac679c5f5bc 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f32.mlir @@ -45,10 +45,10 @@ func.func @entry() { %3 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %3 : f32 // CHECK: 5 - %4 = vector.reduction , %v9 : vector<10xf32> into f32 + %4 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %4 : f32 // CHECK: -16 - %5 = vector.reduction , %v9 : vector<10xf32> into f32 + %5 = vector.reduction , %v9 : vector<10xf32> into f32 vector.print %5 : f32 // CHECK: 5 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir index 711144b67485..3abe18252c0d 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64-reassoc.mlir @@ -33,10 +33,10 @@ func.func @entry() { %3 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %3 : f64 // CHECK: 3 - %4 = vector.reduction , %v2 : vector<64xf64> into f64 + %4 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %4 : f64 // CHECK: 1 - %5 = 
vector.reduction , %v2 : vector<64xf64> into f64 + %5 = vector.reduction , %v2 : vector<64xf64> into f64 vector.print %5 : f64 // CHECK: 3 diff --git a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir index 41d1bbcb731f..d5ae64c058d4 100644 --- a/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir +++ b/mlir/test/Integration/Dialect/Vector/CPU/test-reductions-f64.mlir @@ -45,10 +45,10 @@ func.func @entry() { %3 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %3 : f64 // CHECK: 5 - %4 = vector.reduction , %v9 : vector<10xf64> into f64 + %4 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %4 : f64 // CHECK: -16 - %5 = vector.reduction , %v9 : vector<10xf64> into f64 + %5 = vector.reduction , %v9 : vector<10xf64> into f64 vector.print %5 : f64 // CHECK: 5
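To make the rename concrete, here is a minimal before/after sketch referenced from the commit message above. It is illustrative only and not part of the patch; the function and value names are made up. It shows the new kind spellings in vector.reduction and gpu.all_reduce, each of which now matches the arith op that defines its semantics (arith.minnumf / arith.maxnumf):

// Illustrative sketch: old spellings appear in the comments, new ones in the code.
func.func @reduction_kind_examples(%v: vector<4xf32>, %acc: f32) -> (f32, f32) {
  // Was `vector.reduction <maxf>, %v`; the kind now mirrors arith.maxnumf.
  %0 = vector.reduction <maxnumf>, %v : vector<4xf32> into f32
  // Was `vector.reduction <minf>, %v, %acc` (fused accumulator form); now arith.minnumf.
  %1 = vector.reduction <minnumf>, %v, %acc : vector<4xf32> into f32
  return %0, %1 : f32, f32
}

gpu.module @kernels {
  gpu.func @all_reduce_example(%arg : f32) kernel {
    // Was `gpu.all_reduce maxf`; the combining semantics still follow arith.maxnumf.
    %r = gpu.all_reduce maxnumf %arg uniform {} : (f32) -> (f32)
    gpu.return
  }
}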