diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 0cd62e6db768..73c6f14dd28f 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -1145,6 +1145,20 @@ struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
   }
 };
 
+/// Lower `fir.no_reassoc` to LLVM IR dialect.
+/// TODO: how do we want to enforce this in LLVM-IR? Can we manipulate the fast
+/// math flags?
+struct NoReassocOpConversion : public FIROpConversion<fir::NoReassocOp> {
+  using FIROpConversion::FIROpConversion;
+
+  mlir::LogicalResult
+  matchAndRewrite(fir::NoReassocOp noreassoc, OpAdaptor adaptor,
+                  mlir::ConversionPatternRewriter &rewriter) const override {
+    rewriter.replaceOp(noreassoc, adaptor.getOperands()[0]);
+    return success();
+  }
+};
+
 /// Lower `fir.select_type` to LLVM IR dialect.
 struct SelectTypeOpConversion : public FIROpConversion<fir::SelectTypeOp> {
   using FIROpConversion::FIROpConversion;
@@ -2166,13 +2180,13 @@ public:
         FirEndOpConversion, HasValueOpConversion, GenTypeDescOpConversion,
         GlobalLenOpConversion, GlobalOpConversion, InsertOnRangeOpConversion,
         InsertValueOpConversion, IsPresentOpConversion, LoadOpConversion,
-        NegcOpConversion, MulcOpConversion, SelectCaseOpConversion,
-        SelectOpConversion, SelectRankOpConversion, SelectTypeOpConversion,
-        ShapeOpConversion, ShapeShiftOpConversion, ShiftOpConversion,
-        SliceOpConversion, StoreOpConversion, StringLitOpConversion,
-        SubcOpConversion, UnboxCharOpConversion, UnboxProcOpConversion,
-        UndefOpConversion, UnreachableOpConversion, ZeroOpConversion>(
-        typeConverter);
+        NegcOpConversion, NoReassocOpConversion, MulcOpConversion,
+        SelectCaseOpConversion, SelectOpConversion, SelectRankOpConversion,
+        SelectTypeOpConversion, ShapeOpConversion, ShapeShiftOpConversion,
+        ShiftOpConversion, SliceOpConversion, StoreOpConversion,
+        StringLitOpConversion, SubcOpConversion, UnboxCharOpConversion,
+        UnboxProcOpConversion, UndefOpConversion, UnreachableOpConversion,
+        ZeroOpConversion>(typeConverter);
     mlir::populateStdToLLVMConversionPatterns(typeConverter, pattern);
     mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter,
                                                             pattern);
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index af43418e3c31..33b7941a4581 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -1569,3 +1569,23 @@ func @field_index_dynamic_size() -> () {
 // CHECK-NEXT: %{{.*}} = llvm.call @custom_typeP.field_1.offset() {field = 0 : i64} : () -> i32
 // CHECK-NEXT: %{{.*}} = llvm.call @custom_typeP.field_2.offset() {field = 1 : i64} : () -> i32
 // CHECK-NEXT: llvm.return
+
+// -----
+
+// Check `fir.no_reassoc` conversion to LLVM IR dialect
+
+func @no_reassoc(%arg0: !fir.ref<i32>) {
+  %0 = fir.alloca i32
+  %1 = fir.load %arg0 : !fir.ref<i32>
+  %2 = fir.no_reassoc %1 : i32
+  fir.store %2 to %0 : !fir.ref<i32>
+  return
+}
+
+// CHECK-LABEL: llvm.func @no_reassoc(
+// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr<i32>) {
+// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[ALLOC:.*]] = llvm.alloca %[[C1]] x i32 {in_type = i32, operand_segment_sizes = dense<0> : vector<2xi32>} : (i64) -> !llvm.ptr<i32>
+// CHECK: %[[LOAD:.*]] = llvm.load %[[ARG0]] : !llvm.ptr<i32>
+// CHECK: llvm.store %[[LOAD]], %[[ALLOC]] : !llvm.ptr<i32>
+// CHECK: llvm.return
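
For context, a minimal before/after sketch of the effect of NoReassocOpConversion, derived from the test above: the pattern simply replaces the `fir.no_reassoc` result with its operand, so no trace of the "no reassociation" constraint survives in the LLVM dialect output (hence the TODO about fast-math flags in the conversion comment).

    // FIR input (body of @no_reassoc above):
    %1 = fir.load %arg0 : !fir.ref<i32>
    %2 = fir.no_reassoc %1 : i32
    fir.store %2 to %0 : !fir.ref<i32>

    // LLVM dialect output after conversion: fir.no_reassoc is erased and the
    // loaded value feeds the store directly.
    %1 = llvm.load %arg0 : !llvm.ptr<i32>
    llvm.store %1, %0 : !llvm.ptr<i32>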