[mlir][NFC] Use options struct in ExecutionEngine::create

The number of optional parameters to ExecutionEngine::create has grown too large,
which makes adding new optional parameters quite a chore.

Fix this by using an options struct.

Reviewed By: mehdi_amini

Differential Revision: https://reviews.llvm.org/D120380
Author: Emilio Cota
Date: 2022-02-22 22:27:54 -05:00
Parent: b1863d8245
Commit: a7db3c611b
6 changed files with 63 additions and 59 deletions
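
For illustration, a minimal before/after sketch of a call site (taken from the Toy tutorial diffs below; `module` and `optPipeline` come from the surrounding code there):

  // Before: positional optional parameters.
  auto maybeEngine = mlir::ExecutionEngine::create(
      module, /*llvmModuleBuilder=*/nullptr, optPipeline);

  // After: named fields on an options struct; unspecified options keep their defaults.
  mlir::ExecutionEngineOptions engineOptions;
  engineOptions.transformer = optPipeline;
  auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);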

@@ -236,8 +236,9 @@ int runJit(mlir::ModuleOp module) {
   // Create an MLIR execution engine. The execution engine eagerly JIT-compiles
   // the module.
-  auto maybeEngine = mlir::ExecutionEngine::create(
-      module, /*llvmModuleBuilder=*/nullptr, optPipeline);
+  mlir::ExecutionEngineOptions engineOptions;
+  engineOptions.transformer = optPipeline;
+  auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);
   assert(maybeEngine && "failed to construct an execution engine");
   auto &engine = maybeEngine.get();

@@ -237,8 +237,9 @@ int runJit(mlir::ModuleOp module) {
   // Create an MLIR execution engine. The execution engine eagerly JIT-compiles
   // the module.
-  auto maybeEngine = mlir::ExecutionEngine::create(
-      module, /*llvmModuleBuilder=*/nullptr, optPipeline);
+  mlir::ExecutionEngineOptions engineOptions;
+  engineOptions.transformer = optPipeline;
+  auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);
   assert(maybeEngine && "failed to construct an execution engine");
   auto &engine = maybeEngine.get();

@@ -48,6 +48,39 @@ private:
   llvm::StringMap<std::unique_ptr<llvm::MemoryBuffer>> cachedObjects;
 };

+struct ExecutionEngineOptions {
+  /// If `llvmModuleBuilder` is provided, it is used to create an LLVM module
+  /// from the given MLIR module. Otherwise, the default
+  /// `translateModuleToLLVMIR` function translates the MLIR module to LLVM IR.
+  llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
+                                                   llvm::LLVMContext &)>
+      llvmModuleBuilder = nullptr;
+
+  /// If `transformer` is provided, it will be called on the LLVM module during
+  /// JIT-compilation and can be used, e.g., for reporting or optimization.
+  llvm::function_ref<llvm::Error(llvm::Module *)> transformer = {};
+
+  /// `jitCodeGenOptLevel`, when provided, is used as the optimization level
+  /// for target code generation.
+  Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None;
+
+  /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
+  /// open and link the shared libraries for symbol resolution.
+  ArrayRef<StringRef> sharedLibPaths = {};
+
+  /// If `enableObjectCache` is set, the JIT compiler will create one to store
+  /// the object generated for the given module.
+  bool enableObjectCache = true;
+
+  /// If `enableGDBNotificationListener` is set, the JIT compiler will notify
+  /// LLVM's global GDB notification listener.
+  bool enableGDBNotificationListener = true;
+
+  /// If `enablePerfNotificationListener` is set, the JIT compiler will notify
+  /// LLVM's global Perf notification listener.
+  bool enablePerfNotificationListener = true;
+};
+
 /// JIT-backed execution engine for MLIR modules. Assumes the module can be
 /// converted to LLVM IR. For each function, creates a wrapper function with
 /// the fixed interface
@@ -64,38 +97,8 @@ public:
                   bool enablePerfNotificationListener);

   /// Creates an execution engine for the given module.
-  ///
-  /// If `llvmModuleBuilder` is provided, it will be used to create LLVM module
-  /// from the given MLIR module. Otherwise, a default `translateModuleToLLVMIR`
-  /// function will be used to translate MLIR module to LLVM IR.
-  ///
-  /// If `transformer` is provided, it will be called on the LLVM module during
-  /// JIT-compilation and can be used, e.g., for reporting or optimization.
-  ///
-  /// `jitCodeGenOptLevel`, when provided, is used as the optimization level for
-  /// target code generation.
-  ///
-  /// If `sharedLibPaths` are provided, the underlying JIT-compilation will
-  /// open and link the shared libraries for symbol resolution.
-  ///
-  /// If `enableObjectCache` is set, the JIT compiler will create one to store
-  /// the object generated for the given module.
-  ///
-  /// If enable `enableGDBNotificationListener` is set, the JIT compiler will
-  /// notify the llvm's global GDB notification listener.
-  ///
-  /// If `enablePerfNotificationListener` is set, the JIT compiler will notify
-  /// the llvm's global Perf notification listener.
   static llvm::Expected<std::unique_ptr<ExecutionEngine>>
-  create(ModuleOp m,
-         llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
-                                                          llvm::LLVMContext &)>
-             llvmModuleBuilder = nullptr,
-         llvm::function_ref<llvm::Error(llvm::Module *)> transformer = {},
-         Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel = llvm::None,
-         ArrayRef<StringRef> sharedLibPaths = {}, bool enableObjectCache = true,
-         bool enableGDBNotificationListener = true,
-         bool enablePerfNotificationListener = true);
+  create(ModuleOp m, const ExecutionEngineOptions &options = {});

   /// Looks up a packed-argument function wrapping the function with the given
   /// name and returns a pointer to it. Propagates errors in case of failure.
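
A usage sketch of the new interface (not part of the patch; `module`, `optPipeline`, and `libPaths` are assumed to exist in the caller): a client that previously passed several positional arguments now sets only the fields it needs.

  mlir::ExecutionEngineOptions engineOptions;
  engineOptions.transformer = optPipeline;           // optional LLVM IR transform
  engineOptions.jitCodeGenOptLevel = llvm::CodeGenOpt::Aggressive;
  engineOptions.sharedLibPaths = libPaths;           // libraries to open for symbol resolution
  engineOptions.enableObjectCache = false;           // remaining knobs keep their defaults
  auto maybeEngine = mlir::ExecutionEngine::create(module, engineOptions);
  if (!maybeEngine)
    return maybeEngine.takeError();

Note that `llvmModuleBuilder`, `transformer`, and `sharedLibPaths` are non-owning (`llvm::function_ref` / `ArrayRef`), so the options struct only references caller-owned state and is intended to be consumed immediately by `create`.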

@@ -50,9 +50,11 @@ mlirExecutionEngineCreate(MlirModule op, int optLevel, int numPaths,
   auto llvmOptLevel = static_cast<llvm::CodeGenOpt::Level>(optLevel);
   auto transformer = mlir::makeLLVMPassesTransformer(
       /*passes=*/{}, llvmOptLevel, /*targetMachine=*/tmOrError->get());
-  auto jitOrError =
-      ExecutionEngine::create(unwrap(op), /*llvmModuleBuilder=*/{}, transformer,
-                              llvmOptLevel, libPaths);
+  ExecutionEngineOptions jitOptions;
+  jitOptions.transformer = transformer;
+  jitOptions.jitCodeGenOptLevel = llvmOptLevel;
+  jitOptions.sharedLibPaths = libPaths;
+  auto jitOrError = ExecutionEngine::create(unwrap(op), jitOptions);
   if (!jitOrError) {
     consumeError(jitOrError.takeError());
     return MlirExecutionEngine{nullptr};

@@ -227,22 +227,16 @@ ExecutionEngine::ExecutionEngine(bool enableObjectCache,
   }
 }

-Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
-    ModuleOp m,
-    llvm::function_ref<std::unique_ptr<llvm::Module>(ModuleOp,
-                                                     llvm::LLVMContext &)>
-        llvmModuleBuilder,
-    llvm::function_ref<Error(llvm::Module *)> transformer,
-    Optional<llvm::CodeGenOpt::Level> jitCodeGenOptLevel,
-    ArrayRef<StringRef> sharedLibPaths, bool enableObjectCache,
-    bool enableGDBNotificationListener, bool enablePerfNotificationListener) {
+Expected<std::unique_ptr<ExecutionEngine>>
+ExecutionEngine::create(ModuleOp m, const ExecutionEngineOptions &options) {
   auto engine = std::make_unique<ExecutionEngine>(
-      enableObjectCache, enableGDBNotificationListener,
-      enablePerfNotificationListener);
+      options.enableObjectCache, options.enableGDBNotificationListener,
+      options.enablePerfNotificationListener);
   std::unique_ptr<llvm::LLVMContext> ctx(new llvm::LLVMContext);
-  auto llvmModule = llvmModuleBuilder ? llvmModuleBuilder(m, *ctx)
-                                      : translateModuleToLLVMIR(m, *ctx);
+  auto llvmModule = options.llvmModuleBuilder
+                        ? options.llvmModuleBuilder(m, *ctx)
+                        : translateModuleToLLVMIR(m, *ctx);
   if (!llvmModule)
     return makeStringError("could not convert to LLVM IR");

   // FIXME: the triple should be passed to the translation or dialect conversion
@@ -276,7 +270,7 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   }

   // Resolve symbols from shared libraries.
-  for (auto libPath : sharedLibPaths) {
+  for (auto libPath : options.sharedLibPaths) {
     auto mb = llvm::MemoryBuffer::getFile(libPath);
     if (!mb) {
       errs() << "Failed to create MemoryBuffer for: " << libPath
@@ -302,8 +296,8 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(
   // LLJITWithObjectCache example.
   auto compileFunctionCreator = [&](JITTargetMachineBuilder jtmb)
       -> Expected<std::unique_ptr<IRCompileLayer::IRCompiler>> {
-    if (jitCodeGenOptLevel)
-      jtmb.setCodeGenOptLevel(jitCodeGenOptLevel.getValue());
+    if (options.jitCodeGenOptLevel)
+      jtmb.setCodeGenOptLevel(options.jitCodeGenOptLevel.getValue());
     auto tm = jtmb.createTargetMachine();
     if (!tm)
       return tm.takeError();
@@ -320,9 +314,9 @@ Expected<std::unique_ptr<ExecutionEngine>> ExecutionEngine::create(

   // Add a ThreadSafemodule to the engine and return.
   ThreadSafeModule tsm(std::move(llvmModule), std::move(ctx));
-  if (transformer)
+  if (options.transformer)
     cantFail(tsm.withModuleDo(
-        [&](llvm::Module &module) { return transformer(&module); }));
+        [&](llvm::Module &module) { return options.transformer(&module); }));
   cantFail(jit->addIRModule(std::move(tsm)));

   engine->jit = std::move(jit);

@@ -207,9 +207,12 @@ static Error compileAndExecute(Options &options, ModuleOp module,
     return symbolMap;
   };

-  auto expectedEngine = mlir::ExecutionEngine::create(
-      module, config.llvmModuleBuilder, config.transformer, jitCodeGenOptLevel,
-      executionEngineLibs);
+  mlir::ExecutionEngineOptions engineOptions;
+  engineOptions.llvmModuleBuilder = config.llvmModuleBuilder;
+  engineOptions.transformer = config.transformer;
+  engineOptions.jitCodeGenOptLevel = jitCodeGenOptLevel;
+  engineOptions.sharedLibPaths = executionEngineLibs;
+  auto expectedEngine = mlir::ExecutionEngine::create(module, engineOptions);
   if (!expectedEngine)
     return expectedEngine.takeError();