/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimize_bytecode.h"

#include "assembler/assembly-emitter.h"
#include "assembler/extensions/extensions.h"
#include "bytecode_instruction.h"
#include "bytecodeopt_options.h"
#include "codegen.h"
#include "common.h"
#include "compiler/optimizer/ir/constants.h"
#include "compiler/optimizer/ir_builder/ir_builder.h"
#include "compiler/optimizer/ir_builder/pbc_iterator.h"
#include "compiler/optimizer/optimizations/cleanup.h"
#include "compiler/optimizer/optimizations/lowering.h"
#include "compiler/optimizer/optimizations/move_constants.h"
#include "compiler/optimizer/optimizations/regalloc/reg_alloc.h"
#include "compiler/optimizer/optimizations/vn.h"
#include "libpandabase/mem/arena_allocator.h"
#include "libpandabase/mem/pool_manager.h"
#include "libpandafile/class_data_accessor.h"
#include "libpandafile/class_data_accessor-inl.h"
#include "libpandafile/method_data_accessor.h"
#include "reg_acc_alloc.h"
#include "reg_encoder.h"
#include "runtime_adapter.h"

#include <regex>

namespace panda::bytecodeopt {
// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
panda::bytecodeopt::Options options("");

template <typename T>
constexpr void RunOpts(compiler::Graph *graph)
{
    graph->RunPass<compiler::Cleanup>();
    graph->RunPass<T>();
}

template <typename First, typename Second, typename... Rest>
constexpr void RunOpts(compiler::Graph *graph)
{
    RunOpts<First>(graph);
    RunOpts<Second, Rest...>(graph);
}
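
// E.g. the RunOpts<compiler::ValNum, compiler::Lowering, compiler::MoveConstants>(graph) call
// below unfolds into Cleanup+ValNum, Cleanup+Lowering, Cleanup+MoveConstants: every listed
// optimization is preceded by a fresh Cleanup run.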

bool RunOptimizations(compiler::Graph *graph, BytecodeOptIrInterface *iface)
{
    constexpr int OPT_LEVEL_0 = 0;

    if (panda::bytecodeopt::options.GetOptLevel() == OPT_LEVEL_0) {
        return false;
    }

    graph->RunPass<compiler::Cleanup>();
    ASSERT(graph->IsDynamicMethod());
    RunOpts<compiler::ValNum, compiler::Lowering, compiler::MoveConstants>(graph);

    // this pass should run just before register allocator
    graph->RunPass<compiler::Cleanup>();
    graph->RunPass<RegAccAlloc>();

    graph->RunPass<compiler::Cleanup>();
    if (!RegAlloc(graph)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Failed compiler::RegAlloc";
        return false;
    }

    graph->RunPass<compiler::Cleanup>();
    if (!graph->RunPass<RegEncoder>()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Failed RegEncoder";
        return false;
    }

    return true;
}

void BuildMapFromPcToIns(pandasm::Function &function, BytecodeOptIrInterface &ir_interface,
                         const compiler::Graph *graph, compiler::RuntimeInterface::MethodPtr method_ptr)
{
    function.local_variable_debug.clear();
    auto *pc_ins_map = ir_interface.GetPcInsMap();
    pc_ins_map->reserve(function.ins.size());
    auto instructions_buf = graph->GetRuntime()->GetMethodCode(method_ptr);
    compiler::BytecodeInstructions instructions(instructions_buf, graph->GetRuntime()->GetMethodCodeSize(method_ptr));
    size_t idx = 0;
    for (auto insn : instructions) {
        pandasm::Ins &ins = function.ins[idx++];
        pc_ins_map->emplace(instructions.GetPc(insn), &ins);
        if (idx >= function.ins.size()) {
            break;
        }
    }
}

static void ColumnNumberPropagate(pandasm::Function *function)
{
    auto &ins_vec = function->ins;
    uint32_t cn = compiler::INVALID_COLUMN_NUM;

    // handle the instructions that are at the beginning of code but do not have column number
    size_t k = 0;
    while (k < ins_vec.size() && cn == compiler::INVALID_COLUMN_NUM) {
        cn = ins_vec[k++].ins_debug.column_number;
    }
    if (cn == compiler::INVALID_COLUMN_NUM) {
        LOG(DEBUG, BYTECODE_OPTIMIZER) << "Failed ColumnNumberPropagate: All insts have invalid column number";
        return;
    }
    for (size_t j = 0; j < k - 1; j++) {
        ins_vec[j].ins_debug.SetColumnNumber(cn);
    }

    // handle other instructions that do not have column number
    for (; k < ins_vec.size(); k++) {
        if (ins_vec[k].ins_debug.column_number != compiler::INVALID_COLUMN_NUM) {
            cn = ins_vec[k].ins_debug.column_number;
        } else {
            ins_vec[k].ins_debug.SetColumnNumber(cn);
        }
    }
}

static void LineNumberPropagate(pandasm::Function *function)
{
    if (function == nullptr || function->ins.empty()) {
        return;
    }
    size_t ln = 0;
    auto &ins_vec = function->ins;

    // handle the instructions that are at the beginning of code but do not have line number
    size_t i = 0;
    while (i < ins_vec.size() && ln == 0) {
        ln = ins_vec[i++].ins_debug.line_number;
    }
    if (ln == 0) {
        LOG(DEBUG, BYTECODE_OPTIMIZER) << "Failed LineNumberPropagate: All insts have invalid line number";
        return;
    }
    for (size_t j = 0; j < i - 1; j++) {
        ins_vec[j].ins_debug.SetLineNumber(ln);
    }

    // handle other instructions that do not have line number
    for (; i < ins_vec.size(); i++) {
        if (ins_vec[i].ins_debug.line_number != 0) {
            ln = ins_vec[i].ins_debug.line_number;
        } else {
            ins_vec[i].ins_debug.SetLineNumber(ln);
        }
    }
}

static void DebugInfoPropagate(pandasm::Function &function, const compiler::Graph *graph,
                               BytecodeOptIrInterface &ir_interface)
{
    LineNumberPropagate(&function);
    if (graph->IsDynamicMethod()) {
        ColumnNumberPropagate(&function);
    }
    ir_interface.ClearPcInsMap();
}

static bool SkipFunction(const pandasm::Function &function, const std::string &func_name)
{
    if (panda::bytecodeopt::options.WasSetMethodRegex()) {
        static std::regex rgx(panda::bytecodeopt::options.GetMethodRegex());
        if (!std::regex_match(func_name, rgx)) {
            LOG(INFO, BYTECODE_OPTIMIZER) << "Skip Function " << func_name << ": Function's name doesn't match regex";
            return true;
        }
    }

    if (panda::bytecodeopt::options.IsSkipMethodsWithEh() && !function.catch_blocks.empty()) {
        LOG(INFO, BYTECODE_OPTIMIZER) << "Was not optimized " << func_name << ": Function has catch blocks";
        return true;
    }

    if ((function.regs_num + function.GetParamsNum()) > compiler::VIRTUAL_FRAME_SIZE) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Unable to optimize " << func_name
                                       << ": Function frame size is larger than allowed one";
        return true;
    }
    return false;
}

static void SetCompilerOptions(bool is_dynamic)
{
    compiler::options.SetCompilerUseSafepoint(false);
    compiler::options.SetCompilerSupportInitObjectInst(true);
    if (!compiler::options.WasSetCompilerMaxBytecodeSize()) {
        compiler::options.SetCompilerMaxBytecodeSize(MAX_BYTECODE_SIZE);
    }
    if (is_dynamic) {
        panda::bytecodeopt::options.SetSkipMethodsWithEh(true);
    }
}
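
// Per-function flow: locate the pandasm::Function in the program, build the compiler IR graph
// through BytecodeOptimizerRuntimeAdapter, run the optimization/regalloc/encoding pipeline from
// RunOptimizations, regenerate bytecode with BytecodeGen, then propagate line/column debug info
// back onto the new instructions.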

bool OptimizeFunction(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                      const panda_file::MethodDataAccessor &mda, bool is_dynamic)
{
    ArenaAllocator allocator {SpaceType::SPACE_TYPE_COMPILER};
    ArenaAllocator local_allocator {SpaceType::SPACE_TYPE_COMPILER, nullptr, true};

    SetCompilerOptions(is_dynamic);

    auto ir_interface = BytecodeOptIrInterface(maps, prog);

    auto func_name = ir_interface.GetMethodIdByOffset(mda.GetMethodId().GetOffset());
    LOG(INFO, BYTECODE_OPTIMIZER) << "Optimizing function: " << func_name;
    auto it = prog->function_table.find(func_name);
    if (it == prog->function_table.end()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Cannot find function: " << func_name;
        return false;
    }

    auto method_ptr = reinterpret_cast<compiler::RuntimeInterface::MethodPtr>(mda.GetMethodId().GetOffset());
    panda::BytecodeOptimizerRuntimeAdapter adapter(mda.GetPandaFile());
    auto graph = allocator.New<compiler::Graph>(&allocator, &local_allocator, Arch::NONE, method_ptr, &adapter, false,
                                                nullptr, is_dynamic, true);

    panda::pandasm::Function &function = it->second;
    if (SkipFunction(function, func_name)) {
        return false;
    }

    // build map from pc to pandasm::ins (to re-build line-number info in BytecodeGen)
    BuildMapFromPcToIns(function, ir_interface, graph, method_ptr);

    if ((graph == nullptr) || !graph->RunPass<panda::compiler::IrBuilder>()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": IR builder failed!";
        return false;
    }

    if (graph->HasIrreducibleLoop()) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Graph has irreducible loop!";
        return false;
    }

    if (!RunOptimizations(graph, &ir_interface)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Running optimizations failed!";
        return false;
    }

    if (!graph->RunPass<BytecodeGen>(&function, &ir_interface, prog)) {
        LOG(ERROR, BYTECODE_OPTIMIZER) << "Optimizing " << func_name << ": Code generation failed!";
        return false;
    }

    DebugInfoPropagate(function, graph, ir_interface);

    function.value_of_first_param =
        static_cast<int64_t>(graph->GetStackSlotsCount()) - 1;  // Work-around promotion rules
    function.regs_num = static_cast<size_t>(function.value_of_first_param + 1);

    if (auto frame_size = function.regs_num + function.GetParamsNum(); frame_size >= NUM_COMPACTLY_ENCODED_REGS) {
        LOG(INFO, BYTECODE_OPTIMIZER) << "Function " << func_name << " has frame size " << frame_size;
    }

    LOG(DEBUG, BYTECODE_OPTIMIZER) << "Optimized " << func_name;

    return true;
}

bool OptimizePandaFile(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                       const std::string &pfile_name, bool is_dynamic)
{
    auto pfile = panda_file::OpenPandaFile(pfile_name);
    if (!pfile) {
        LOG(FATAL, BYTECODE_OPTIMIZER) << "Can not open binary file: " << pfile_name;
    }

    bool result = true;
    for (uint32_t id : pfile->GetClasses()) {
        panda_file::File::EntityId record_id {id};

        if (pfile->IsExternal(record_id)) {
            continue;
        }

        panda_file::ClassDataAccessor cda {*pfile, record_id};
        cda.EnumerateMethods([prog, maps, is_dynamic, &result](panda_file::MethodDataAccessor &mda) {
            if (!mda.IsExternal()) {
                result = OptimizeFunction(prog, maps, mda, is_dynamic) && result;
            }
        });
    }

    return result;
}

bool OptimizeBytecode(pandasm::Program *prog, const pandasm::AsmEmitter::PandaFileToPandaAsmMaps *maps,
                      const std::string &pandafile_name, bool is_dynamic, bool has_memory_pool)
{
    ASSERT(prog != nullptr);
    ASSERT(maps != nullptr);

    if (!has_memory_pool) {
        PoolManager::Initialize(PoolType::MALLOC);
    }

    auto res = OptimizePandaFile(prog, maps, pandafile_name, is_dynamic);

    if (!has_memory_pool) {
        PoolManager::Finalize();
    }

    return res;
}
}  // namespace panda::bytecodeopt
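
// Usage sketch (illustrative only, not part of this translation unit): the optimizer is driven
// after the assembler has emitted a .abc file and collected a PandaFileToPandaAsmMaps for it.
// How `prog` and `maps` are produced depends on the frontend; the emit step described below is
// an assumption, not an exact API.
//
//   panda::pandasm::Program prog = /* built by the frontend */;
//   panda::pandasm::AsmEmitter::PandaFileToPandaAsmMaps maps;
//   /* emit `prog` to "out.abc", filling `maps` with entity-id -> name mappings */
//   bool ok = panda::bytecodeopt::OptimizeBytecode(&prog, &maps, "out.abc",
//                                                  /* is_dynamic */ true,
//                                                  /* has_memory_pool */ false);
//   /* on success, `prog` holds the optimized bytecode and can be re-emitted */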