mirror of https://gitee.com/openharmony/arkcompiler_runtime_core
synced 2024-11-23 06:40:32 +00:00

Remove unused code

Issue: #I638QE
Signed-off-by: qiuyu <qiuyu22@huawei.com>
Change-Id: I33908e556a638806a5bb2a06e023ba449ba910ef

This commit is contained in:
parent 49c02e36ac
commit 2b028ad277

BUILD.gn | 40
@@ -220,7 +220,6 @@ config("ark_config") {
}

plugins_yamls = []
runtime_options_yamls = []
foreach(plugin, enabled_plugins) {
plugin_dir = "$ark_root/plugins/$plugin"
source_files = read_file("$plugin_dir/subproject_sources.gn", "scope")
@@ -229,24 +228,14 @@ foreach(plugin, enabled_plugins) {
plugins_yamls += [ "$plugin_dir/${source_files.option_yaml_path}" ]
}

if (defined(source_files.runtime_option_yaml_path)) {
runtime_options_yamls +=
[ "$plugin_dir/${source_files.runtime_option_yaml_path}" ]
}

source_files = {
}
}

entrypoints_yamls = []
foreach(plugin, enabled_plugins) {
plugin_dir = "$ark_root/plugins/$plugin"
source_files = read_file("$plugin_dir/subproject_sources.gn", "scope")
if (defined(source_files.entrypoints_yaml_path)) {
entrypoints_yamls += [ "$plugin_dir/${source_files.entrypoints_yaml_path}" ]
}
source_files = {
}
concat_yamls("concat_plugins_yamls") {
output_file = "$target_gen_dir/plugin_options.yaml"
default_file = "$ark_root/templates/plugin_options.yaml"
add_yamls = plugins_yamls
}

inst_templates_yamls = []
@@ -261,29 +250,12 @@ foreach(plugin, enabled_plugins) {
}
}

concat_yamls("concat_plugins_yamls") {
output_file = "$target_gen_dir/plugin_options.yaml"
default_file = "$ark_root/templates/plugin_options.yaml"
add_yamls = plugins_yamls
}

concat_yamls("concat_entrypoints_yamls") {
output_file = "$target_gen_dir/runtime/entrypoints.yaml"
default_file = "$ark_root/runtime/entrypoints/entrypoints.yaml"
add_yamls = entrypoints_yamls
}

concat_yamls("concat_inst_templates_yamls") {
output_file = "$target_gen_dir/compiler/generated/inst_templates.yaml"
default_file = "$ark_root/compiler/optimizer/ir_builder/inst_templates.yaml"
add_yamls = inst_templates_yamls
}

merge_yamls("merge_runtime_options_yamls") {
output_file = "$target_gen_dir/runtime_options.yaml"
add_yamls = [ "$ark_root/runtime/options.yaml" ] + runtime_options_yamls
}

if (!ark_standalone_build) {
group("bcopt_type_adapter_unit_test") {
if (host_os == "mac") {
@@ -321,8 +293,6 @@ if (!ark_standalone_build) {
"$ark_root/libpandabase/tests:unittest",
"$ark_root/libpandafile/tests:unittest",
"$ark_root/libziparchive/tests:unittest",
"$ark_root/plugins/ecmascript/tests:unittest",
"$ark_root/runtime/tests:unittest",
]
}

@@ -332,8 +302,6 @@ if (!ark_standalone_build) {
"$ark_root/libpandabase/tests:host_unittest",
"$ark_root/libpandafile/tests:host_unittest",
"$ark_root/libziparchive/tests:host_unittest",
"$ark_root/plugins/ecmascript/tests:host_unittest",
"$ark_root/runtime/tests:host_unittest",
]
}
}
@@ -25,7 +25,6 @@ config("arkassembler_public_config") {
}

libarkassembler_sources = [
"$ark_root/plugins/ecmascript/assembler/extension/ecmascript_meta.cpp",
"$target_gen_dir/ins_to_string.cpp",
"annotation.cpp",
"assembly-emitter.cpp",
@@ -34,6 +33,7 @@ libarkassembler_sources = [
"assembly-program.cpp",
"assembly-type.cpp",
"context.cpp",
"extensions/ecmascript_meta.cpp",
"extensions/extensions.cpp",
"lexer.cpp",
"meta.cpp",
@@ -147,12 +147,10 @@ source_set("ark_asm_static") {
"$ark_root:ark_config",
"$ark_root/libpandabase:arkbase_public_config",
"$ark_root/libpandafile:arkfile_public_config",
"$ark_root/runtime:arkruntime_public_config",
]

deps = [
":libarkassembler_frontend_static",
"$ark_root/bytecode_optimizer:libarkbytecodeopt_frontend_static",
"$ark_root/libpandabase:libarkbase_frontend_static",
"$ark_root/libpandafile:libarkfile_frontend_static",
]
@@ -17,7 +17,7 @@
#define PANDA_ASSEMBLER_EXTENSIONS_REGISTER_EXTENSIONS_H

#include "extensions/extensions.h"
#include "extension/ecmascript_meta.h"
#include "extensions/ecmascript_meta.h"

namespace panda::pandasm::extensions {
std::unique_ptr<panda::pandasm::RecordMetadata> MetadataExtension::CreateRecordMetadata(panda::panda_file::SourceLang lang)
@@ -25,9 +25,6 @@

#include "assembly-emitter.h"
#include "assembly-parser.h"
#ifdef PANDA_WITH_BYTECODE_OPTIMIZER
#include "bytecode_optimizer/optimize_bytecode.h"
#endif
#include "file_format_version.h"
#include "error.h"
#include "lexer.h"
@@ -174,20 +171,6 @@ bool EmitProgramInBinary(panda::pandasm::Program &program, panda::PandArgParser
return false;
}

#ifdef PANDA_WITH_BYTECODE_OPTIMIZER
if (optimize.GetValue()) {
bool is_optimized = panda::bytecodeopt::OptimizeBytecode(&program, mapsp, output_file.GetValue());
if (!panda::pandasm::AsmEmitter::Emit(output_file.GetValue(), program, statp, mapsp, emit_debug_info)) {
std::cerr << "Failed to emit binary data: " << panda::pandasm::AsmEmitter::GetLastError() << std::endl;
return false;
}
if (!is_optimized) {
std::cerr << "Bytecode optimizer reported internal errors" << std::endl;
return false;
}
}
#endif

if (size_stat.GetValue()) {
size_t total_size = 0;
std::cout << "Panda file size statistic:" << std::endl;
@@ -39,7 +39,6 @@ libarkbytecodeopt_configs = [
"$ark_root/libpandabase:arkbase_public_config",
"$ark_root/libpandafile:arkfile_public_config",
"$ark_root/assembler:arkassembler_public_config",
"$ark_root/runtime:arkruntime_public_config",
]

ohos_shared_library("libarkbytecodeopt") {
@@ -228,10 +228,6 @@ def if_fcmpg?
'static_cast<int>(inst->IsFcmpg())'
end

def if_inci?
"static_cast<int>(CanConvertToIncI(inst))"
end

# Operand printers
def dst_r
'inst->GetDstReg()'
@@ -313,9 +309,7 @@ end
def call_me_from_template
# Empty visitors for IR instructions we want to ignore
# (Add missing IRs on demand)
%w[NullCheck BoundsCheck ZeroCheck NegativeCheck SafePoint
InitClass SaveStateDeoptimize RefTypeCheck Phi
Try SaveState LoadClass LoadAndInitClass Parameter].each do |op|
%w[Phi Try SaveState Parameter].each do |op|
visit(op) do
empty
end
@@ -14,8 +14,8 @@
*/
#include "codegen.h"
#include "common.h"
#include "runtime/include/coretypes/tagged_value.h"
#include "generate_ecma.inl"
#include "tagged_value.h"

namespace panda::bytecodeopt {

@@ -251,11 +251,8 @@ void BytecodeGen::EncodeSpillFillData(const compiler::SpillFillData &sf)
}

pandasm::Ins move;
if (GetGraph()->IsDynamicMethod()) {
result_.emplace_back(pandasm::Create_MOV(sf.DstValue(), sf.SrcValue()));
return;
}
UNREACHABLE();
result_.emplace_back(pandasm::Create_MOV(sf.DstValue(), sf.SrcValue()));
return;
}

void BytecodeGen::VisitSpillFill(GraphVisitor *visitor, Inst *inst)
@@ -285,11 +282,9 @@ void BytecodeGen::VisitConstant(GraphVisitor *visitor, Inst *inst)
auto type = inst->GetType();

/* Do not emit unused code for Const -> CastValueToAnyType chains */
if (enc->GetGraph()->IsDynamicMethod()) {
if (!HasUserPredicate(inst,
[](Inst const *i) { return i->GetOpcode() != compiler::Opcode::CastValueToAnyType; })) {
return;
}
if (!HasUserPredicate(inst,
[](Inst const *i) { return i->GetOpcode() != compiler::Opcode::CastValueToAnyType; })) {
return;
}

pandasm::Ins movi;
@@ -297,42 +292,17 @@ void BytecodeGen::VisitConstant(GraphVisitor *visitor, Inst *inst)
switch (type) {
case compiler::DataType::INT64:
case compiler::DataType::UINT64:
if (enc->GetGraph()->IsDynamicMethod()) {
enc->result_.emplace_back(pandasm::Create_LDAI(inst->CastToConstant()->GetInt64Value()));
DoSta(inst->GetDstReg(), enc->result_);
} else {
UNREACHABLE();
}
enc->result_.emplace_back(pandasm::Create_LDAI(inst->CastToConstant()->GetInt64Value()));
DoSta(inst->GetDstReg(), enc->result_);
break;
case compiler::DataType::FLOAT64:
if (enc->GetGraph()->IsDynamicMethod()) {
enc->result_.emplace_back(pandasm::Create_FLDAI(inst->CastToConstant()->GetDoubleValue()));
DoSta(inst->GetDstReg(), enc->result_);
} else {
UNREACHABLE();
}
enc->result_.emplace_back(pandasm::Create_FLDAI(inst->CastToConstant()->GetDoubleValue()));
DoSta(inst->GetDstReg(), enc->result_);
break;
case compiler::DataType::BOOL:
case compiler::DataType::INT8:
case compiler::DataType::UINT8:
case compiler::DataType::INT16:
case compiler::DataType::UINT16:
case compiler::DataType::INT32:
case compiler::DataType::UINT32:
if (enc->GetGraph()->IsDynamicMethod()) {
enc->result_.emplace_back(pandasm::Create_LDAI(inst->CastToConstant()->GetInt32Value()));
DoSta(inst->GetDstReg(), enc->result_);
} else {
UNREACHABLE();
}
break;
case compiler::DataType::FLOAT32:
if (enc->GetGraph()->IsDynamicMethod()) {
enc->result_.emplace_back(pandasm::Create_FLDAI(inst->CastToConstant()->GetFloatValue()));
DoSta(inst->GetDstReg(), enc->result_);
} else {
UNREACHABLE();
}
enc->result_.emplace_back(pandasm::Create_LDAI(inst->CastToConstant()->GetInt32Value()));
DoSta(inst->GetDstReg(), enc->result_);
break;
default:
UNREACHABLE();
@@ -367,21 +337,16 @@ void BytecodeGen::VisitIf(GraphVisitor *v, Inst *inst_base)
auto inst = inst_base->CastToIf();
switch (inst->GetInputType(0)) {
case compiler::DataType::ANY: {
if (enc->GetGraph()->IsDynamicMethod()) {
#if defined(ENABLE_BYTECODE_OPT) && defined(PANDA_WITH_ECMASCRIPT) && defined(ARK_INTRINSIC_SET)
IfEcma(v, inst);
break;
#endif
}
LOG(ERROR, BYTECODE_OPTIMIZER)
<< "Codegen for " << compiler::GetOpcodeString(inst->GetOpcode()) << " failed";
enc->success_ = false;
IfEcma(v, inst);
break;
#endif
}
default:
LOG(ERROR, BYTECODE_OPTIMIZER)
<< "Codegen for " << compiler::GetOpcodeString(inst->GetOpcode()) << " failed";
enc->success_ = false;
break;
}
}

@@ -476,14 +441,12 @@ void BytecodeGen::VisitIfImm(GraphVisitor *v, Inst *inst_base)
IfImmZero(v, inst_base);
return;
}
IfImmNonZero(v, inst_base);
}

void BytecodeGen::IfImmZero(GraphVisitor *v, Inst *inst_base)
{
auto enc = static_cast<BytecodeGen *>(v);
auto inst = inst_base->CastToIfImm();
ASSERT(enc->GetGraph()->IsDynamicMethod());
DoLda(inst->GetSrcReg(0), enc->result_);
auto label = LabelName(inst->GetBasicBlock()->GetTrueSuccessor()->GetId());
switch (inst->GetCc()) {
@@ -498,45 +461,20 @@ void BytecodeGen::IfImmZero(GraphVisitor *v, Inst *inst_base)
}
}

// NOLINTNEXTLINE(readability-function-size)
void BytecodeGen::IfImmNonZero(GraphVisitor *v, Inst *inst_base)
{
UNREACHABLE();
}

// NOLINTNEXTLINE(readability-function-size)
void BytecodeGen::IfImm64(GraphVisitor *v, Inst *inst_base)
{
UNREACHABLE();
}

// NOLINTNEXTLINE(readability-function-size)
void BytecodeGen::VisitCast(GraphVisitor *v, Inst *inst_base)
{
UNREACHABLE();
}

void BytecodeGen::VisitLoadString(GraphVisitor *v, Inst *inst_base)
{
pandasm::Ins ins;
auto enc = static_cast<BytecodeGen *>(v);
auto inst = inst_base->CastToLoadString();

/* Do not emit unused code for Str -> CastValueToAnyType chains */
if (enc->GetGraph()->IsDynamicMethod()) {
if (!HasUserPredicate(inst,
[](Inst const *i) { return i->GetOpcode() != compiler::Opcode::CastValueToAnyType; })) {
return;
}
if (!HasUserPredicate(inst,
[](Inst const *i) { return i->GetOpcode() != compiler::Opcode::CastValueToAnyType; })) {
return;
}

enc->result_.emplace_back(pandasm::Create_LDA_STR(enc->ir_interface_->GetStringIdByOffset(inst->GetTypeId())));
if (inst->GetDstReg() != compiler::ACC_REG_ID) {
if (enc->GetGraph()->IsDynamicMethod()) {
enc->result_.emplace_back(pandasm::Create_STA(inst->GetDstReg()));
} else {
UNREACHABLE();
}
enc->result_.emplace_back(pandasm::Create_STA(inst->GetDstReg()));
}
}

@@ -628,17 +566,7 @@ void BytecodeGen::VisitCastValueToAnyType([[maybe_unused]] GraphVisitor *v, [[ma
void BytecodeGen::VisitIntrinsic(GraphVisitor *visitor, Inst *inst_base)
{
ASSERT(inst_base->IsIntrinsic());
auto inst = inst_base->CastToIntrinsic();
auto enc = static_cast<BytecodeGen *>(visitor);

if (!enc->GetGraph()->IsDynamicMethod()) {
LOG(ERROR, BYTECODE_OPTIMIZER) << "Codegen for " << compiler::GetOpcodeString(inst->GetOpcode()) << " failed";
enc->success_ = false;
} else {
#ifdef ENABLE_BYTECODE_OPT
VisitEcma(visitor, inst_base);
#endif
}
VisitEcma(visitor, inst_base);
}

void BytecodeGen::VisitCatchPhi(GraphVisitor *v, Inst *inst)
@@ -106,10 +106,7 @@ public:

static void VisitIf(GraphVisitor *v, Inst *inst_base);
static void VisitIfImm(GraphVisitor *v, Inst *inst_base);
static void VisitCast(GraphVisitor *v, Inst *inst_base);
static void IfImmZero(GraphVisitor *v, Inst *inst_base);
static void IfImmNonZero(GraphVisitor *v, Inst *inst_base);
static void IfImm64(GraphVisitor *v, Inst *inst_base);
static void VisitIntrinsic(GraphVisitor *v, Inst *inst_base);
static void VisitLoadString(GraphVisitor *v, Inst *inst_base);
static void VisitReturn(GraphVisitor *v, Inst *inst_base);
@@ -21,73 +21,11 @@ namespace panda::bytecodeopt {

uint8_t AccReadIndex(const compiler::Inst *inst)
{
// For calls we cannot tell static index for acc position, thus
// ensure that we don't invoke this for calls
ASSERT(!inst->IsCall());

switch (inst->GetOpcode()) {
case compiler::Opcode::LoadArray:
case compiler::Opcode::StoreObject:
case compiler::Opcode::StoreStatic:
case compiler::Opcode::NewArray:
return 1U;
case compiler::Opcode::StoreArray:
return 2U;
default: {
if (inst->IsIntrinsic() && inst->IsAccRead()) {
ASSERT(inst->GetBasicBlock()->GetGraph()->IsDynamicMethod());
ASSERT(inst->GetInputsCount() >= 2U);
return inst->GetInputsCount() - 2U;
}
return 0;
}
if (inst->IsIntrinsic() && inst->IsAccRead()) {
ASSERT(inst->GetBasicBlock()->GetGraph()->IsDynamicMethod());
ASSERT(inst->GetInputsCount() >= 2U);
return inst->GetInputsCount() - 2U;
}
return 0;
}

// This method is used by bytecode optimizer's codegen.
bool CanConvertToIncI(const compiler::BinaryImmOperation *binop)
{
ASSERT(binop->GetBasicBlock()->GetGraph()->IsRegAllocApplied());
ASSERT(binop->GetOpcode() == compiler::Opcode::AddI || binop->GetOpcode() == compiler::Opcode::SubI);

// IncI works on the same register.
if (binop->GetSrcReg(0) != binop->GetDstReg()) {
return false;
}

// IncI cannot write accumulator.
if (binop->GetSrcReg(0) == compiler::ACC_REG_ID) {
return false;
}

// IncI users cannot read from accumulator.
// While Addi/SubI stores the output in accumulator, IncI works directly on registers.
for (const auto &user : binop->GetUsers()) {
const auto *uinst = user.GetInst();

if (uinst->IsCall()) {
continue;
}

const uint8_t index = AccReadIndex(uinst);
if (uinst->GetInput(index).GetInst() == binop && uinst->GetSrcReg(index) == compiler::ACC_REG_ID) {
return false;
}
}

constexpr uint64_t bitmask = 0xffffffff;
// Define min and max values of i4 type.
constexpr int32_t min = -8;
constexpr int32_t max = 7;

int32_t imm = binop->GetImm() & bitmask;
// Note: subi 3 is the same as inci v2, -3.
if (binop->GetOpcode() == compiler::Opcode::SubI) {
imm = -imm;
}

// IncI works only with 4 bits immediates.
return imm >= min && imm <= max;
}

} // namespace panda::bytecodeopt
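
For reference, the immediate-range rule that CanConvertToIncI ends with can be exercised in isolation. A minimal sketch assuming plain integer inputs; FitsInciImmediate is a hypothetical helper name, and only the range logic is taken from the function above:

#include <cassert>
#include <cstdint>

// Hypothetical standalone version of the range check above: negate the
// immediate for SubI ("subi 3" behaves like "inci v, -3"), then require
// it to fit the signed 4-bit range [-8, 7] that inci encodes.
static bool FitsInciImmediate(uint64_t raw_imm, bool is_subi)
{
    constexpr uint64_t bitmask = 0xffffffff;
    auto imm = static_cast<int32_t>(raw_imm & bitmask);
    if (is_subi) {
        imm = -imm;
    }
    constexpr int32_t min = -8;
    constexpr int32_t max = 7;
    return imm >= min && imm <= max;
}

int main()
{
    assert(FitsInciImmediate(7, false));   // addi 7 -> inci v, 7
    assert(FitsInciImmediate(8, true));    // subi 8 -> inci v, -8
    assert(!FitsInciImmediate(8, false));  // 8 does not fit a signed i4
}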
@@ -19,10 +19,6 @@
#include "compiler/optimizer/ir/constants.h"
#include "compiler/optimizer/ir/inst.h"

namespace panda::compiler {
class BinaryImmOperation;
} // namespace panda::compiler

namespace panda::bytecodeopt {
static constexpr compiler::Register MIN_REGISTER_NUMBER = 0;
static constexpr compiler::Register MAX_NUM_SHORT_CALL_ARGS = 2;
@@ -33,7 +29,6 @@ static constexpr panda::compiler::Register NUM_COMPACTLY_ENCODED_REGS = 16;
// Get the position where accumulator read happens.
uint8_t AccReadIndex(const compiler::Inst *inst);

bool CanConvertToIncI(const compiler::BinaryImmOperation *binop);
} // namespace panda::bytecodeopt

#endif // PANDA_BYTECODE_OPTIMIZER_COMMON_H
@@ -45,17 +45,17 @@ namespace panda::bytecodeopt {
panda::bytecodeopt::Options options("");

template <typename T>
constexpr void RunOpts(compiler::Graph *graph, [[maybe_unused]] BytecodeOptIrInterface *iface)
constexpr void RunOpts(compiler::Graph *graph)
{
graph->RunPass<compiler::Cleanup>();
graph->RunPass<T>();
}

template <typename First, typename Second, typename... Rest>
constexpr void RunOpts(compiler::Graph *graph, BytecodeOptIrInterface *iface = nullptr)
constexpr void RunOpts(compiler::Graph *graph)
{
RunOpts<First>(graph, iface);
RunOpts<Second, Rest...>(graph, iface);
RunOpts<First>(graph);
RunOpts<Second, Rest...>(graph);
}

bool RunOptimizations(compiler::Graph *graph, BytecodeOptIrInterface *iface)
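
The two RunOpts overloads above form a recursive variadic dispatcher: the single-parameter overload runs Cleanup plus one pass, and the pack overload peels passes off one at a time. A self-contained sketch of the same pattern; Graph, Cleanup, Lowering and RegAlloc here are placeholder stand-ins, not the real compiler classes:

#include <iostream>

struct Graph {};
struct Cleanup {};
struct Lowering {};
struct RegAlloc {};

template <typename T>
void RunOpts(Graph *graph)
{
    // The real version runs compiler::Cleanup and then graph->RunPass<T>().
    (void)graph;
    std::cout << "running one pass\n";
}

template <typename First, typename Second, typename... Rest>
void RunOpts(Graph *graph)
{
    RunOpts<First>(graph);            // peel the first pass off the pack
    RunOpts<Second, Rest...>(graph);  // recurse on the remaining passes
}

int main()
{
    Graph g;
    RunOpts<Cleanup, Lowering, RegAlloc>(&g);  // runs three passes, in order
}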
@@ -88,14 +88,6 @@ bool RegAccAlloc::IsAccRead(compiler::Inst *inst) const
return UNLIKELY(inst->IsPhi()) ? IsPhiOptimizable(inst) : inst->IsAccRead();
}

bool UserNeedSwapInputs(compiler::Inst *inst, compiler::Inst *user)
{
if (!user->IsCommutative()) {
return false;
}
return user->GetInput(AccReadIndex(user)).GetInst() != inst;
}

/**
* Return true if instruction can write the accumulator.
*/
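
The UserNeedSwapInputs helper shown above encodes a small rule: a commutative user can take the accumulator in either operand slot, so its inputs only need swapping when the producing instruction is not already at the acc-read position. A toy model of that decision with simplified stand-in types (Inst here is not the real compiler class, and slot 0 stands in for AccReadIndex(user)):

#include <cassert>
#include <vector>

struct Inst {
    std::vector<const Inst *> inputs;
    bool commutative = false;
    bool IsCommutative() const { return commutative; }
};

static bool UserNeedSwapInputs(const Inst *inst, const Inst *user)
{
    if (!user->IsCommutative()) {
        return false;  // operand order is fixed; swapping would change semantics
    }
    return user->inputs[0] != inst;  // swap only if inst is not in the acc slot
}

int main()
{
    Inst producer;
    Inst other;
    Inst add;  // commutative user, e.g. an integer add
    add.commutative = true;

    add.inputs = {&other, &producer};
    assert(UserNeedSwapInputs(&producer, &add));   // producer sits in slot 1

    add.inputs = {&producer, &other};
    assert(!UserNeedSwapInputs(&producer, &add));  // already in the acc slot
}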
@@ -164,21 +156,13 @@ bool RegAccAlloc::IsPhiAccReady(compiler::Inst *phi) const
}
}

std::unordered_set<compiler::Inst *> users_that_required_swap_inputs;
for (auto &user : phi->GetUsers()) {
compiler::Inst *uinst = user.GetInst();

if (!CanUserReadAcc(phi, uinst)) {
return false;
}
if (UserNeedSwapInputs(phi, uinst)) {
users_that_required_swap_inputs.insert(uinst);
}
}
for (auto uinst : users_that_required_swap_inputs) {
uinst->SwapInputs();
}

return true;
}

@@ -223,25 +207,13 @@ bool RegAccAlloc::RunImpl()
}
for (size_t i = 0; i < inst->GetInputsCount(); ++i) {
inst->SetSrcReg(i, compiler::INVALID_REG);
if ((inst->GetOpcode() == compiler::Opcode::LoadObject) || (inst->IsConst())) {
if (inst->IsConst()) {
inst->SetDstReg(compiler::INVALID_REG);
}
}
}
}

// Drop the pass if the function contains unsupported opcodes
// TODO(rtakacs): support these opcodes.
if (!GetGraph()->IsDynamicMethod()) {
for (auto block : GetGraph()->GetBlocksRPO()) {
for (auto inst : block->AllInsts()) {
if (inst->GetOpcode() == compiler::Opcode::Builtin) {
return false;
}
}
}
}

// Mark Phi instructions if they can be optimized for acc.
for (auto block : GetGraph()->GetBlocksRPO()) {
for (auto phi : block->PhiInsts()) {
@@ -260,28 +232,21 @@ bool RegAccAlloc::RunImpl()

bool use_acc_dst_reg = true;

std::unordered_set<compiler::Inst *> users_that_required_swap_inputs;
for (auto &user : inst->GetUsers()) {
compiler::Inst *uinst = user.GetInst();
if (uinst->IsSaveState()) {
continue;
}
if (CanUserReadAcc(inst, uinst)) {
if (UserNeedSwapInputs(inst, uinst)) {
users_that_required_swap_inputs.insert(uinst);
}
SetNeedLda(uinst, false);
} else {
use_acc_dst_reg = false;
}
}
for (auto uinst : users_that_required_swap_inputs) {
uinst->SwapInputs();
}

if (use_acc_dst_reg) {
inst->SetDstReg(compiler::ACC_REG_ID);
} else if ((inst->GetOpcode() == compiler::Opcode::LoadObject) || inst->IsConst()) {
} else if (inst->IsConst()) {
inst->ClearFlag(compiler::inst_flags::ACC_WRITE);
for (auto &user : inst->GetUsers()) {
compiler::Inst *uinst = user.GetInst();
@@ -308,7 +273,7 @@ bool RegAccAlloc::RunImpl()
input->SetDstReg(compiler::INVALID_REG);
SetNeedLda(inst, true);

if ((input->GetOpcode() == compiler::Opcode::LoadObject) || (input->IsConst())) {
if (input->IsConst()) {
input->ClearFlag(compiler::inst_flags::ACC_WRITE);
for (auto &user : input->GetUsers()) {
compiler::Inst *uinst = user.GetInst();
@@ -342,7 +342,7 @@ static bool IsAccReadPosition(compiler::Inst *inst, size_t pos)
void RegEncoder::InsertSpillsForDynInputsInst(compiler::Inst *inst)
{
ASSERT(state_ == RegEncoderState::INSERT_SPILLS);
ASSERT(inst->IsStaticCall() || inst->IsVirtualCall() || inst->IsInitObject() || inst->IsIntrinsic());
ASSERT(inst->IsIntrinsic());

RegContentMap spill_map(GetGraph()->GetLocalAllocator()->Adapter()); // src -> (dst, src_type), non-callrange
RegContentVec spill_vec(GetGraph()->GetLocalAllocator()->Adapter()); // spill_vec is used to handle callrange
@@ -435,7 +435,7 @@ void RegEncoder::CalculateNumNeededTempsForInst(compiler::Inst *inst)
if (IsIntrinsicRange(inst)) {
return;
}
ASSERT(inst->IsStaticCall() || inst->IsVirtualCall() || inst->IsInitObject() || inst->IsIntrinsic());
ASSERT(inst->IsIntrinsic());

auto nargs = inst->GetInputsCount() - (inst->RequireState() ? 1 : 0);
size_t start = 0;
@@ -127,12 +127,7 @@ public:

#include "generated/check_width.h"

void VisitDefault(Inst *inst) override
{
LOG(ERROR, BYTECODE_OPTIMIZER) << "Opcode " << compiler::GetOpcodeString(inst->GetOpcode())
<< " not yet implemented in RegEncoder";
success_ = false;
}
void VisitDefault(Inst *inst) override {}

#include "compiler/optimizer/ir/visitor.inc"
@@ -41,69 +41,16 @@ public:
return const_cast<panda_file::File *>(&panda_file_);
}

MethodId ResolveMethodIndex(MethodPtr parent_method, MethodIndex index) const override
{
return panda_file_.ResolveMethodIndex(MethodCast(parent_method), index).GetOffset();
}

uint32_t ResolveOffsetByIndex(MethodPtr parent_method, uint16_t index) const override
{
return panda_file_.ResolveOffsetByIndex(MethodCast(parent_method), index).GetOffset();
}

FieldId ResolveFieldIndex(MethodPtr parent_method, FieldIndex index) const override
{
return panda_file_.ResolveFieldIndex(MethodCast(parent_method), index).GetOffset();
}

IdType ResolveTypeIndex(MethodPtr parent_method, TypeIndex index) const override
{
return panda_file_.ResolveClassIndex(MethodCast(parent_method), index).GetOffset();
}

MethodPtr GetMethodById([[maybe_unused]] MethodPtr caller, MethodId id) const override
{
return reinterpret_cast<MethodPtr>(id);
}

MethodId GetMethodId(MethodPtr method) const override
{
return static_cast<MethodId>(reinterpret_cast<uintptr_t>(method));
}

compiler::DataType::Type GetMethodReturnType(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
panda_file::ProtoDataAccessor pda(panda_file_, mda.GetProtoId());

return ToCompilerType(panda_file::GetEffectiveType(pda.GetReturnType()));
}

compiler::DataType::Type GetMethodTotalArgumentType(MethodPtr method, size_t index) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));

if (!mda.IsStatic()) {
if (index == 0) {
return ToCompilerType(
panda_file::GetEffectiveType(panda_file::Type(panda_file::Type::TypeId::REFERENCE)));
}
--index;
}

panda_file::ProtoDataAccessor pda(panda_file_, mda.GetProtoId());
return ToCompilerType(panda_file::GetEffectiveType(pda.GetArgType(index)));
}

compiler::DataType::Type GetMethodArgumentType([[maybe_unused]] MethodPtr caller, MethodId id,
size_t index) const override
{
panda_file::MethodDataAccessor mda(panda_file_, panda_file::File::EntityId(id));
panda_file::ProtoDataAccessor pda(panda_file_, mda.GetProtoId());

return ToCompilerType(panda_file::GetEffectiveType(pda.GetArgType(index)));
}

size_t GetMethodTotalArgumentsCount(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
@@ -122,11 +69,6 @@ public:
return pda.GetNumArgs();
}

compiler::DataType::Type GetMethodReturnType(MethodPtr caller, MethodId id) const override
{
return GetMethodReturnType(GetMethodById(caller, id));
}

size_t GetMethodRegistersCount(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
@@ -169,20 +111,6 @@ public:
return static_cast<compiler::SourceLanguage>(source_lang.value());
}

size_t GetClassIdForField([[maybe_unused]] MethodPtr method, size_t field_id) const override
{
panda_file::FieldDataAccessor fda(panda_file_, panda_file::File::EntityId(field_id));

return static_cast<size_t>(fda.GetClassId().GetOffset());
}

ClassPtr GetClassForField(FieldPtr field) const override
{
panda_file::FieldDataAccessor fda(panda_file_, FieldCast(field));

return reinterpret_cast<ClassPtr>(fda.GetClassId().GetOffset());
}

size_t GetClassIdForMethod(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
@@ -190,50 +118,6 @@ public:
return static_cast<size_t>(mda.GetClassId().GetOffset());
}

size_t GetClassIdForMethod([[maybe_unused]] MethodPtr caller, size_t method_id) const override
{
panda_file::MethodDataAccessor mda(panda_file_, panda_file::File::EntityId(method_id));

return static_cast<size_t>(mda.GetClassId().GetOffset());
}

bool IsMethodExternal([[maybe_unused]] MethodPtr caller, MethodPtr callee) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(callee));

return mda.IsExternal();
}

bool IsMethodIntrinsic([[maybe_unused]] MethodPtr method) const override
{
return false;
}

bool IsMethodIntrinsic([[maybe_unused]] MethodPtr caller, [[maybe_unused]] MethodId id) const override
{
return false;
}

bool IsMethodStatic(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));

return mda.IsStatic();
}

bool IsMethodStatic([[maybe_unused]] MethodPtr caller, MethodId id) const override
{
panda_file::MethodDataAccessor mda(panda_file_, panda_file::File::EntityId(id));

return mda.IsStatic();
}

// return true if the method is Jni with exception
bool HasNativeException([[maybe_unused]] MethodPtr method) const override
{
return false;
}

std::string GetClassNameFromMethod(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
@@ -243,13 +127,6 @@ public:
return std::string(reinterpret_cast<const char *>(string_data.data));
}

std::string GetClassName(ClassPtr cls) const override
{
auto string_data = panda_file_.GetStringData(ClassCast(cls));

return std::string(reinterpret_cast<const char *>(string_data.data));
}

std::string GetMethodName(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));
@@ -259,23 +136,6 @@ public:
return std::string(reinterpret_cast<const char *>(string_data.data));
}

bool IsConstructor(MethodPtr method, uint32_t class_id) override
{
if (GetClassIdForMethod(method) != class_id) {
return false;
}

panda_file::File::EntityId entity_id(class_id);
panda_file::SourceLang lang = panda_file::SourceLang::PANDA_ASSEMBLY;

if (!panda_file_.IsExternal(entity_id)) {
panda_file::ClassDataAccessor cda(panda_file_, entity_id);
lang = cda.GetSourceLang().value_or(lang);
}

return GetMethodName(method) == GetCtorName(lang);
}

std::string GetMethodFullName(MethodPtr method, bool /* with_signature */) const override
{
auto class_name = GetClassNameFromMethod(method);
@@ -284,69 +144,6 @@ public:
return class_name + "::" + method_name;
}

ClassPtr GetClass(MethodPtr method) const override
{
panda_file::MethodDataAccessor mda(panda_file_, MethodCast(method));

return reinterpret_cast<ClassPtr>(mda.GetClassId().GetOffset());
}

std::string GetBytecodeString(MethodPtr method, uintptr_t pc) const override
{
// NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
BytecodeInstruction inst(GetMethodCode(method) + pc);
std::stringstream ss;

ss << inst;
return ss.str();
}

bool IsArrayClass([[maybe_unused]] MethodPtr method, IdType id) const override
{
panda_file::File::EntityId cid(id);

return panda_file::IsArrayDescriptor(panda_file_.GetStringData(cid).data);
}

FieldPtr ResolveField([[maybe_unused]] MethodPtr method, size_t id, [[maybe_unused]] bool allow_external,
uint32_t * /* class_id */) override
{
return reinterpret_cast<FieldPtr>(id);
}

compiler::DataType::Type GetFieldType(FieldPtr field) const override
{
panda_file::FieldDataAccessor fda(panda_file_, FieldCast(field));

return ToCompilerType(panda_file::Type::GetTypeFromFieldEncoding(fda.GetType()));
}

compiler::DataType::Type GetFieldTypeById([[maybe_unused]] MethodPtr parent_method, IdType id) const override
{
panda_file::FieldDataAccessor fda(panda_file_, panda_file::File::EntityId(id));

return ToCompilerType(panda_file::Type::GetTypeFromFieldEncoding(fda.GetType()));
}

bool IsFieldVolatile(FieldPtr field) const override
{
panda_file::FieldDataAccessor fda(panda_file_, FieldCast(field));

return fda.IsVolatile();
}

ClassPtr ResolveType([[maybe_unused]] MethodPtr method, size_t id) const override
{
return reinterpret_cast<ClassPtr>(id);
}

std::string GetFieldName(FieldPtr field) const override
{
panda_file::FieldDataAccessor fda(panda_file_, FieldCast(field));
auto string_data = panda_file_.GetStringData(fda.GetNameId());
return utf::Mutf8AsCString(string_data.data);
}

TypeInfoIndex GetTypeInfoIndexByInstId(size_t id) const override
{
const auto it = instid_type_map_.find(id);
@@ -414,16 +211,6 @@ private:
switch (type.GetId()) {
case panda_file::Type::TypeId::VOID:
return compiler::DataType::VOID;
case panda_file::Type::TypeId::U1:
return compiler::DataType::BOOL;
case panda_file::Type::TypeId::I8:
return compiler::DataType::INT8;
case panda_file::Type::TypeId::U8:
return compiler::DataType::UINT8;
case panda_file::Type::TypeId::I16:
return compiler::DataType::INT16;
case panda_file::Type::TypeId::U16:
return compiler::DataType::UINT16;
case panda_file::Type::TypeId::I32:
return compiler::DataType::INT32;
case panda_file::Type::TypeId::U32:
@@ -432,8 +219,6 @@ private:
return compiler::DataType::INT64;
case panda_file::Type::TypeId::U64:
return compiler::DataType::UINT64;
case panda_file::Type::TypeId::F32:
return compiler::DataType::FLOAT32;
case panda_file::Type::TypeId::F64:
return compiler::DataType::FLOAT64;
case panda_file::Type::TypeId::REFERENCE:
@@ -452,16 +237,6 @@ private:
return panda_file::File::EntityId(reinterpret_cast<uintptr_t>(method));
}

static panda_file::File::EntityId ClassCast(RuntimeInterface::ClassPtr cls)
{
return panda_file::File::EntityId(reinterpret_cast<uintptr_t>(cls));
}

static panda_file::File::EntityId FieldCast(RuntimeInterface::FieldPtr field)
{
return panda_file::File::EntityId(reinterpret_cast<uintptr_t>(field));
}

const panda_file::File &panda_file_;
std::unordered_map<size_t, TypeInfoIndex> instid_type_map_;
std::unordered_map<int32_t, TypeInfoIndex> pc_type_map_;
@@ -22,10 +22,6 @@
#include "libpandabase/mem/mem.h"
#include "utils/bit_utils.h"

namespace panda {
class ObjectHeader;
} // namespace panda

namespace panda::coretypes {

// Every double with all of its exponent bits set and its highest mantissa bit set is a quiet NaN.
@@ -51,8 +47,6 @@ namespace panda::coretypes {

using TaggedType = uint64_t;

static const TaggedType NULL_POINTER = 0;

inline TaggedType ReinterpretDoubleToTaggedType(double value)
{
return bit_cast<TaggedType>(value);
@@ -94,6 +88,7 @@ public:

TaggedValue(void *) = delete;

static const TaggedType NULL_POINTER = 0;
constexpr TaggedValue() : value_(NULL_POINTER) {}

constexpr explicit TaggedValue(TaggedType v) : value_(v) {}
@@ -109,31 +104,6 @@ public:
value_ = TaggedValue(static_cast<int32_t>(v)).GetRawData();
}

static uint64_t GetIntTaggedValue(uint64_t v)
{
ASSERT(INT32_MIN <= static_cast<int32_t>(bit_cast<int64_t>(v)));
ASSERT(static_cast<int32_t>(bit_cast<int64_t>(v)) <= INT32_MAX);
return static_cast<uint32_t>(v) | TAG_INT;
}

static uint64_t GetDoubleTaggedValue(uint64_t v)
{
return v + DOUBLE_ENCODE_OFFSET;
}

static uint64_t GetBoolTaggedValue(uint64_t v)
{
ASSERT(v == 0 || v == 1);
return (v == 0) ? static_cast<uint64_t>(coretypes::TaggedValue::False().GetRawData())
: static_cast<uint64_t>(coretypes::TaggedValue::True().GetRawData());
}

static uint64_t GetObjectTaggedValue(uint64_t v)
{
ASSERT(static_cast<uint32_t>(v) == v);
return v;
}

explicit TaggedValue(int64_t v)
{
if (UNLIKELY(static_cast<int32_t>(v) != v)) {
@@ -159,181 +129,21 @@ public:

explicit TaggedValue(const ObjectHeader *v) : value_(static_cast<TaggedType>(ToUintPtr(v))) {}

inline void CreateWeakRef()
{
ASSERT_PRINT(IsHeapObject() && ((value_ & TAG_WEAK_FILTER) == 0U),
"The least significant two bits of TaggedValue are not zero.");
value_ = value_ | TAG_WEAK_MASK;
}

inline void RemoveWeakTag()
{
ASSERT_PRINT(IsHeapObject() && ((value_ & TAG_WEAK_MASK) == 1U), "The tagged value is not a weak ref.");
value_ = value_ & (~TAG_WEAK_FILTER);
}

inline TaggedValue CreateAndGetWeakRef()
{
ASSERT_PRINT(IsHeapObject() && ((value_ & TAG_WEAK_FILTER) == 0U),
"The least significant two bits of TaggedValue are not zero.");
return TaggedValue(value_ | TAG_WEAK_MASK);
}

inline bool IsWeak() const
{
return IsHeapObject() && ((value_ & TAG_WEAK_MASK) == 1U);
}

inline bool IsDouble() const
{
return !IsInt() && !IsObject();
}

inline bool IsInt() const
{
return (value_ & TAG_MASK) == TAG_INT;
}

inline bool IsSpecial() const
{
return ((value_ & (~TAG_SPECIAL_MASK)) == 0U) && (((value_ & TAG_SPECIAL_VALUE) != 0U) || IsHole());
}

inline bool IsObject() const
{
return ((value_ & TAG_MASK) == TAG_OBJECT);
}

inline bool IsHeapObject() const
{
return IsObject() && !IsSpecial();
}

inline bool IsNumber() const
{
return !IsObject();
}

inline bool IsBoolean() const
{
return value_ == VALUE_FALSE || value_ == VALUE_TRUE;
}

inline double GetDouble() const
{
ASSERT_PRINT(IsDouble(), "can not convert TaggedValue to Double : " << std::hex << value_);
return ReinterpretTaggedTypeToDouble(value_ - DOUBLE_ENCODE_OFFSET);
}

inline int GetInt() const
{
ASSERT_PRINT(IsInt(), "can not convert TaggedValue to Int :" << std::hex << value_);
return static_cast<int>(value_ & (~TAG_MASK));
}

inline constexpr TaggedType GetRawData() const
{
return value_;
}

inline ObjectHeader *GetHeapObject() const
{
ASSERT_PRINT(IsHeapObject(), "can not convert TaggedValue to HeapObject :" << std::hex << value_);
// TODO(vpukhov): weakref ignored
// ASSERT_PRINT((value_ & TAG_WEAK_FILTER) == 0U,
// "can not convert TaggedValue to HeapObject :" << std::hex << value_);
return reinterpret_cast<ObjectHeader *>(value_ & (~TAG_WEAK_MASK));
}

// This function returns the heap object pointer which may have the weak tag.
inline ObjectHeader *GetRawHeapObject() const
{
ASSERT_PRINT(IsHeapObject(), "can not convert TaggedValue to HeapObject :" << std::hex << value_);
return reinterpret_cast<ObjectHeader *>(value_);
}

inline ObjectHeader *GetWeakReferent() const
{
ASSERT_PRINT(IsWeak(), "can not convert TaggedValue to WeakRef HeapObject :" << std::hex << value_);
return reinterpret_cast<ObjectHeader *>(value_ & (~TAG_WEAK_MASK));
}

static inline TaggedType Cast(void *ptr)
{
ASSERT_PRINT(sizeof(void *) == TaggedTypeSize(), "32bit platform is not support yet");
return static_cast<TaggedType>(ToUintPtr(ptr));
}

inline bool IsFalse() const
{
return value_ == VALUE_FALSE;
}

inline bool IsTrue() const
{
return value_ == VALUE_TRUE;
}

inline bool IsUndefined() const
{
return value_ == VALUE_UNDEFINED;
}

inline bool IsNull() const
{
return value_ == VALUE_NULL;
}

inline bool IsUndefinedOrNull() const
{
return IsNull() || IsUndefined();
}

inline bool IsHole() const
{
return value_ == VALUE_HOLE;
}

inline bool IsException() const
{
return value_ == VALUE_EXCEPTION;
}

static inline constexpr TaggedValue False()
{
return TaggedValue(VALUE_FALSE);
}

static inline constexpr TaggedValue True()
{
return TaggedValue(VALUE_TRUE);
}

static inline constexpr TaggedValue Undefined()
{
return TaggedValue(VALUE_UNDEFINED);
}

static inline constexpr TaggedValue Null()
{
return TaggedValue(VALUE_NULL);
}

static inline constexpr TaggedValue Hole()
{
return TaggedValue(VALUE_HOLE);
}

static inline constexpr TaggedValue Exception()
{
return TaggedValue(VALUE_EXCEPTION);
}

static inline constexpr size_t TaggedTypeSize()
{
return sizeof(TaggedType);
}

static inline bool IsImpureNaN(double value)
{
// Tests if the double value would break tagged double encoding.
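
The boxing helpers above (GetDoubleTaggedValue / GetDouble) shift every double by DOUBLE_ENCODE_OFFSET so its bit pattern cannot collide with the integer and pointer tags. A standalone sketch of that round trip; the offset value below is illustrative only, since the real constant is defined elsewhere in tagged_value.h:

#include <cassert>
#include <cstdint>
#include <cstring>

using TaggedType = uint64_t;

// Illustrative stand-in for the real DOUBLE_ENCODE_OFFSET constant.
constexpr TaggedType DOUBLE_ENCODE_OFFSET = 1ULL << 48;

static TaggedType ReinterpretDoubleToTaggedType(double value)
{
    TaggedType bits;
    std::memcpy(&bits, &value, sizeof(bits));  // bit_cast in the real code
    return bits;
}

static double ReinterpretTaggedTypeToDouble(TaggedType value)
{
    double result;
    std::memcpy(&result, &value, sizeof(result));
    return result;
}

int main()
{
    const double pi = 3.141592653589793;
    // Box: reinterpret the bits, then add the encode offset.
    TaggedType tagged = ReinterpretDoubleToTaggedType(pi) + DOUBLE_ENCODE_OFFSET;
    // Unbox: subtract the offset and reinterpret back, as GetDouble() does.
    double back = ReinterpretTaggedTypeToDouble(tagged - DOUBLE_ENCODE_OFFSET);
    assert(back == pi);  // the round trip is exact
}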
@@ -50,11 +50,6 @@ void RegEncoder::VisitIfImm([[maybe_unused]] GraphVisitor* v, Inst* inst_base) {
re->success_ = false;
}
}
void RegEncoder::VisitCast([[maybe_unused]] GraphVisitor* v, Inst* inst_base) {
[[maybe_unused]] auto re = static_cast<RegEncoder*>(v);
[[maybe_unused]] auto inst = inst_base->CastToCast();
return;
}
% call_me_from_template

% visitors.each do |visitor|
@@ -19,7 +19,6 @@ ohos_executable("bcopt_type_adapter_unit_test") {
configs = [
"$ark_root:ark_config",
"$ark_root/assembler:arkassembler_public_config",
"$ark_root/runtime:arkruntime_public_config",
"$ark_root/libpandabase:arkbase_public_config",
"$ark_root/libpandafile:arkfile_public_config",
"$ark_root/compiler:arkcompiler_public_config",
@@ -1,122 +0,0 @@
/**
* Copyright (c) 2021-2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <gtest/gtest.h>

#include "assembler/assembly-emitter.h"
#include "assembler/assembly-parser.h"
#include "bytecode_optimizer/optimize_bytecode.h"
#include "runtime/include/class_linker.h"
#include "runtime/include/runtime.h"
#include "mangling.h"

namespace panda::bytecodeopt::test {

class BytecodeOptPeepholes : public testing::Test {
public:
BytecodeOptPeepholes()
{
RuntimeOptions options;
options.SetHeapSizeLimit(128_MB);
options.SetShouldLoadBootPandaFiles(false);
options.SetShouldInitializeIntrinsics(false);
Logger::InitializeDummyLogging();

Runtime::Create(options);
thread_ = panda::MTManagedThread::GetCurrent();
thread_->ManagedCodeBegin();
}

~BytecodeOptPeepholes()
{
thread_->ManagedCodeEnd();
Runtime::Destroy();
}

protected:
panda::MTManagedThread *thread_;
};

TEST_F(BytecodeOptPeepholes, TryBlock)
{
pandasm::Parser p;

auto source = R"(
.record E {}
.record R {
u1 field
}

.function void R.ctor(R a0) <ctor> {
newobj v0, E
throw v0
}

.function u8 try_catch() {
try_begin:
movi v1, 0x1
newobj v0, R
movi v1, 0x2
call.short R.ctor, v0
try_end:
ldai 0x0
return
catch_all:
lda v1
return
.catchall try_begin, try_end, catch_all
}
)";

auto res = p.Parse(source);
auto &program = res.Value();
pandasm::AsmEmitter::PandaFileToPandaAsmMaps maps;
std::string file_name = "bc_peepholes";
auto piece = pandasm::AsmEmitter::Emit(file_name, program, nullptr, &maps);
ASSERT_NE(piece, false);

EXPECT_TRUE(OptimizeBytecode(&program, &maps, file_name, false, true));

// Check if there is initobj instruction in the bytecode
bool contains_initobj = false;
const auto sig_try_catch = pandasm::GetFunctionSignatureFromName("try_catch", {});
for (const auto &inst : program.function_table.at(sig_try_catch).ins) {
if (inst.opcode == pandasm::Opcode::INITOBJ) {
contains_initobj = true;
}
}
EXPECT_FALSE(contains_initobj);

auto pf = pandasm::AsmEmitter::Emit(program);
ASSERT_NE(pf, nullptr);

ClassLinker *class_linker = Runtime::GetCurrent()->GetClassLinker();
class_linker->AddPandaFile(std::move(pf));
auto *extension = class_linker->GetExtension(panda_file::SourceLang::PANDA_ASSEMBLY);
PandaString descriptor;

auto *klass = extension->GetClass(ClassHelper::GetDescriptor(utf::CStringAsMutf8("_GLOBAL"), &descriptor));
ASSERT_NE(klass, nullptr);

Method *method = klass->GetDirectMethod(utf::CStringAsMutf8("try_catch"));
ASSERT_NE(method, nullptr);

std::vector<Value> args;
args.emplace_back(Value(1, interpreter::TypeTag::INT));
Value v = method->Invoke(ManagedThread::GetCurrent(), args.data());
EXPECT_EQ(v.GetAsLong(), 0x2);
}

} // namespace panda::bytecodeopt::test
@@ -1,230 +0,0 @@
/**
* Copyright (c) 2021-2022 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "bytecodeopt_peepholes.h"
#include "common.h"
#include "compiler/optimizer/optimizations/cleanup.cpp"

namespace panda::bytecodeopt::test {

TEST_F(IrBuilderTest, PeepholesTryBlockInstBetween)
{
auto source = R"(
.record E {}
.record R {
u1 field
}

.function void R.ctor(R a0) <ctor> {
newobj v0, E
throw v0
}

.function u8 main() {
try_begin:
movi v1, 0x1
newobj v0, R
movi v1, 0x2
call.short R.ctor, v0
try_end:
ldai 0x0
return
catch_all:
lda v1
return
.catchall try_begin, try_end, catch_all
}
)";
ASSERT_TRUE(ParseToGraph(source, "main"));

EXPECT_FALSE(GetGraph()->RunPass<BytecodeOptPeepholes>());
}

TEST_F(IrBuilderTest, PeepholesTryBlockNoInstBetween)
{
auto source = R"(
.record E {}
.record R {
u1 field
}

.function void R.ctor(R a0) <ctor> {
newobj v0, E
throw v0
}

.function u8 main() {
try_begin:
movi v1, 0x1
newobj v0, R
call.short R.ctor, v0
try_end:
ldai 0x0
return
catch_all:
lda v1
return
.catchall try_begin, try_end, catch_all
}
)";
ASSERT_TRUE(ParseToGraph(source, "main"));

EXPECT_TRUE(GetGraph()->RunPass<BytecodeOptPeepholes>());
}

// TODO(aromanov): enable
TEST_F(CommonTest, DISABLED_NoNullCheck)
{
RuntimeInterfaceMock runtime(0);
auto graph = CreateEmptyGraph();
graph->SetRuntime(&runtime);
GRAPH(graph)
{
using namespace compiler::DataType;
CONSTANT(6, 0);
BASIC_BLOCK(2, -1)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0).TypeId(68);
INST(2, Opcode::NewObject).ref().Inputs(1, 0).TypeId(68);
INST(3, Opcode::SaveState).NoVregs();
INST(5, Opcode::CallStatic).v0id().Inputs({{REFERENCE, 2}, {NO_TYPE, 3}});
INST(7, Opcode::Return).s32().Inputs(6);
}
}

EXPECT_TRUE(graph->RunPass<BytecodeOptPeepholes>());
EXPECT_TRUE(graph->RunPass<compiler::Cleanup>());

auto after = CreateEmptyGraph();
GRAPH(after)
{
using namespace compiler::DataType;
CONSTANT(6, 0);
BASIC_BLOCK(2, -1)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0).TypeId(68);
INST(3, Opcode::SaveState).NoVregs();
INST(8, Opcode::InitObject).ref().Inputs({{REFERENCE, 1}, {NO_TYPE, 3}});
INST(7, Opcode::Return).s32().Inputs(6);
}
}

EXPECT_TRUE(GraphComparator().Compare(graph, after));
}

// TODO(aromanov): enable
TEST_F(CommonTest, DISABLED_NotRelatedNullCheck)
{
RuntimeInterfaceMock runtime(1);
auto graph = CreateEmptyGraph();
graph->SetRuntime(&runtime);
GRAPH(graph)
{
using namespace compiler::DataType;
PARAMETER(10, 0).ref();
CONSTANT(6, 0);
BASIC_BLOCK(2, -1)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0);
INST(2, Opcode::NewObject).ref().Inputs(1, 0);
INST(3, Opcode::SaveState).NoVregs();
INST(4, Opcode::NullCheck).ref().Inputs(10, 3);
INST(5, Opcode::CallStatic).v0id().Inputs({{REFERENCE, 2}, {NO_TYPE, 3}});
INST(7, Opcode::Return).s32().Inputs(6);
}
}

EXPECT_FALSE(graph->RunPass<BytecodeOptPeepholes>());
}

TEST_F(CommonTest, CallStaticOtherBasicBlock)
{
RuntimeInterfaceMock runtime(1);
auto graph = CreateEmptyGraph();
graph->SetRuntime(&runtime);
GRAPH(graph)
{
using namespace compiler::DataType;
PARAMETER(10, 0).ref();
CONSTANT(6, 0);
BASIC_BLOCK(2, 3)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0);
INST(2, Opcode::NewObject).ref().Inputs(1, 0);
INST(3, Opcode::SaveState).NoVregs();
}
BASIC_BLOCK(3, -1)
{
INST(5, Opcode::CallStatic).v0id().Inputs({{REFERENCE, 2}, {NO_TYPE, 3}});
INST(7, Opcode::Return).s32().Inputs(6);
}
}

EXPECT_FALSE(graph->RunPass<BytecodeOptPeepholes>());
}

// TODO(aromanov): enable
TEST_F(CommonTest, DISABLED_NoSaveStateNullCheckAfterNewObject)
{
RuntimeInterfaceMock runtime(0);
auto graph = CreateEmptyGraph();
graph->SetRuntime(&runtime);
GRAPH(graph)
{
using namespace compiler::DataType;
BASIC_BLOCK(2, -1)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0);
INST(2, Opcode::NewObject).ref().Inputs(1, 0);
CONSTANT(3, 0).s32();
INST(4, Opcode::SaveState).NoVregs();
INST(5, Opcode::CallStatic).v0id().Inputs({{REFERENCE, 2}, {NO_TYPE, 4}});
INST(6, Opcode::ReturnVoid).v0id();
}
}

EXPECT_FALSE(graph->RunPass<BytecodeOptPeepholes>());
}

TEST_F(CommonTest, CallConstructorOtherClass)
{
RuntimeInterfaceMock runtime(1, false);
auto graph = CreateEmptyGraph();
graph->SetRuntime(&runtime);
GRAPH(graph)
{
using namespace compiler::DataType;
PARAMETER(10, 0).ref();
CONSTANT(6, 0);
BASIC_BLOCK(2, -1)
{
INST(0, Opcode::SaveState).NoVregs();
INST(1, Opcode::LoadAndInitClass).ref().Inputs(0);
INST(2, Opcode::NewObject).ref().Inputs(1, 0);
INST(3, Opcode::SaveState).NoVregs();
INST(5, Opcode::CallStatic).v0id().Inputs({{REFERENCE, 2}, {NO_TYPE, 3}});
INST(7, Opcode::Return).s32().Inputs(6);
}
}

EXPECT_FALSE(graph->RunPass<BytecodeOptPeepholes>());
}

} // namespace panda::bytecodeopt::test
@ -18,10 +18,6 @@ config("arkcompiler_public_config") {
|
||||
"$ark_root/compiler/code_info",
|
||||
"$ark_root/compiler/optimizer/ir",
|
||||
"$target_gen_dir/generated",
|
||||
get_label_info("$ark_root/cross_values:cross_values_getters_generate",
|
||||
"target_gen_dir"),
|
||||
get_label_info("$ark_root/runtime:plugins_defines_h", "target_gen_dir") +
|
||||
"/asm_defines/generated",
|
||||
]
|
||||
|
||||
include_dirs += platform_include_dirs
|
||||
@ -50,20 +46,11 @@ config("arkcompiler_public_config") {
|
||||
libarkcompiler_sources = [
|
||||
"compiler_logger.cpp",
|
||||
"compiler_options.cpp",
|
||||
"optimizer/analysis/alias_analysis.cpp",
|
||||
"optimizer/analysis/bounds_analysis.cpp",
|
||||
"optimizer/analysis/countable_loop_parser.cpp",
|
||||
"optimizer/analysis/dominators_tree.cpp",
|
||||
"optimizer/analysis/linear_order.cpp",
|
||||
"optimizer/analysis/live_registers.cpp",
|
||||
"optimizer/analysis/liveness_analyzer.cpp",
|
||||
"optimizer/analysis/liveness_use_table.cpp",
|
||||
"optimizer/analysis/loop_analyzer.cpp",
|
||||
"optimizer/analysis/object_type_propagation.cpp",
|
||||
"optimizer/analysis/rpo.cpp",
|
||||
"optimizer/analysis/types_analysis.cpp",
|
||||
"optimizer/ir/analysis.cpp",
|
||||
"optimizer/ir/aot_data.cpp",
|
||||
"optimizer/ir/basicblock.cpp",
|
||||
"optimizer/ir/dump.cpp",
|
||||
"optimizer/ir/graph.cpp",
|
||||
@ -73,20 +60,9 @@ libarkcompiler_sources = [
  "optimizer/ir/locations.cpp",
  "optimizer/ir_builder/inst_builder.cpp",
  "optimizer/ir_builder/ir_builder.cpp",
  "optimizer/optimizations/adjust_arefs.cpp",
  "optimizer/optimizations/balance_expressions.cpp",
  "optimizer/optimizations/cleanup.cpp",
  "optimizer/optimizations/code_sink.cpp",
  "optimizer/optimizations/deoptimize_elimination.cpp",
  "optimizer/optimizations/if_conversion.cpp",
  "optimizer/optimizations/loop_peeling.cpp",
  "optimizer/optimizations/loop_unroll.cpp",
  "optimizer/optimizations/lowering.cpp",
  "optimizer/optimizations/memory_barriers.cpp",
  "optimizer/optimizations/memory_coalescing.cpp",
  "optimizer/optimizations/move_constants.cpp",
  "optimizer/optimizations/object_type_check_elimination.cpp",
  "optimizer/optimizations/redundant_loop_elimination.cpp",
  "optimizer/optimizations/regalloc/interference_graph.cpp",
  "optimizer/optimizations/regalloc/reg_alloc.cpp",
  "optimizer/optimizations/regalloc/reg_alloc_base.cpp",
@ -96,44 +72,12 @@ libarkcompiler_sources = [
  "optimizer/optimizations/regalloc/reg_map.cpp",
  "optimizer/optimizations/regalloc/spill_fills_resolver.cpp",
  "optimizer/optimizations/regalloc/split_resolver.cpp",
  "optimizer/optimizations/scheduler.cpp",
  "optimizer/optimizations/try_catch_resolving.cpp",
  "optimizer/optimizations/types_resolving.cpp",
  "optimizer/optimizations/vn.cpp",
  "optimizer/pass.cpp",
  "optimizer/pass_manager.cpp",
  "optimizer/pass_manager_statistics.cpp",
]

plugin_irts = []
foreach(plugin, enabled_plugins) {
  print("add compiler plugin: $plugin")
  plugin_dir = "$ark_root/plugins/$plugin"

  source_files = read_file("$plugin_dir/subproject_sources.gn", "scope")
  if (defined(source_files.srcs_compiler_path)) {
    source_file = "$plugin_dir/${source_files.srcs_compiler_path}"
    src_scope = read_file(source_file, "scope")
    if (defined(src_scope.srcs)) {
      foreach(src, src_scope.srcs) {
        libarkcompiler_sources += [ "$plugin_dir/compiler/$src" ]
      }
    }

    if (defined(source_files.irtoc_plugins)) {
      scripts = read_file("$plugin_dir/${source_files.irtoc_plugins}", "scope")
      foreach(script, scripts.srcs) {
        plugin_irts +=
            [ rebase_path("$plugin_dir/irtoc_scripts/$script", ark_root) ]
      }
    }

    src_scope = {
    }
  }
  source_files = []
}

libarkcompiler_sources += [ "$target_gen_dir/generated/inst_builder_gen.cpp" ]

libarkcompiler_configs = [
@ -142,7 +86,6 @@ libarkcompiler_configs = [
  "$ark_root:ark_config",
  "$ark_root/libpandabase:arkbase_public_config",
  "$ark_root/libpandafile:arkfile_public_config",
  "$ark_root/runtime:arkruntime_public_config",
]

ohos_shared_library("libarkcompiler") {
@ -157,7 +100,6 @@ ohos_shared_library("libarkcompiler") {
    ":compiler_logger_components_inc",
    ":compiler_options_gen_h",
    ":cpu_features_gen_inc",
    ":intrinsics_can_encode_inl",
    ":intrinsics_codegen_ext_inl_h",
    ":intrinsics_inline_inl",
    ":intrinsics_ir_build_inl_h",
@ -165,14 +107,10 @@ ohos_shared_library("libarkcompiler") {
    ":intrinsics_ir_build_virtual_call_inl",
    ":intrinsics_stub_inl",
    ":intrinsics_stub_inl_h",
    ":intrinsics_types_resolving_inl_h",
    ":ir_dyn_base_types_h",
    ":irtoc_builder_cpp",
    ":irtoc_generate_ir_inline",
    ":isa_gen_libarkcompiler_inst_builder_gen_cpp",
    ":libarkcompiler_ecma_intrinsics_enum_inl",
    ":libarkcompiler_generate_ecma_inl",
    ":libarkcompiler_intrinsics_gen_inl_can_encode_builtin_inl",
    ":libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_graph_inl",
    ":libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_inst_inl",
    ":libarkcompiler_intrinsics_gen_inl_get_intrinsics_inl",
@ -187,26 +125,14 @@ ohos_shared_library("libarkcompiler") {
    ":libarkcompiler_opcodes_h_IR-instructions_md",
    ":libarkcompiler_opcodes_h_arch_info_gen_h",
    ":libarkcompiler_opcodes_h_codegen_arm64_gen_inc",
    ":libarkcompiler_opcodes_h_deoptimize_elimination_call_visitors_inl",
    ":libarkcompiler_opcodes_h_inst_checker_gen_h",
    ":libarkcompiler_opcodes_h_opcodes_h",
    ":source_languages_h",
    "$ark_root/cross_values:cross_values_getters_generate(${default_toolchain})",
    "$ark_root/libpandabase:libarkbase",
    "$ark_root/libpandafile:isa_gen_libarkfile_bytecode_instruction-inl_gen_h",
    "$ark_root/libpandafile:isa_gen_libarkfile_bytecode_instruction_enum_gen_h",
    "$ark_root/libpandafile:libarkfile",
    "$ark_root/libpandafile:libarkfile_type_gen_h",
    "$ark_root/runtime:arkruntime_gen_entrypoints_compiler_checksum_entrypoints_compiler_checksum_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_compiler_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_gen_S",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_gen_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_enum_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_gen_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_unimplemented_intrinsics-inl_cpp",
    "$ark_root/runtime:plugins_entrypoints_gen_h",
    "$ark_root/runtime/asm_defines:asm_defines_generator",
    sdk_libc_secshared_dep,
  ]

@ -230,7 +156,6 @@ ohos_static_library("libarkcompiler_frontend_static") {
    ":compiler_logger_components_inc",
    ":compiler_options_gen_h",
    ":cpu_features_gen_inc",
    ":intrinsics_can_encode_inl",
    ":intrinsics_codegen_ext_inl_h",
    ":intrinsics_inline_inl",
    ":intrinsics_ir_build_inl_h",
@ -238,14 +163,10 @@ ohos_static_library("libarkcompiler_frontend_static") {
    ":intrinsics_ir_build_virtual_call_inl",
    ":intrinsics_stub_inl",
    ":intrinsics_stub_inl_h",
    ":intrinsics_types_resolving_inl_h",
    ":ir_dyn_base_types_h",
    ":irtoc_builder_cpp",
    ":irtoc_generate_ir_inline",
    ":isa_gen_libarkcompiler_inst_builder_gen_cpp",
    ":libarkcompiler_ecma_intrinsics_enum_inl",
    ":libarkcompiler_generate_ecma_inl",
    ":libarkcompiler_intrinsics_gen_inl_can_encode_builtin_inl",
    ":libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_graph_inl",
    ":libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_inst_inl",
    ":libarkcompiler_intrinsics_gen_inl_get_intrinsics_inl",
@ -260,26 +181,14 @@ ohos_static_library("libarkcompiler_frontend_static") {
    ":libarkcompiler_opcodes_h_IR-instructions_md",
    ":libarkcompiler_opcodes_h_arch_info_gen_h",
    ":libarkcompiler_opcodes_h_codegen_arm64_gen_inc",
    ":libarkcompiler_opcodes_h_deoptimize_elimination_call_visitors_inl",
    ":libarkcompiler_opcodes_h_inst_checker_gen_h",
    ":libarkcompiler_opcodes_h_opcodes_h",
    ":source_languages_h",
    "$ark_root/cross_values:cross_values_getters_generate(${default_toolchain})",
    "$ark_root/libpandabase:libarkbase_frontend_static",
    "$ark_root/libpandafile:isa_gen_libarkfile_bytecode_instruction-inl_gen_h",
    "$ark_root/libpandafile:isa_gen_libarkfile_bytecode_instruction_enum_gen_h",
    "$ark_root/libpandafile:libarkfile_frontend_static",
    "$ark_root/libpandafile:libarkfile_type_gen_h",
    "$ark_root/runtime:arkruntime_gen_entrypoints_compiler_checksum_entrypoints_compiler_checksum_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_compiler_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_gen_S",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_gen_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_enum_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_gen_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_intrinsics_h",
    "$ark_root/runtime:arkruntime_gen_intrinsics_unimplemented_intrinsics-inl_cpp",
    "$ark_root/runtime:plugins_entrypoints_gen_h",
    "$ark_root/runtime/asm_defines:asm_defines_generator",
    sdk_libc_secshared_dep,
  ]

@ -293,7 +202,7 @@ ark_isa_gen("isa_gen_libarkcompiler") {
}

ark_gen("libarkcompiler_intrinsics_gen_inl") {
  data = "$target_gen_dir/../runtime/intrinsics.yaml"
  data = "intrinsics.yaml"
  template_files = [
    "intrinsics_enum.inl.erb",
    "get_intrinsics.inl.erb",
@ -304,7 +213,6 @@ ark_gen("libarkcompiler_intrinsics_gen_inl") {
    "generate_operations_intrinsic_graph.inl.erb",
    "intrinsic_codegen_test.inl.erb",
    "intrinsic_flags_test.inl.erb",
    "can_encode_builtin.inl.erb",
    "intrinsics_codegen.inl.h.erb",
    "intrinsics_codegen.inl.erb",
    "intrinsics_flags.inl.erb",
@ -314,7 +222,6 @@ ark_gen("libarkcompiler_intrinsics_gen_inl") {
  requires = [
    "$ark_root/compiler/optimizer/templates/intrinsics/compiler_intrinsics.rb",
  ]
  extra_dependencies = [ "$ark_root/runtime:arkruntime_gen_intrinsics_yaml" ]
}

ark_isa_gen("libarkcompiler") {
@ -335,7 +242,6 @@ ark_gen("libarkcompiler_opcodes_h") {
    "inst_checker_gen.h.erb",
    "IR-instructions.md.erb",
    "codegen_arm64_gen.inc.erb",
    "deoptimize_elimination_call_visitors.inl.erb",
  ]
  sources = "optimizer/templates"
  destination = "$target_gen_dir/generated"
@ -399,14 +305,6 @@ ark_gen_file("intrinsics_inline_inl") {
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/intrinsics_inline.inl"
}
ark_gen_file("intrinsics_types_resolving_inl_h") {
  extra_dependencies = [ "$ark_root:concat_plugins_yamls" ]
  template_file =
      "optimizer/templates/intrinsics/intrinsics_types_resolving.inl.h.erb"
  data_file = "$target_gen_dir/../plugin_options.yaml"
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/intrinsics_types_resolving.inl.h"
}
ark_gen_file("intrinsics_ir_build_inl_h") {
  extra_dependencies = [ "$ark_root:concat_plugins_yamls" ]
  template_file = "optimizer/templates/intrinsics/intrinsics_ir_build.inl.h.erb"
@ -430,13 +328,6 @@ ark_gen_file("intrinsics_ir_build_virtual_call_inl") {
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/intrinsics_ir_build_virtual_call.inl"
}
ark_gen_file("intrinsics_can_encode_inl") {
  extra_dependencies = [ "$ark_root:concat_plugins_yamls" ]
  template_file = "optimizer/templates/intrinsics/intrinsics_can_encode.inl.erb"
  data_file = "$target_gen_dir/../plugin_options.yaml"
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/intrinsics_can_encode.inl"
}

ark_gen_file("ir_dyn_base_types_h") {
  extra_dependencies = [ "$ark_root:concat_plugins_yamls" ]
@ -469,64 +360,3 @@ ark_gen_file("compiler_interface_extensions_inl_h") {
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/compiler_interface_extensions.inl.h"
}
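Each ark_gen_file target above follows one pattern: an ERB template is instantiated against the merged plugin_options.yaml to produce a single generated file. A minimal sketch of that pattern with hypothetical template and output names (not targets from this repository):

# Hypothetical generator following the same pattern; names are placeholders.
ark_gen_file("my_extension_inl_h") {
  extra_dependencies = [ "$ark_root:concat_plugins_yamls" ]
  template_file = "optimizer/templates/my_extension.inl.h.erb"
  data_file = "$target_gen_dir/../plugin_options.yaml"
  requires = [ "$ark_root/templates/plugin_options.rb" ]
  output_file = "$target_gen_dir/generated/my_extension.inl.h"
}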

irtoc_file_plugin = "$root_gen_dir/generated/irtoc_plugin_builder.txt"
write_file(irtoc_file_plugin, plugin_irts)

action("irtoc_builder_cpp") {
  script = "$ark_root/irtoc/lang/irtoc.rb"
  outputs = [ "$target_gen_dir/generated/irtoc_builder.cpp" ]
  args = [
    "--input",
    rebase_path("$ark_root/irtoc/scripts/interpreter.irt", root_build_dir),
    "--output",
    rebase_path("$target_gen_dir/generated/irtoc_builder.cpp", root_build_dir),
    "--ark_source_dir",
    rebase_path("$ark_root", root_build_dir),
    "--isa",
    rebase_path("$root_gen_dir/isa/isa.yaml", root_build_dir),
    "--definitions",
    "NDEBUG",
    "--arch",
    target_cpu,
    "--ir-api",
    "ir-builder",
    "--plugins",
    rebase_path(irtoc_file_plugin, root_build_dir),
  ]

  deps = [
    "$ark_root/isa:isa_combine",
    "$ark_root/runtime:plugins_asm_defines_def",
    "$ark_root/runtime:plugins_defines_h",
  ]
}
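For orientation, the action above amounts to an irtoc.rb invocation of roughly the following shape; the flags match the args list above, while the concrete paths (in angle brackets) are placeholders that rebase_path resolves at generation time:

ruby irtoc/lang/irtoc.rb \
    --input <ark_root>/irtoc/scripts/interpreter.irt \
    --output <target_gen_dir>/generated/irtoc_builder.cpp \
    --ark_source_dir <ark_root> \
    --isa <root_gen_dir>/isa/isa.yaml \
    --definitions NDEBUG \
    --arch <target_cpu> \
    --ir-api ir-builder \
    --plugins <root_gen_dir>/generated/irtoc_plugin_builder.txt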

action("irtoc_generate_ir_inline") {
  script = "$ark_root/irtoc/lang/irtoc.rb"
  outputs = [ "$target_gen_dir/generated/irtoc_ir_inline.h" ]
  args = [
    "--input",
    rebase_path("$ark_root/irtoc/scripts/interpreter.irt", root_build_dir),
    "--output",
    rebase_path("$target_gen_dir/generated/irtoc_ir_inline.h", root_build_dir),
    "--ark_source_dir",
    rebase_path("$ark_root", root_build_dir),
    "--isa",
    rebase_path("$root_gen_dir/isa/isa.yaml", root_build_dir),
    "--definitions",
    "NDEBUG",
    "--arch",
    target_cpu,
    "--ir-api",
    "ir-inline",
    "--plugins",
    rebase_path(irtoc_file_plugin, root_build_dir),
  ]

  deps = [
    "$ark_root/isa:isa_combine",
    "$ark_root/runtime:plugins_asm_defines_def",
    "$ark_root/runtime:plugins_defines_h",
  ]
}

@ -1,643 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

project(compiler)

include(cmake/coverage.cmake)

if(PANDA_COMPILER_ENABLE)
    include(cmake/benchmark_coverage.cmake)
    include(cmake/ir_builder_coverage.cmake)
endif()

include(cmake/target.cmake)

include_directories(
    .
    ${PANDA_ROOT}
)

include_directories(AFTER ${PANDA_BINARY_ROOT}/cross_values)

set(GENERATED_DIR ${CMAKE_CURRENT_BINARY_DIR}/generated)
file(MAKE_DIRECTORY ${GENERATED_DIR})

set(IRTOC_BUILDER_GEN ${GENERATED_DIR}/irtoc_builder.cpp)
irtoc_generate(
    TARGET irtoc_generate_inst_builder
    IR_API ir-builder
    INPUT_FILES ${IRTOC_SOURCE_DIR}/scripts/interpreter.irt
    OUTPUT_FILE ${IRTOC_BUILDER_GEN}
    WORKING_DIRECTORY ${IRTOC_BUILD_DIR}/inst_builder
)

add_dependencies(irtoc_generate_inst_builder asm_defines)

set(IRTOC_IR_INLINE_GEN ${GENERATED_DIR}/irtoc_ir_inline.h)
irtoc_generate(
    TARGET irtoc_generate_ir_inline
    IR_API ir-inline
    INPUT_FILES ${IRTOC_SOURCE_DIR}/scripts/interpreter.irt
    OUTPUT_FILE ${IRTOC_IR_INLINE_GEN}
    WORKING_DIRECTORY ${IRTOC_BUILD_DIR}/ir_inline
)
add_dependencies(irtoc_generate_ir_inline asm_defines)

set(COMPILER_TEMPLATES_DIR ${CMAKE_CURRENT_LIST_DIR}/optimizer/templates)

add_custom_target(inst_templates_gen)
set_target_properties(inst_templates_gen PROPERTIES INST_TEMPLATES_YAML_FILES ${CMAKE_CURRENT_LIST_DIR}/optimizer/ir_builder/inst_templates.yaml)
set(INST_TEMPLATES_GEN_YAML ${GENERATED_DIR}/inst_templates.yaml)
set_target_properties(inst_templates_gen PROPERTIES INST_TEMPLATES_GEN_YAML ${INST_TEMPLATES_GEN_YAML})

function(add_inst_templates YAML_FILE_PATH)
    get_target_property(YAML_FILES inst_templates_gen INST_TEMPLATES_YAML_FILES)
    list(APPEND YAML_FILES ${YAML_FILE_PATH})
    set_target_properties(inst_templates_gen PROPERTIES INST_TEMPLATES_YAML_FILES "${YAML_FILES}")
endfunction()
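A plugin registers extra instruction templates through this helper before inst_templates_merge consumes the accumulated list; a sketch of a call with a hypothetical plugin path:

# Hypothetical usage; the YAML path is a placeholder, not a file in this change.
add_inst_templates(${PANDA_ROOT}/plugins/my_plugin/optimizer/ir_builder/inst_templates.yaml)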

set(INST_BUILDER_GEN ${GENERATED_DIR}/inst_builder_gen.cpp)
configure_file(${COMPILER_TEMPLATES_DIR}/inst_builder_gen.cpp.erb ${GENERATED_DIR}/inst_builder_gen.cpp.erb @ONLY)

panda_isa_gen(
    TEMPLATES
        "inst_builder_gen.cpp.erb"
    SOURCE ${GENERATED_DIR}
    REQUIRES ${PANDA_ROOT}/assembler/asm_isapi.rb
    DESTINATION ${GENERATED_DIR}
    EXTRA_DEPENDENCIES inst_templates_merge
)

panda_gen(DATA ${PANDA_BINARY_ROOT}/runtime/intrinsics.yaml
    TEMPLATES
        intrinsics_enum.inl.erb
        get_intrinsics.inl.erb
        entrypoints_bridge_asm_macro.inl.erb
        intrinsics_ir_build.inl.erb
        intrinsics_flags.inl.erb
        get_intrinsics_names.inl.erb
        generate_operations_intrinsic_inst.inl.erb
        generate_operations_intrinsic_graph.inl.erb
        intrinsic_codegen_test.inl.erb
        intrinsic_flags_test.inl.erb
        can_encode_builtin.inl.erb
        intrinsics_codegen.inl.h.erb
        intrinsics_codegen.inl.erb
    SOURCE ${COMPILER_TEMPLATES_DIR}/intrinsics
    DESTINATION ${GENERATED_DIR}
    REQUIRES ${COMPILER_TEMPLATES_DIR}/intrinsics/compiler_intrinsics.rb
    EXTRA_DEPENDENCIES arkruntime_gen_intrinsics_yaml
)

panda_gen(DATA ${CMAKE_CURRENT_LIST_DIR}/optimizer/ir/instructions.yaml
    TEMPLATES
        opcodes.h.erb
        arch_info_gen.h.erb
        inst_checker_gen.h.erb
        IR-instructions.md.erb
        codegen_arm64_gen.inc.erb
        deoptimize_elimination_call_visitors.inl.erb
    SOURCE ${COMPILER_TEMPLATES_DIR}
    DESTINATION ${GENERATED_DIR}
    REQUIRES ${COMPILER_TEMPLATES_DIR}/instructions.rb
)

set(COMPILER_OPTIONS_GEN_H ${GENERATED_DIR}/compiler_options_gen.h)
panda_gen_file(
    DATAFILE ${CMAKE_CURRENT_LIST_DIR}/compiler.yaml
    TEMPLATE ${PANDA_ROOT}/templates/options/options.h.erb
    OUTPUTFILE ${COMPILER_OPTIONS_GEN_H}
    REQUIRES ${PANDA_ROOT}/templates/common.rb
)
add_custom_target(compiler_options_gen DEPENDS ${COMPILER_OPTIONS_GEN_H})

set(COMPILER_EVENTS_GEN_H ${GENERATED_DIR}/compiler_events_gen.h)
panda_gen_file(
    DATAFILE ${CMAKE_CURRENT_LIST_DIR}/compiler.yaml
    TEMPLATE ${PANDA_ROOT}/templates/events/events.h.erb
    OUTPUTFILE ${COMPILER_EVENTS_GEN_H}
    REQUIRES ${PANDA_ROOT}/templates/common.rb
)
add_custom_target(compiler_events_gen DEPENDS ${COMPILER_EVENTS_GEN_H})

set(COMPILER_LOGGER_COMPONENTS_GEN_H ${GENERATED_DIR}/compiler_logger_components.inc)
panda_gen_file(
    DATAFILE ${CMAKE_CURRENT_LIST_DIR}/compiler.yaml
    TEMPLATE ${PANDA_ROOT}/templates/logger_components/logger_components.inc.erb
    OUTPUTFILE ${COMPILER_LOGGER_COMPONENTS_GEN_H}
    REQUIRES ${PANDA_ROOT}/templates/common.rb
)
add_custom_target(compiler_logger_components_gen DEPENDS ${COMPILER_LOGGER_COMPONENTS_GEN_H})

set(CPU_FEATURES_GEN_H ${GENERATED_DIR}/cpu_features.inc)
panda_gen_file(
    DATAFILE ${CMAKE_CURRENT_LIST_DIR}/compiler.yaml
    TEMPLATE ${PANDA_ROOT}/templates/cpu_features.inc.erb
    OUTPUTFILE ${CPU_FEATURES_GEN_H}
    REQUIRES ${PANDA_ROOT}/templates/common.rb
)
add_custom_target(cpu_features_gen DEPENDS ${CPU_FEATURES_GEN_H})

set(OPTIMIZER_SOURCES
    optimizer/pass.cpp
    optimizer/pass_manager.cpp
    optimizer/pass_manager_statistics.cpp
    optimizer/analysis/alias_analysis.cpp
    optimizer/analysis/bounds_analysis.cpp
    optimizer/analysis/countable_loop_parser.cpp
    optimizer/analysis/dominators_tree.cpp
    optimizer/analysis/linear_order.cpp
    optimizer/analysis/liveness_analyzer.cpp
    optimizer/analysis/liveness_use_table.cpp
    optimizer/analysis/live_registers.cpp
    optimizer/analysis/loop_analyzer.cpp
    optimizer/analysis/monitor_analysis.cpp
    optimizer/analysis/object_type_propagation.cpp
    optimizer/analysis/rpo.cpp
    optimizer/analysis/reg_alloc_verifier.cpp
    optimizer/analysis/types_analysis.cpp
    optimizer/ir/analysis.cpp
    optimizer/ir/basicblock.cpp
    optimizer/ir/dump.cpp
    optimizer/ir/graph.cpp
    optimizer/ir/inst.cpp
    optimizer/ir/locations.cpp
    optimizer/ir/visualizer_printer.cpp
    optimizer/ir/graph_checker.cpp
    optimizer/ir/graph_cloner.cpp
    optimizer/optimizations/adjust_arefs.cpp
    optimizer/optimizations/balance_expressions.cpp
    optimizer/optimizations/branch_elimination.cpp
    optimizer/optimizations/checks_elimination.cpp
    optimizer/optimizations/code_sink.cpp
    optimizer/optimizations/const_folding.cpp
    optimizer/optimizations/deoptimize_elimination.cpp
    optimizer/optimizations/cleanup.cpp
    optimizer/optimizations/if_conversion.cpp
    optimizer/optimizations/licm.cpp
    optimizer/optimizations/locations_builder.cpp
    optimizer/optimizations/loop_peeling.cpp
    optimizer/optimizations/loop_unroll.cpp
    optimizer/optimizations/lse.cpp
    optimizer/optimizations/memory_barriers.cpp
    optimizer/optimizations/memory_coalescing.cpp
    optimizer/optimizations/object_type_check_elimination.cpp
    optimizer/optimizations/peepholes.cpp
    optimizer/optimizations/redundant_loop_elimination.cpp
    optimizer/optimizations/scheduler.cpp
    optimizer/optimizations/try_catch_resolving.cpp
    optimizer/optimizations/types_resolving.cpp
    optimizer/optimizations/vn.cpp
    optimizer/optimizations/cse.cpp
    tools/debug/jit_writer.cpp
    compiler_logger.cpp
    compiler_options.cpp
)
set(COMPILER_SOURCES
    optimizer/ir/aot_data.cpp
    optimizer/ir_builder/ir_builder.cpp
    optimizer/ir_builder/inst_builder.cpp
    optimizer/optimizations/inlining.cpp
    optimizer/optimizations/lowering.cpp
    optimizer/optimizations/move_constants.cpp
    optimizer/optimizations/regalloc/reg_alloc_base.cpp
    optimizer/optimizations/regalloc/interference_graph.cpp
    optimizer/optimizations/regalloc/reg_alloc.cpp
    optimizer/optimizations/regalloc/reg_alloc_stat.cpp
    optimizer/optimizations/regalloc/reg_alloc_graph_coloring.cpp
    optimizer/optimizations/regalloc/reg_map.cpp
    optimizer/optimizations/regalloc/reg_alloc_linear_scan.cpp
    optimizer/optimizations/regalloc/spill_fills_resolver.cpp
    optimizer/optimizations/regalloc/split_resolver.cpp
    optimizer/optimizations/regalloc/reg_alloc_resolver.cpp
    ${OPTIMIZER_SOURCES}
    ${INST_BUILDER_GEN}
    ${IRTOC_IR_INLINE_GEN}
)
if (NOT PANDA_TARGET_WINDOWS AND NOT PANDA_TARGET_MACOS)
    list(APPEND COMPILER_SOURCES
        compile_method.cpp
        optimizer_run.cpp
        optimizer/code_generator/disassembly.cpp
        optimizer/code_generator/codegen.cpp
        optimizer/code_generator/codegen_native.cpp
        optimizer/code_generator/spill_fill_encoder.cpp
        optimizer/code_generator/slow_path.cpp
        optimizer/code_generator/method_properties.cpp
        code_info/code_info.cpp
        code_info/code_info_builder.cpp
    )
endif()

add_library(arkcompiler ${PANDA_DEFAULT_LIB_TYPE} ${COMPILER_SOURCES})

panda_add_to_clang_tidy(TARGET arkcompiler)

add_dependencies(arkcompiler isa_gen_${PROJECT_NAME})
add_dependencies(arkcompiler instructions_gen_${PROJECT_NAME})
add_dependencies(arkcompiler compiler_events_gen)
add_dependencies(arkcompiler compiler_logger_components_gen)
add_dependencies(arkcompiler compiler_options_gen)
add_dependencies(arkcompiler irtoc_generate_inst_builder)
add_dependencies(arkcompiler irtoc_generate_ir_inline)
add_dependencies(arkcompiler intrinsics_gen_compiler)
add_dependencies(arkcompiler intrinsics_gen_arkruntime)
add_dependencies(arkcompiler entrypoints_gen)
add_dependencies(arkcompiler entrypoints_compiler_checksum_gen)
add_dependencies(arkcompiler cross_values)
add_dependencies(arkcompiler cpu_features_gen)
add_dependencies(arkcompiler asm_defines_generator)

if (PANDA_TARGET_MOBILE OR PANDA_TARGET_OHOS)
    add_dependencies(host_tools_depends arkcompiler)
endif()

target_link_libraries(arkcompiler arkbase arkfile)
if (NOT PANDA_TARGET_WINDOWS AND NOT PANDA_TARGET_MACOS)
    target_link_libraries(arkcompiler arkencoder)
endif()

target_include_directories(arkcompiler
    PUBLIC ${PANDA_ROOT}
    PUBLIC ${PANDA_ROOT}/runtime
    PUBLIC ${PANDA_BINARY_ROOT}/runtime/include
    PUBLIC ${PANDA_BINARY_ROOT}/cross_values
    PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}
    PUBLIC ${GENERATED_DIR}
)

# Disable the warning about offsetof usage for non-standard-layout types.
# In C++17 offsetof is conditionally supported for such types, so the
# compiler must issue an error if it does not implement offsetof for them.
# We also use static asserts to ensure that offsetof works correctly for
# non-standard-layout types.
target_compile_options(arkcompiler PUBLIC -Wno-invalid-offsetof)
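The static asserts the comment refers to follow this general shape; a minimal illustrative sketch in C++ (the struct and member are placeholders, not types from the runtime):

#include <cstddef>

// Non-standard-layout type: the virtual destructor rules out standard layout,
// yet dependent code still needs a stable offset for 'field'.
struct Illustrative {
    virtual ~Illustrative() = default;
    int field;
};

// Built with -Wno-invalid-offsetof. If the compiler did not implement offsetof
// for this type, the static_assert could not be evaluated and compilation would
// fail, instead of silently using a wrong offset. The concrete value is
// ABI-dependent; on common ABIs the vtable pointer precedes 'field'.
static_assert(offsetof(Illustrative, field) != 0, "vptr expected before field");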

set(PANDA_COMPILER_TESTS_SOURCES
    tests/adjust_arefs_test.cpp
    tests/alias_analysis_test.cpp
    tests/aot_test.cpp
    tests/balance_expressions_test.cpp
    tests/basicblock_test.cpp
    tests/bounds_analysis_test.cpp
    tests/branch_elimination_test.cpp
    tests/call_input_types_test.cpp
    tests/checks_elimination_test.cpp
    tests/cleanup_test.cpp
    tests/codegen_runner_test.cpp
    tests/code_info_test.cpp
    tests/code_sink_test.cpp
    tests/const_folding_test.cpp
    tests/cse_test.cpp
    tests/deoptimize_elimination_test.cpp
    tests/dom_tree_test.cpp
    tests/graph_cloner_test.cpp
    tests/graph_comparator_test.cpp
    tests/graph_creation_test.cpp
    tests/if_conversion_test.cpp
    tests/inlining_test.cpp
    tests/inst_test.cpp
    tests/iterators_test.cpp
    tests/licm_test.cpp
    tests/liveness_analyzer_test.cpp
    tests/live_registers_test.cpp
    tests/loop_analyzer_test.cpp
    tests/loop_peeling_test.cpp
    tests/lse_test.cpp
    tests/memory_barriers_test.cpp
    tests/memory_coalescing_test.cpp
    tests/method_properties_test.cpp
    tests/monitor_analysis_test.cpp
    tests/move_constants_test.cpp
    tests/osr_test.cpp
    tests/peepholes_test.cpp
    tests/redundant_loop_elimination_test.cpp
    tests/reg_alloc_common_test.cpp
    tests/reg_alloc_graph_coloring_test.cpp
    tests/reg_alloc_interference_graph_test.cpp
    tests/rpo_test.cpp
    tests/scheduler_test.cpp
    tests/split_resolver_test.cpp
    tests/try_catch_resolving_test.cpp
    tests/unit_test.cpp
    tests/vn_test.cpp
    tests/class_hash_table_test.cpp
    tests/linear_order_test.cpp
    tests/profiling_runner_test.cpp
)
set_source_files_properties(tests/loop_unroll_test.cpp PROPERTIES COMPILE_FLAGS -Wno-shadow)

# Distinguish 'PANDA_COMPILER_TARGET_..' from 'PANDA_TARGET_..' because for
# PANDA_TARGET_AMD64 the tests are executed for Arch::AARCH64.
if(PANDA_COMPILER_TARGET_AARCH64 OR PANDA_TARGET_ARM32)
    if(PANDA_COMPILER_TARGET_AARCH64)
        list(APPEND PANDA_COMPILER_TESTS_SOURCES
            tests/aarch64/codegen_test.cpp
        )
    endif()
    list(APPEND PANDA_COMPILER_TESTS_SOURCES
        tests/life_intervals_test.cpp
        tests/loop_unroll_test.cpp
        tests/reg_alloc_linear_scan_test.cpp
        tests/reg_alloc_verifier_test.cpp
    )
    list(APPEND PANDA_COMPILER_TESTS_SOURCES
        tests/lowering_test.cpp)
endif()

if(PANDA_TARGET_AMD64 OR PANDA_NIGHTLY_TEST_ON)
    list(APPEND PANDA_COMPILER_TESTS_SOURCES
        tests/ir_builder_test.cpp)
endif()

set(PANDA_COMPILER_TESTS_LIBRARIES arkcompiler arkbase arkassembler arkruntime arkaotmanager aot_builder)

set(ENCODER_TESTS_SOURCES
    tests/encoder_operands.cpp
    tests/constructor_test.cpp
)

if(PANDA_COMPILER_ENABLE)
    list(APPEND ENCODER_TESTS_SOURCES tests/asm_printer_test.cpp)
endif()

if (PANDA_TARGET_ARM32 AND PANDA_COMPILER_TARGET_AARCH32)
    # Append a modified version of inst_generator_test to the encoder test sources
    list(APPEND ENCODER_TESTS_SOURCES
        tests/aarch32/callconv32_test.cpp
        tests/aarch32/encoder32_test.cpp
        tests/aarch32/register32_test.cpp
    )
endif (PANDA_TARGET_ARM32 AND PANDA_COMPILER_TARGET_AARCH32)

if (PANDA_TARGET_ARM64)
    list(APPEND ENCODER_TESTS_SOURCES
        tests/aarch64/callconv64_test.cpp
        tests/aarch64/encoder64_test.cpp
        tests/aarch64/register64_test.cpp
    )
endif (PANDA_TARGET_ARM64)

if (PANDA_TARGET_X86)
    list(APPEND ENCODER_TESTS_SOURCES
        tests/x86/asmjit_test.cpp
    )
    list(APPEND PANDA_COMPILER_TESTS_LIBRARIES asmjit)
endif (PANDA_TARGET_X86)

if (PANDA_TARGET_AMD64)
    list(APPEND ENCODER_TESTS_SOURCES
        tests/amd64/asmjit_test.cpp
        tests/amd64/callconv64_test.cpp
        tests/amd64/encoder64_test.cpp
        tests/amd64/register64_test.cpp
    )
    if (PANDA_COMPILER_TARGET_AARCH64)
        list(APPEND ENCODER_TESTS_SOURCES
            tests/encoders_test.cpp
        )
    endif()
    list(APPEND PANDA_COMPILER_TESTS_LIBRARIES asmjit)
endif (PANDA_TARGET_AMD64)

if (NOT (PANDA_TARGET_MOBILE OR PANDA_TARGET_OHOS OR PANDA_ENABLE_FUZZBENCH))
    list(APPEND PANDA_COMPILER_TESTS_LIBRARIES stdc++fs)
endif()

if(NOT PANDA_MINIMAL_VIXL AND PANDA_COMPILER_ENABLE)
    panda_add_gtest(
        CONTAINS_MAIN
        NAME compiler_unit_tests
        SOURCES
            ${PANDA_COMPILER_TESTS_SOURCES}
        LIBRARIES
            ${PANDA_COMPILER_TESTS_LIBRARIES}
        SANITIZERS
            ${PANDA_SANITIZERS_LIST}
    )
endif()

# AMD64 and X86 - for unit tests
if(NOT PANDA_MINIMAL_VIXL AND PANDA_TARGET_AMD64 AND PANDA_WITH_TESTS)
    set(PANDA_CODEGEN_TESTS_SOURCES
        tests/unit_test.cpp
        tests/spill_fill_encoder_test.cpp
    )
    if(PANDA_COMPILER_TARGET_AARCH64)
        list(APPEND PANDA_CODEGEN_TESTS_SOURCES
            tests/codegen_test.cpp
        )
    endif()
    set_source_files_properties(tests/codegen_test.cpp PROPERTIES COMPILE_FLAGS -Wno-shadow)
    panda_add_gtest(
        CONTAINS_MAIN
        NAME compiler_codegen_tests
        SOURCES
            ${PANDA_CODEGEN_TESTS_SOURCES}
        LIBRARIES
            ${PANDA_COMPILER_TESTS_LIBRARIES}
        SANITIZERS
            ${PANDA_SANITIZERS_LIST}
    )

    if (PANDA_WITH_TESTS AND PANDA_COMPILER_TARGET_AARCH64)
        set(PANDA_INST_GEN_TESTS_SOURCES
            tests/unit_test.cpp
            tests/inst_generator.cpp
            tests/inst_generator_test.cpp
        )
        set_source_files_properties(tests/inst_generator_test.cpp PROPERTIES COMPILE_FLAGS -Wno-shadow)
        panda_add_gtest(
            CONTAINS_MAIN
            NAME compiler_inst_gen_tests
            SOURCES
                ${PANDA_INST_GEN_TESTS_SOURCES}
            LIBRARIES
                ${PANDA_COMPILER_TESTS_LIBRARIES}
            SANITIZERS
                ${PANDA_SANITIZERS_LIST}
        )

        set(PANDA_INTRINSIC_CODEGEN_TESTS_SOURCES
            tests/unit_test.cpp
            tests/inst_generator.cpp
            tests/intrinsic_codegen_test.cpp
        )
        set_source_files_properties(tests/intrinsic_codegen_test.cpp PROPERTIES COMPILE_FLAGS -Wno-shadow)
        panda_add_gtest(
            CONTAINS_MAIN
            NAME compiler_intrinsic_codegen_arm64_tests
            SOURCES
                ${PANDA_INTRINSIC_CODEGEN_TESTS_SOURCES}
            LIBRARIES
                ${PANDA_COMPILER_TESTS_LIBRARIES}
            SANITIZERS
                ${PANDA_SANITIZERS_LIST}
        )
        target_compile_options(compiler_intrinsic_codegen_arm64_tests PUBLIC "-DINTRINSIC_CODEGEN_TEST_ARM64")

        panda_add_gtest(
            CONTAINS_MAIN
            NAME compiler_intrinsic_codegen_amd64_tests
            SOURCES
                ${PANDA_INTRINSIC_CODEGEN_TESTS_SOURCES}
            LIBRARIES
                ${PANDA_COMPILER_TESTS_LIBRARIES}
            SANITIZERS
                ${PANDA_SANITIZERS_LIST}
        )
        target_compile_options(compiler_intrinsic_codegen_amd64_tests PUBLIC "-DINTRINSIC_CODEGEN_TEST_AMD64")

        panda_add_gtest(
            CONTAINS_MAIN
            NAME compiler_intrinsic_codegen_arm32_tests
            SOURCES
                ${PANDA_INTRINSIC_CODEGEN_TESTS_SOURCES}
            LIBRARIES
                ${PANDA_COMPILER_TESTS_LIBRARIES}
            SANITIZERS
                ${PANDA_SANITIZERS_LIST}
        )
        target_compile_options(compiler_intrinsic_codegen_arm32_tests PUBLIC "-DINTRINSIC_CODEGEN_TEST_ARM32")
    endif()
endif()

if(NOT PANDA_MINIMAL_VIXL AND PANDA_WITH_TESTS AND PANDA_COMPILER_ENABLE)
    add_dependencies(compiler_unit_tests ark_aot)
    # TODO(igorban): Enable the build on other platforms
    if (PANDA_TARGET_AMD64 OR PANDA_TARGET_ARM64)
        add_dependencies(compiler_unit_tests ark_aotdump)
    endif()

    target_include_directories(compiler_unit_tests
        PUBLIC "$<TARGET_PROPERTY:arkruntime,INTERFACE_INCLUDE_DIRECTORIES>"
    )

    if(PANDA_TARGET_AMD64 AND PANDA_COMPILER_TARGET_AARCH64)
        target_include_directories(compiler_inst_gen_tests
            PUBLIC "$<TARGET_PROPERTY:arkruntime,INTERFACE_INCLUDE_DIRECTORIES>"
        )
    endif()
    if(PANDA_TARGET_AMD64)
        target_include_directories(compiler_codegen_tests
            PUBLIC "$<TARGET_PROPERTY:arkruntime,INTERFACE_INCLUDE_DIRECTORIES>"
        )
    endif()
endif()

# Encoder gtests are kept separate from the others because they test the encoder
# library, not the whole compiler. Please do not build them together, to avoid
# introducing additional dependencies.
if(NOT PANDA_MINIMAL_VIXL AND PANDA_WITH_TESTS)
    panda_add_gtest(
        NAME encoder_unit_tests
        SOURCES
            ${ENCODER_TESTS_SOURCES}
        LIBRARIES
            ${PANDA_COMPILER_TESTS_LIBRARIES}
        SANITIZERS
            ${PANDA_SANITIZERS_LIST}
    )
    target_compile_options(encoder_unit_tests PUBLIC "-Wno-unused-variable" "-Wno-uninitialized" "-Wno-shadow")
endif()

panda_add_sanitizers(TARGET arkcompiler SANITIZERS ${PANDA_SANITIZERS_LIST})

# Special target to create compile_commands.json in the right directory.
add_check_style(".")

# Enable documentation
add_doxygen(
    NAME "compiler"
    PATH "."
)

# Support mobile execution
if(NOT PANDA_MINIMAL_VIXL AND PANDA_WITH_TESTS AND NOT (PANDA_TARGET_MOBILE OR PANDA_TARGET_OHOS) AND PANDA_COMPILER_ENABLE)
    set(ASM_TEST_LIST
        "mov"
        "neg"
        "abs"
        "not"
        "add"
        "sub"
        "mul"
        "shl"
        "shr"
        "ashr"
        "and"
        "or"
        "xor"
    )

    if (PANDA_TARGET_AMD64)
        set(LIBGENERATED_DIR ${CMAKE_BINARY_DIR}/bin-gtests/asm_output/amd64/)
    elseif (PANDA_TARGET_ARM64)
        set(LIBGENERATED_DIR ${CMAKE_BINARY_DIR}/bin-gtests/asm_output/aarch64/)
    elseif(PANDA_TARGET_ARM32)
        set(LIBGENERATED_DIR ${CMAKE_BINARY_DIR}/bin-gtests/asm_output/aarch32/)
    else()
        set(LIBGENERATED_FILES "")
    endif()

    # Main target
    add_custom_target(asm_test)

    # Pseudo-target for generating asm
    add_custom_target(asm_generate)
    add_dependencies(asm_generate encoder_unit_tests_gtests)

    foreach(TEST ${ASM_TEST_LIST})
        set(TEST_ASM ${LIBGENERATED_DIR}/${TEST}.S)
        add_custom_command(
            OUTPUT ${TEST_ASM}
            COMMAND echo " Pseudo-command for generate asm ${TEST_ASM}" > /dev/null
            DEPENDS asm_generate)
        set_property(SOURCE ${TEST_ASM} PROPERTY GENERATED TRUE)
        add_custom_target(${TEST} DEPENDS ${TEST_ASM})
        add_dependencies(asm_test ${TEST})
        list(APPEND LIBGENERATED_FILES ${LIBGENERATED_DIR}/${TEST}.S)
    endforeach()

    enable_language(ASM)

    set(ASM_CALL_TEST
        tests/asm_caller.cpp
        ${LIBGENERATED_FILES}
    )

    common_add_gtest(
        NAME compiler_asm_tests
        SOURCES
            ${ASM_CALL_TEST}
        LIBRARIES
            ${PANDA_COMPILER_TESTS_LIBRARIES}
        SANITIZERS
            ${PANDA_SANITIZERS_LIST}
        OUTPUT_DIRECTORY
            ${PANDA_BINARY_ROOT}/bin-gtests
    )
    if(PANDA_WITH_TESTS)
        add_dependencies(asm_test compiler_asm_tests)
    endif()
# PANDA_TARGET_MOBILE
endif()

add_subdirectory(tools/paoc ark_aot)

if (NOT PANDA_MINIMAL_VIXL AND (PANDA_TARGET_AMD64 OR PANDA_TARGET_ARM64))
    add_subdirectory(tools/aotdump aotdump)
endif()
add_subdirectory(aot)
add_subdirectory(aot/aot_builder aot_builder)
@ -1,22 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

get_target_property(INST_TEMPLATES_YAML_FILES inst_templates_gen INST_TEMPLATES_YAML_FILES)
get_target_property(INST_TEMPLATES_GEN_YAML inst_templates_gen INST_TEMPLATES_GEN_YAML)

add_custom_command(OUTPUT ${INST_TEMPLATES_GEN_YAML}
    COMMENT "Merge yaml files: ${INST_TEMPLATES_YAML_FILES}"
    COMMAND ${PANDA_ROOT}/templates/concat_yamls.sh "${INST_TEMPLATES_GEN_YAML}" ${INST_TEMPLATES_YAML_FILES}
    DEPENDS ${INST_TEMPLATES_YAML_FILES}
)
add_custom_target(inst_templates_merge DEPENDS ${INST_TEMPLATES_GEN_YAML})
@ -1,172 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set(INTRINSICS_STUB_INL ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_stub.inl)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_stub.inl.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_STUB_INL}
)

set(INTRINSICS_STUB_INL_H ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_stub.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_stub.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_STUB_INL_H}
)

set(INTRINSICS_CODEGEN_EXT_INL_H ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_codegen_ext.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_codegen_ext.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_CODEGEN_EXT_INL_H}
)

set(INTRINSICS_IR_BUILD_STATIC_CALL_INL ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_ir_build_static_call.inl)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_ir_build_static_call.inl.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_IR_BUILD_STATIC_CALL_INL}
)

set(INTRINSICS_IR_BUILD_VIRTUAL_CALL_INL ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_ir_build_virtual_call.inl)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_ir_build_virtual_call.inl.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_IR_BUILD_VIRTUAL_CALL_INL}
)

set(INTRINSICS_IR_BUILD_INL_H ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_ir_build.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_ir_build.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_IR_BUILD_INL_H}
)

set(INTRINSICS_CAN_ENCODE_INL ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_can_encode.inl)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_can_encode.inl.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_CAN_ENCODE_INL}
)

set(IR_DYN_BASE_TYPES_H ${PANDA_BINARY_ROOT}/compiler/generated/ir-dyn-base-types.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/ir-dyn-base-types.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${IR_DYN_BASE_TYPES_H}
)

add_custom_target(ir_dyn_base_types_h DEPENDS ${IR_DYN_BASE_TYPES_H})

set(SOURCE_LANGUAGES_H ${PANDA_BINARY_ROOT}/compiler/generated/source_languages.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/source_languages.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${SOURCE_LANGUAGES_H}
)

add_custom_target(source_languages_h DEPENDS ${SOURCE_LANGUAGES_H})

set(CODEGEN_LANGUAGE_EXTENSIONS_H ${PANDA_BINARY_ROOT}/compiler/generated/codegen_language_extensions.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/codegen_language_extensions.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${CODEGEN_LANGUAGE_EXTENSIONS_H}
)

set(COMPILER_INTERFACE_EXTENSIONS_H ${PANDA_BINARY_ROOT}/compiler/generated/compiler_interface_extensions.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/compiler_interface_extensions.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${COMPILER_INTERFACE_EXTENSIONS_H}
)

set(INST_BUILDER_EXTENSIONS_H ${PANDA_BINARY_ROOT}/compiler/generated/inst_builder_extensions.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/inst_builder_extensions.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${INST_BUILDER_EXTENSIONS_H}
)

set(INTRINSICS_EXTENSIONS_H ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_extensions.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics_extensions.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES ${YAML_FILES}
    OUTPUTFILE ${INTRINSICS_EXTENSIONS_H}
)

set(INTRINSICS_INLINE_INL ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_inline.inl)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_inline.inl.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_INLINE_INL}
)

set(INTRINSICS_TYPES_RESOLVING_INL_H ${PANDA_BINARY_ROOT}/compiler/generated/intrinsics_types_resolving.inl.h)
panda_gen_file(
    DATAFILE ${GEN_PLUGIN_OPTIONS_YAML}
    TEMPLATE ${PANDA_ROOT}/compiler/optimizer/templates/intrinsics/intrinsics_types_resolving.inl.h.erb
    REQUIRES ${PANDA_ROOT}/templates/plugin_options.rb
    EXTRA_DEPENDENCIES plugin_options_merge
    OUTPUTFILE ${INTRINSICS_TYPES_RESOLVING_INL_H}
)

add_custom_target(compiler_intrinsics DEPENDS
    plugin_options_gen
    ${INTRINSICS_STUB_INL}
    ${INTRINSICS_STUB_INL_H}
    ${INTRINSICS_CODEGEN_EXT_INL_H}
    ${INTRINSICS_IR_BUILD_STATIC_CALL_INL}
    ${INTRINSICS_IR_BUILD_VIRTUAL_CALL_INL}
    ${INTRINSICS_IR_BUILD_INL_H}
    ${INTRINSICS_CAN_ENCODE_INL}
    ${IR_DYN_BASE_TYPES_H}
    ${SOURCE_LANGUAGES_H}
    ${CODEGEN_LANGUAGE_EXTENSIONS_H}
    ${COMPILER_INTERFACE_EXTENSIONS_H}
    ${INST_BUILDER_EXTENSIONS_H}
    ${INTRINSICS_EXTENSIONS_H}
    ${INTRINSICS_INLINE_INL}
    ${INTRINSICS_TYPES_RESOLVING_INL_H}
)

add_dependencies(arkcompiler compiler_intrinsics)
@ -1,67 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import("//arkcompiler/runtime_core/ark_config.gni")

config("aot_manager_config") {
  include_dirs = [
    "$ark_root/compiler",
    "$ark_root/compiler/aot",
    "$ark_root/libpandabase",
    "$ark_root/libpandafile",
    "$ark_root/runtime",
    "$target_gen_dir/../generated",
    "$target_gen_dir/include",
  ]
}

ohos_shared_library("libarkaotmanager") {
  sources = [
    "aot_file.cpp",
    "aot_manager.cpp",
  ]

  configs = [
    ":aot_manager_config",
    "$ark_root:ark_config",
    sdk_libc_secshared_config,
    "$ark_root/libpandabase:arkbase_public_config",
    "$ark_root/libpandafile:arkfile_public_config",
    "$ark_root/runtime:arkruntime_public_config",
  ]

  deps = [
    "$ark_root/compiler:intrinsics_stub_inl_h",
    "$ark_root/compiler:ir_dyn_base_types_h",
    "$ark_root/compiler:libarkcompiler",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_graph_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_inst_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_names_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsic_codegen_test_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_enum_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_ir_build_inl",
    "$ark_root/libpandabase:libarkbase",
    "$ark_root/libpandafile:libarkfile",
    "$ark_root/runtime:arkruntime_gen_entrypoints_compiler_checksum_entrypoints_compiler_checksum_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_compiler_inl",
    "$ark_root/runtime:arkruntime_gen_entrypoints_entrypoints_gen_h",
    "$ark_root/runtime:plugins_entrypoints_gen_h",
    sdk_libc_secshared_dep,
  ]

  output_extension = "so"
  relative_install_dir = "ark"
  part_name = "runtime_core"
  subsystem_name = "arkcompiler"
}
@ -1,41 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

project(aot_manager)

add_library(arkaotmanager SHARED aot_file.cpp aot_manager.cpp)

target_link_libraries(arkaotmanager arkbase arkfile arkcompiler)

add_dependencies(arkaotmanager intrinsics_gen_compiler isa_gen_compiler entrypoints_gen arkbase)
target_include_directories(arkaotmanager
    PUBLIC ${GENERATED_DIR}
    PUBLIC ${PANDA_ROOT}/runtime
)

if(PANDA_WITH_TESTS AND TARGET arkruntime_test_interpreter_impl)
    target_include_directories(arkaotmanager
        PUBLIC "$<TARGET_PROPERTY:arkruntime,INTERFACE_INCLUDE_DIRECTORIES>"
        PUBLIC "$<TARGET_PROPERTY:arkruntime_test_interpreter_impl,INTERFACE_INCLUDE_DIRECTORIES>"
    )
else()
    target_include_directories(arkaotmanager
        PUBLIC "$<TARGET_PROPERTY:arkruntime,INTERFACE_INCLUDE_DIRECTORIES>"
    )
endif()

panda_add_sanitizers(TARGET arkaotmanager SANITIZERS ${PANDA_SANITIZERS_LIST})
panda_add_to_clang_tidy(TARGET arkaotmanager)

add_check_style(".")

@ -1,46 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import("//arkcompiler/runtime_core/ark_config.gni")

config("aotbuilder_public_config") {
  include_dirs = [
    "$ark_root/compiler",
    "$ark_root/compiler/aot",
    "$ark_root/libpandabase",
    "$ark_root/libpandafile",
    "$ark_root/runtime",
    "$ark_root",
    "$target_gen_dir/../../../libpandafile",
    "$target_gen_dir/../../../libpandabase/include",
    "$target_gen_dir/../../../runtime",
  ]
}

ohos_static_library("aotbuilder") {
  sources = [ "aot_builder.cpp" ]

  configs = [
    "$ark_root:ark_config",
    "$ark_root/compiler:arkcompiler_public_config",
    ":aotbuilder_public_config",
    sdk_libc_secshared_config,
    "$ark_root/runtime:arkruntime_public_config",
    "$ark_root/libpandafile:arkfile_public_config",
  ]

  deps = [
    "$ark_root/compiler:libarkcompiler",
    "$ark_root/libpandafile:isa_gen_libarkfile_bytecode_instruction_enum_gen_h",
  ]
}
@ -1,26 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

project(aot_builder)

add_library(aot_builder STATIC
    aot_builder.cpp
)

target_link_libraries(aot_builder arkbase arkfile arkcompiler)

panda_set_lib_32bit_property(aot_builder)

add_check_style(".")

panda_add_to_clang_tidy(TARGET aot_builder)
@ -1,348 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aot_builder.h"
#include "aot/aot_file.h"
#include "elf_builder.h"
#include "include/class.h"
#include "include/method.h"
#include "optimizer/code_generator/encode.h"
#include "code_info/code_info.h"

#include <numeric>

namespace panda::compiler {

/**
 * Fills the text section in the ELF builder with the code of the methods collected in AotBuilder.
 */
class CodeDataProvider : public ElfSectionDataProvider {
public:
    explicit CodeDataProvider(AotBuilder *aot_builder) : aot_builder_(aot_builder) {}

    void FillData(Span<uint8_t> stream, size_t stream_begin) const override
    {
        const size_t code_offset = CodeInfo::GetCodeOffset(aot_builder_->GetArch());
        CodePrefix prefix;
        size_t curr_pos = stream_begin;
        for (size_t i = 0; i < aot_builder_->methods_.size(); i++) {
            auto &method = aot_builder_->methods_[i];
            auto &method_header = aot_builder_->method_headers_[i];
            prefix.code_size = method.GetCode().size();
            prefix.code_info_offset = code_offset + RoundUp(method.GetCode().size(), CodeInfo::ALIGNMENT);
            prefix.code_info_size = method.GetCodeInfo().size();
            // Prefix
            curr_pos = stream_begin + method_header.code_offset;
            const char *data = reinterpret_cast<char *>(&prefix);
            CopyToSpan(stream, data, sizeof(prefix), curr_pos);
            curr_pos += sizeof(prefix);

            // Code
            curr_pos += code_offset - sizeof(prefix);
            data = reinterpret_cast<const char *>(method.GetCode().data());
            CopyToSpan(stream, data, method.GetCode().size(), curr_pos);
            curr_pos += method.GetCode().size();

            // CodeInfo
            curr_pos += RoundUp(method.GetCode().size(), CodeInfo::ALIGNMENT) - method.GetCode().size();
            data = reinterpret_cast<const char *>(method.GetCodeInfo().data());
            CopyToSpan(stream, data, method.GetCodeInfo().size(), curr_pos);
        }
    }

    size_t GetDataSize() const override
    {
        return aot_builder_->current_code_size_;
    }

private:
    AotBuilder *aot_builder_;
};
|
||||
|
||||
void AotBuilder::StartFile(const std::string &name, uint32_t checksum)
|
||||
{
|
||||
auto &file_header = file_headers_.emplace_back();
|
||||
file_header.classes_offset = class_headers_.size();
|
||||
file_header.file_checksum = checksum;
|
||||
file_header.file_offset = 0;
|
||||
file_header.file_name_str = AddString(name);
|
||||
file_header.methods_offset = method_headers_.size();
|
||||
}
|
||||
|
||||
void AotBuilder::EndFile()
|
||||
{
|
||||
ASSERT(!file_headers_.empty());
|
||||
auto &file_header = file_headers_.back();
|
||||
file_header.classes_count = class_headers_.size() - file_header.classes_offset;
|
||||
if (file_header.classes_count == 0 && (class_hash_tables_size_.empty() || class_hash_tables_size_.back() == 0)) {
|
||||
/* Just return, if there is nothing compiled in the file */
|
||||
CHECK_EQ(file_header.methods_count, 0U);
|
||||
file_headers_.pop_back();
|
||||
return;
|
||||
}
|
||||
ASSERT(!class_hash_tables_size_.empty());
|
||||
file_header.class_hash_table_offset =
|
||||
(entity_pair_headers_.size() - class_hash_tables_size_.back()) * sizeof(panda_file::EntityPairHeader);
|
||||
file_header.class_hash_table_size = class_hash_tables_size_.back();
|
||||
file_header.methods_count = method_headers_.size() - file_header.methods_offset;
|
||||
// We should keep class headers sorted, since AOT manager uses binary search to find classes.
|
||||
std::sort(class_headers_.begin() + file_header.classes_offset, class_headers_.end(),
|
||||
[](const auto &a, const auto &b) { return a.class_id < b.class_id; });
|
||||
}
|
||||
|
||||
int AotBuilder::Write(const std::string &cmdline, const std::string &file_name)
|
||||
{
|
||||
switch (arch_) {
|
||||
case Arch::AARCH32:
|
||||
return WriteImpl<Arch::AARCH32>(cmdline, file_name);
|
||||
case Arch::AARCH64:
|
||||
return WriteImpl<Arch::AARCH64>(cmdline, file_name);
|
||||
case Arch::X86:
|
||||
return WriteImpl<Arch::X86>(cmdline, file_name);
|
||||
case Arch::X86_64:
|
||||
return WriteImpl<Arch::X86_64>(cmdline, file_name);
|
||||
default:
|
||||
LOG(ERROR, COMPILER) << "AotBuilder: Unsupported arch";
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
void AotBuilder::FillHeader(const std::string &cmdline, const std::string &file_name)
|
||||
{
|
||||
aot_header_.magic = compiler::AotFile::MAGIC;
|
||||
aot_header_.version = compiler::AotFile::VERSION;
|
||||
aot_header_.checksum = 0; // TODO(msherstennikov)
|
||||
ASSERT(GetRuntime() != nullptr);
|
||||
aot_header_.environment_checksum = GetRuntime()->GetEnvironmentChecksum(arch_);
|
||||
aot_header_.arch = static_cast<uint32_t>(arch_);
|
||||
aot_header_.gc_type = gc_type_;
|
||||
aot_header_.files_offset = sizeof(aot_header_);
|
||||
aot_header_.files_count = file_headers_.size();
|
||||
aot_header_.class_hash_tables_offset =
|
||||
aot_header_.files_offset + aot_header_.files_count * sizeof(compiler::PandaFileHeader);
|
||||
size_t class_hash_tables_size = entity_pair_headers_.size() * sizeof(panda_file::EntityPairHeader);
|
||||
aot_header_.classes_offset = aot_header_.class_hash_tables_offset + class_hash_tables_size;
|
||||
aot_header_.methods_offset = aot_header_.classes_offset + class_headers_.size() * sizeof(compiler::ClassHeader);
|
||||
aot_header_.bitmap_offset = aot_header_.methods_offset + methods_.size() * sizeof(compiler::MethodHeader);
|
||||
size_t bitmaps_size =
|
||||
std::accumulate(class_methods_bitmaps_.begin(), class_methods_bitmaps_.end(), 0U,
|
||||
[](size_t sum, const auto &vec) { return vec.GetContainerSizeInBytes() + sum; });
|
||||
aot_header_.strtab_offset = aot_header_.bitmap_offset + bitmaps_size;
|
||||
aot_header_.file_name_str = AddString(file_name);
|
||||
aot_header_.cmdline_str = AddString(cmdline);
|
||||
aot_header_.boot_aot = static_cast<uint32_t>(boot_aot_);
|
||||
aot_header_.with_cha = static_cast<uint32_t>(with_cha_);
|
||||
aot_header_.class_ctx_str = AddString(class_ctx_);
|
||||
}
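
// The resulting .aot section layout, in the order established above:
//   AotHeader | PandaFileHeaders | class hash tables | ClassHeaders |
//   MethodHeaders | per-class method bitmaps | string table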

template <Arch arch>
int AotBuilder::WriteImpl(const std::string &cmdline, const std::string &file_name)
{
    constexpr size_t PAGE_SIZE_BYTES = 0x1000;
    constexpr size_t CALL_STATIC_SLOT_SIZE = 3;
    constexpr size_t CALL_VIRTUAL_SLOT_SIZE = 2;
    constexpr size_t STRING_SLOT_SIZE = 2;
    constexpr size_t INLINE_CACHE_SLOT_SIZE = 1;
    ElfBuilder<arch> builder;

    auto text_section = builder.GetTextSection();
    auto aot_section = builder.GetAotSection();
    auto got_section = builder.GetGotSection();
    std::vector<uint8_t> &got_data = got_section->GetVector();
    // The extra +1 slot marks the end of the AOT table.
    auto got_data_size = static_cast<size_t>(RuntimeInterface::IntrinsicId::COUNT) + 1 +
                         CALL_STATIC_SLOT_SIZE * (got_plt_.size() + got_class_.size()) +
                         CALL_VIRTUAL_SLOT_SIZE * got_virt_indexes_.size() + STRING_SLOT_SIZE * got_string_.size() +
                         INLINE_CACHE_SLOT_SIZE * got_intf_inline_cache_.size();
    // We need to fill the whole segment with the aot_got section because it is filled from the end.
    got_data.resize(RoundUp(PointerSize(arch) * got_data_size, PAGE_SIZE_BYTES), 0);

    GenerateSymbols(builder);

    FillHeader(cmdline, file_name);

    aot_section->AppendData(&aot_header_, sizeof(aot_header_));
    aot_section->AppendData(file_headers_.data(), file_headers_.size() * sizeof(compiler::PandaFileHeader));
    aot_section->AppendData(entity_pair_headers_.data(),
                            entity_pair_headers_.size() * sizeof(panda_file::EntityPairHeader));
    aot_section->AppendData(class_headers_.data(), class_headers_.size() * sizeof(compiler::ClassHeader));
    aot_section->AppendData(method_headers_.data(), method_headers_.size() * sizeof(compiler::MethodHeader));

    for (auto &bitmap : class_methods_bitmaps_) {
        aot_section->AppendData(bitmap.data(), bitmap.GetContainerSizeInBytes());
    }
    aot_section->AppendData(string_table_.data(), string_table_.size());

    CodeDataProvider code_provider(this);
    text_section->SetDataProvider(&code_provider);

    using PtrType = typename ArchTraits<arch>::WordType;
    auto ptr_view = Span(got_data).template SubSpan<PtrType>(0, got_data.size() / sizeof(PtrType));
    EmitPlt<arch>(ptr_view, got_data_size);

#ifdef PANDA_COMPILER_CFI
    builder.SetFrameData(&frame_data_);
#endif
    builder.Build(file_name);
    builder.Write(file_name);
    return 0;
}

template <Arch arch>
void AotBuilder::EmitPlt(Span<typename ArchTraits<arch>::WordType> ptr_view, size_t got_data_size)
{
    if (!got_plt_.empty() || !got_virt_indexes_.empty() || !got_class_.empty() || !got_string_.empty() ||
        !got_intf_inline_cache_.empty()) {
        ASSERT(PointerSize(arch) >= sizeof(uint32_t));

        auto ptr_cnt = ptr_view.Size();
        auto end = static_cast<size_t>(RuntimeInterface::IntrinsicId::COUNT);
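        // Slots are addressed backwards from the end of the table: the last
        // `end` entries belong to intrinsics, and every idx recorded in the
        // got_* maps is non-positive, i.e. an offset below that block.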

        ptr_view[ptr_cnt - got_data_size] = 0;
        constexpr size_t IMM_2 = 2;
        for (auto [method, idx] : got_plt_) {
            ASSERT(idx <= 0);
            ptr_view[ptr_cnt - end + idx] = AotFile::AotSlotType::PLT_SLOT;
            ptr_view[ptr_cnt - end + idx - IMM_2] = method.second;
        }
        for (auto [method, idx] : got_virt_indexes_) {
            ASSERT(idx <= 0);
            ptr_view[ptr_cnt - end + idx] = AotFile::AotSlotType::VTABLE_INDEX;
            ptr_view[ptr_cnt - end + idx - 1] = method.second;
        }
        for (auto [klass, idx] : got_class_) {
            ASSERT(idx <= 0);
            ptr_view[ptr_cnt - end + idx] = AotFile::AotSlotType::CLASS_SLOT;
            ptr_view[ptr_cnt - end + idx - IMM_2] = klass.second;
        }
        for (auto [string_id, idx] : got_string_) {
            ASSERT(idx <= 0);
            ptr_view[ptr_cnt - end + idx] = AotFile::AotSlotType::STRING_SLOT;
            ptr_view[ptr_cnt - end + idx - 1] = string_id.second;
        }
        for (auto [cache, idx] : got_intf_inline_cache_) {
            (void)cache;
            ASSERT(idx < 0);
            ptr_view[ptr_cnt - end + idx] = AotFile::AotSlotType::INLINECACHE_SLOT;
        }
    }
}

/**
 * Add method names to the symbol table
 */
template <Arch arch>
void AotBuilder::GenerateSymbols(ElfBuilder<arch> &builder)
{
    if (generate_symbols_) {
        auto text_section = builder.GetTextSection();
        size_t offset = 0;
        std::string method_name;
        for (auto &method : methods_) {
            if (method.GetMethod()->GetPandaFile() == nullptr) {
                method_name = "Error: method doesn't belong to any panda file";
            } else {
                auto method_casted = reinterpret_cast<RuntimeInterface::MethodPtr>(method.GetMethod());
                method_name = runtime_->GetMethodFullName(method_casted, true);
            }
            builder.template AddSymbol<true>(
                method_name, method.GetOverallSize(), *text_section, [offset, text_section]() {
                    return text_section->GetAddress() + offset + CodeInfo::GetCodeOffset(arch);
                });
            offset += RoundUp(method.GetOverallSize(), ArchTraits<arch>::CODE_ALIGNMENT);
        }
    }
}

void AotBuilder::AddClassHashTable(const panda_file::File &panda_file)
{
    const panda_file::File::Header *header = panda_file.GetHeader();
    uint32_t num_classes = header->num_classes;
    if (num_classes == 0) {
        return;
    }

    size_t hash_table_size = panda::helpers::math::GetPowerOfTwoValue32(num_classes);
    std::vector<panda_file::EntityPairHeader> entity_pairs;
    std::vector<unsigned int> conflict_entity_table;
    entity_pairs.resize(hash_table_size);
    conflict_entity_table.resize(hash_table_size);
    size_t conflict_num = 0;

    auto classes = panda_file.GetClasses();
    for (size_t i = 0; i < num_classes; ++i) {
        auto entity_id = panda_file::File::EntityId(classes[i]);
        auto name = panda_file.GetStringData(entity_id).data;
        uint32_t hash = GetHash32String(name);
        uint32_t pos = hash & (hash_table_size - 1);
        auto &entity_pair = entity_pairs[pos];
        if (entity_pair.descriptor_hash == 0) {
            entity_pair.descriptor_hash = hash;
            entity_pair.entity_id_offset = entity_id.GetOffset();
        } else {
            conflict_entity_table[conflict_num] = i;
            conflict_num++;
        }
    }
    if (conflict_num == 0) {
        entity_pair_headers_.insert(entity_pair_headers_.end(), entity_pairs.begin(), entity_pairs.end());
        class_hash_tables_size_.emplace_back(entity_pairs.size());
    } else {
        ResolveConflictClassHashTable(panda_file, std::move(conflict_entity_table), conflict_num, entity_pairs);
    }
}
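
// Illustrative walk-through (assumed values): with hash_table_size == 8, two
// classes whose hashes both map to position 5 collide; the second one probes
// 6, 7, then wraps to 0 until a free slot is found, and the chain head's
// next_pos stores actual_pos + 1 so that 0 can still mean "no successor".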
void AotBuilder::ResolveConflictClassHashTable(const panda_file::File &panda_file,
                                               std::vector<unsigned int> conflict_entity_table, size_t conflict_num,
                                               std::vector<panda_file::EntityPairHeader> &entity_pairs)
{
    auto classes = panda_file.GetClasses();
    auto hash_table_size = entity_pairs.size();
    for (size_t j = 0; j < conflict_num; ++j) {
        if (j > 0 && conflict_entity_table[j - 1] == conflict_entity_table[j]) {
            break;  // Exit the loop if there are no conflict elements anymore
        }
        auto i = conflict_entity_table[j];
        auto entity_id = panda_file::File::EntityId(classes[i]);
        auto name = panda_file.GetStringData(entity_id).data;
        uint32_t hash = GetHash32String(name);
        uint32_t theory_pos = hash & (hash_table_size - 1);
        ASSERT(entity_pairs[theory_pos].descriptor_hash != 0);

        uint32_t actual_pos = theory_pos;
        while (actual_pos < (hash_table_size - 1) && entity_pairs[actual_pos].descriptor_hash != 0) {
            actual_pos++;
        }
        if (actual_pos == (hash_table_size - 1) && entity_pairs[actual_pos].descriptor_hash != 0) {
            actual_pos = 0;
            while (actual_pos < theory_pos && entity_pairs[actual_pos].descriptor_hash != 0) {
                actual_pos++;
            }
        }
        ASSERT(entity_pairs[actual_pos].descriptor_hash == 0);
        auto &entity_pair = entity_pairs[actual_pos];
        entity_pair.descriptor_hash = hash;
        entity_pair.entity_id_offset = entity_id.GetOffset();
        while (entity_pairs[theory_pos].next_pos != 0) {
            theory_pos = entity_pairs[theory_pos].next_pos - 1;
        }
        // Add 1 to distinguish the initial value 0 of next_pos from the case where the next position is really 0
        entity_pairs[theory_pos].next_pos = actual_pos + 1;
    }
    entity_pair_headers_.insert(entity_pair_headers_.end(), entity_pairs.begin(), entity_pairs.end());
    class_hash_tables_size_.emplace_back(entity_pairs.size());
}

}  // namespace panda::compiler
@ -1,163 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_AOT_AOT_BULDER_AOT_FILE_BUILDER_H
#define COMPILER_AOT_AOT_BULDER_AOT_FILE_BUILDER_H

#include <string>
#include <vector>
#include "aot/compiled_method.h"
#include "aot/aot_file.h"
#include "elf_builder.h"
#include "utils/arch.h"
#include "utils/arena_containers.h"
#include "utils/bit_vector.h"
#include "optimizer/ir/runtime_interface.h"
#include "mem/gc/gc_types.h"

namespace panda {
class Class;
}  // namespace panda

namespace panda::compiler {

template <Arch arch, bool is_jit_mode>
class ElfBuilder;

class AotBuilder : public ElfWriter {
public:
    void SetGcType(uint32_t gc_type)
    {
        gc_type_ = gc_type;
    }
    uint32_t GetGcType() const
    {
        return gc_type_;
    }

    uint64_t *GetIntfInlineCacheIndex()
    {
        return &intf_inline_cache_index_;
    }

    int Write(const std::string &cmdline, const std::string &file_name);

    void StartFile(const std::string &name, uint32_t checksum);
    void EndFile();

    auto *GetGotPlt()
    {
        return &got_plt_;
    }

    auto *GetGotVirtIndexes()
    {
        return &got_virt_indexes_;
    }

    auto *GetGotClass()
    {
        return &got_class_;
    }

    auto *GetGotString()
    {
        return &got_string_;
    }

    auto *GetGotIntfInlineCache()
    {
        return &got_intf_inline_cache_;
    }

    void SetBootAot(bool boot_aot)
    {
        boot_aot_ = boot_aot;
    }

    void SetWithCha(bool with_cha)
    {
        with_cha_ = with_cha;
    }

    void SetGenerateSymbols(bool generate_symbols)
    {
        generate_symbols_ = generate_symbols;
    }

    void AddClassHashTable(const panda_file::File &panda_file);

    void InsertEntityPairHeader(uint32_t class_hash, uint32_t class_id)
    {
        entity_pair_headers_.emplace_back();
        auto &entity_pair = entity_pair_headers_.back();
        entity_pair.descriptor_hash = class_hash;
        entity_pair.entity_id_offset = class_id;
    }

    auto *GetEntityPairHeaders() const
    {
        return &entity_pair_headers_;
    }

    void InsertClassHashTableSize(uint32_t size)
    {
        class_hash_tables_size_.emplace_back(size);
    }

    auto *GetClassHashTableSize() const
    {
        return &class_hash_tables_size_;
    }

private:
    template <Arch arch>
    int WriteImpl(const std::string &cmdline, const std::string &file_name);

    template <Arch arch>
    void GenerateSymbols(ElfBuilder<arch> &builder);

    template <Arch arch>
    void EmitPlt(Span<typename ArchTraits<arch>::WordType> ptr_view, size_t got_data_size);

    void FillHeader(const std::string &cmdline, const std::string &file_name);

    void ResolveConflictClassHashTable(const panda_file::File &panda_file,
                                       std::vector<unsigned int> conflict_entity_table, size_t conflict_num,
                                       std::vector<panda_file::EntityPairHeader> &entity_pairs);

private:
    std::string file_name_;
    compiler::AotHeader aot_header_ {};
    uint32_t gc_type_ {static_cast<uint32_t>(mem::GCType::INVALID_GC)};
    uint64_t intf_inline_cache_index_ {0};
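    // For each of the maps below the key is a (panda file, entity id) pair and
    // the value is the non-positive slot index assigned in the aot_got table
    // (see AotBuilder::EmitPlt).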
    std::map<std::pair<const panda_file::File *, uint32_t>, int32_t> got_plt_;
    std::map<std::pair<const panda_file::File *, uint32_t>, int32_t> got_virt_indexes_;
    std::map<std::pair<const panda_file::File *, uint32_t>, int32_t> got_class_;
    std::map<std::pair<const panda_file::File *, uint32_t>, int32_t> got_string_;
    std::map<std::pair<const panda_file::File *, uint64_t>, int32_t> got_intf_inline_cache_;
    bool boot_aot_ {false};
    bool with_cha_ {true};
    bool generate_symbols_ {false};

    std::vector<panda_file::EntityPairHeader> entity_pair_headers_;
    std::vector<uint32_t> class_hash_tables_size_;
    friend class CodeDataProvider;
    friend class JitCodeDataProvider;
};

}  // namespace panda::compiler

#endif  // COMPILER_AOT_AOT_BULDER_AOT_FILE_BUILDER_H
File diff suppressed because it is too large
@ -1,194 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aot_file.h"
#include "compiler/optimizer/ir/inst.h"
#include "optimizer/ir/runtime_interface.h"
#include "utils/logger.h"
#include "code_info/code_info.h"
#include "mem/gc/gc_types.h"
#include "trace/trace.h"
#include "entrypoints/entrypoints.h"

// On some targets the runtime library is not linked, so the linker would fail while searching for the
// CallStaticPltResolver symbol. To solve this issue, we define this function as weak.
// TODO(msherstennikov): find a better way instead of a weak function, e.g. make the aot_manager library static.
extern "C" void CallStaticPltResolver([[maybe_unused]] void *slot) __attribute__((weak));
extern "C" void CallStaticPltResolver([[maybe_unused]] void *slot) {}

namespace panda::compiler {
static inline Expected<const uint8_t *, std::string> LoadSymbol(const panda::os::library_loader::LibraryHandle &handle,
                                                                const char *name)
{
    auto sym = panda::os::library_loader::ResolveSymbol(handle, name);
    if (!sym) {
        return Unexpected(sym.Error().ToString());
    }
    return reinterpret_cast<uint8_t *>(sym.Value());
}

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define LOAD_AOT_SYMBOL(name)                                            \
    auto name = LoadSymbol(handle, #name);                               \
    if (!name) {                                                         \
        return Unexpected("Cannot load name section: " + name.Error());  \
    }

Expected<std::unique_ptr<AotFile>, std::string> AotFile::Open(const std::string &file_name, uint32_t gc_type,
                                                              bool for_dump)
{
    trace::ScopedTrace scoped_trace("Open aot file " + file_name);
    auto handle_load = panda::os::library_loader::Load(file_name);
    if (!handle_load) {
        return Unexpected("AOT elf library open failed: " + handle_load.Error().ToString());
    }
    auto handle = std::move(handle_load.Value());

    LOAD_AOT_SYMBOL(aot);
    LOAD_AOT_SYMBOL(aot_end);
    LOAD_AOT_SYMBOL(code);
    LOAD_AOT_SYMBOL(code_end);

    if (code_end.Value() < code.Value() || aot_end.Value() <= aot.Value()) {
        return Unexpected(std::string("Invalid symbols"));
    }

    auto aot_header = reinterpret_cast<const AotHeader *>(aot.Value());
    if (aot_header->magic != MAGIC) {
        return Unexpected(std::string("Wrong AotHeader magic"));
    }

    if (aot_header->version != VERSION) {
        return Unexpected(std::string("Wrong AotHeader version"));
    }

    if (!for_dump && aot_header->environment_checksum != RuntimeInterface::GetEnvironmentChecksum(RUNTIME_ARCH)) {
        return Unexpected(std::string("Compiler environment checksum mismatch"));
    }

    if (!for_dump && aot_header->gc_type != gc_type) {
        return Unexpected(std::string("Wrong AotHeader gc-type: ") +
                          std::string(mem::GCStringFromType(static_cast<mem::GCType>(aot_header->gc_type))) + " vs " +
                          std::string(mem::GCStringFromType(static_cast<mem::GCType>(gc_type))));
    }
    return std::make_unique<AotFile>(std::move(handle), Span(aot.Value(), aot_end.Value() - aot.Value()),
                                     Span(code.Value(), code_end.Value() - code.Value()));
}

void AotFile::InitializeGot(RuntimeInterface *runtime)
{
    size_t minus_first_slot = static_cast<size_t>(RuntimeInterface::IntrinsicId::COUNT) + 1;
    auto *table = const_cast<uintptr_t *>(
        reinterpret_cast<const uintptr_t *>(code_.data() - minus_first_slot * PointerSize(RUNTIME_ARCH)));
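    // The table sits immediately below the code section; walk it downwards,
    // dispatching on each slot's type tag until the zero end-marker written
    // by AotBuilder::EmitPlt is reached.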

    while (*table != 0) {
        switch (*table) {
            case AotSlotType::PLT_SLOT:
                table -= 2U;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[1] = reinterpret_cast<uintptr_t>(CallStaticPltResolver);
                // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[2U] = reinterpret_cast<uintptr_t>(
                    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                    table + 1 - runtime->GetCompiledEntryPointOffset(RUNTIME_ARCH) / sizeof(uintptr_t));
                break;
            case AotSlotType::VTABLE_INDEX:
                table--;       // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[1] = 0;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                break;
            case AotSlotType::CLASS_SLOT:
                table -= 2U;    // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[1] = 0;   // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[2U] = 0;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                break;
            case AotSlotType::STRING_SLOT:
                table--;       // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                table[1] = 0;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
                break;
            case AotSlotType::INLINECACHE_SLOT:
                break;
            default:
                UNREACHABLE();
                break;
        }
        table--;  // NOLINT(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    }
}

void AotFile::PatchTable(RuntimeInterface *runtime)
{
    auto *table = const_cast<uintptr_t *>(reinterpret_cast<const uintptr_t *>(
        code_.data() - static_cast<size_t>(RuntimeInterface::IntrinsicId::COUNT) * PointerSize(RUNTIME_ARCH)));
    for (size_t i = 0; i < static_cast<size_t>(RuntimeInterface::IntrinsicId::COUNT); i++) {
        IntrinsicInst inst(Opcode::Intrinsic, static_cast<RuntimeInterface::IntrinsicId>(i));
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        table[i] = runtime->GetIntrinsicAddress(inst.IsRuntimeCall(), static_cast<RuntimeInterface::IntrinsicId>(i));
    }
}

AotClass AotPandaFile::GetClass(uint32_t class_id) const
{
    auto classes = aot_file_->GetClassHeaders(*header_);
    auto it = std::lower_bound(classes.begin(), classes.end(), class_id,
                               [](const auto &a, uintptr_t klass_id) { return a.class_id < klass_id; });
    if (it == classes.end() || it->class_id != class_id) {
        return {};
    }
    ASSERT(it->methods_count != 0 && "AOT file shall not contain empty classes");
    return AotClass(aot_file_, &*it);
}

const void *AotClass::FindMethodCodeEntry(size_t index) const
{
    auto method_header = FindMethodHeader(index);
    if (method_header == nullptr) {
        return nullptr;
    }

    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    return aot_file_->GetMethodCode(method_header) + CodeInfo::GetCodeOffset(RUNTIME_ARCH);
}

Span<const uint8_t> AotClass::FindMethodCodeSpan(size_t index) const
{
    auto method_header = FindMethodHeader(index);
    if (method_header == nullptr) {
        return {};
    }
    auto code = Span(aot_file_->GetMethodCode(method_header), method_header->code_size);
    return CodeInfo(code).GetCodeSpan();
}

const MethodHeader *AotClass::FindMethodHeader(size_t index) const
{
    auto bitmap = GetBitmap();
    CHECK_LT(index, bitmap.size());
    if (!bitmap[index]) {
        return nullptr;
    }
    auto method_index = bitmap.PopCount(index);
    ASSERT(method_index < header_->methods_count);
    return aot_file_->GetMethodHeader(header_->methods_offset + method_index);
}

BitVectorSpan AotClass::GetBitmap() const
{
    // TODO(msherstennikov): remove const_cast once BitVector supports constant storage
    auto bitmap_base = const_cast<uint32_t *>(reinterpret_cast<const uint32_t *>(aot_file_->GetMethodsBitmap()));
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    return BitVectorSpan(bitmap_base + header_->methods_bitmap_offset, header_->methods_bitmap_size);
}

}  // namespace panda::compiler
@ -1,254 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_AOT_AOT_FILE_H
#define COMPILER_AOT_AOT_FILE_H

#include "aot_headers.h"
#include "compiler/code_info/code_info.h"
#include "os/library_loader.h"
#include "utils/span.h"
#include "libpandafile/file.h"

#include <string>
#include <array>
#include <memory>
#include <algorithm>

namespace panda::compiler {
class RuntimeInterface;

class AotFile {
public:
    static constexpr std::array MAGIC = {'.', 'a', 'n', '\0'};
    static constexpr std::array VERSION = {'0', '0', '6', '\0'};

    enum AotSlotType { PLT_SLOT = 1, VTABLE_INDEX = 2, CLASS_SLOT = 3, STRING_SLOT = 4, INLINECACHE_SLOT = 5 };
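    // Slot widths in the aot_got table must stay in sync with the constants in
    // AotBuilder::WriteImpl: PLT and class slots take 3 words, vtable-index
    // and string slots take 2, inline-cache slots take 1.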

    AotFile(panda::os::library_loader::LibraryHandle &&handle, Span<const uint8_t> aot_data, Span<const uint8_t> code)
        : handle_(std::move(handle)), aot_data_(aot_data), code_(code)
    {
    }

    NO_MOVE_SEMANTIC(AotFile);
    NO_COPY_SEMANTIC(AotFile);
    ~AotFile() = default;

public:
    static Expected<std::unique_ptr<AotFile>, std::string> Open(const std::string &file_name, uint32_t gc_type,
                                                                bool for_dump = false);

    const void *GetCode() const
    {
        return code_.data();
    }

    size_t GetCodeSize() const
    {
        return code_.size();
    }

    auto FileHeaders() const
    {
        return aot_data_.SubSpan<const PandaFileHeader>(GetAotHeader()->files_offset, GetFilesCount());
    }

    auto GetMethodHeader(size_t index) const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return reinterpret_cast<const MethodHeader *>(&aot_data_[GetAotHeader()->methods_offset]) + index;
    }

    const MethodHeader *GetMethodHeadersPtr() const
    {
        return reinterpret_cast<const MethodHeader *>(&aot_data_[GetAotHeader()->methods_offset]);
    }

    auto GetClassHeaders(const PandaFileHeader &file_header) const
    {
        return aot_data_.SubSpan<const ClassHeader>(GetAotHeader()->classes_offset +
                                                        file_header.classes_offset * sizeof(ClassHeader),
                                                    file_header.classes_count);
    }

    auto GetClassHashTable(const PandaFileHeader &file_header) const
    {
        return aot_data_.SubSpan<const panda::panda_file::EntityPairHeader>(GetAotHeader()->class_hash_tables_offset +
                                                                                file_header.class_hash_table_offset,
                                                                            file_header.class_hash_table_size);
    }

    const uint8_t *GetMethodsBitmap() const
    {
        return &aot_data_[GetAotHeader()->bitmap_offset];
    }

    size_t GetFilesCount() const
    {
        return GetAotHeader()->files_count;
    }

    const PandaFileHeader *FindPandaFile(const std::string &file_name) const
    {
        auto file_headers = FileHeaders();
        auto res = std::find_if(file_headers.begin(), file_headers.end(), [this, &file_name](auto &header) {
            return file_name == GetString(header.file_name_str);
        });
        return res == file_headers.end() ? nullptr : res;
    }

    const uint8_t *GetMethodCode(const MethodHeader *method_header) const
    {
        return code_.data() + method_header->code_offset;
    }

    const char *GetString(size_t offset) const
    {
        return reinterpret_cast<const char *>(aot_data_.data() + GetAotHeader()->strtab_offset + offset);
    }

    const AotHeader *GetAotHeader() const
    {
        return reinterpret_cast<const AotHeader *>(aot_data_.data());
    }

    const char *GetFileName() const
    {
        return GetString(GetAotHeader()->file_name_str);
    }

    const char *GetCommandLine() const
    {
        return GetString(GetAotHeader()->cmdline_str);
    }

    const char *GetClassContext() const
    {
        return GetString(GetAotHeader()->class_ctx_str);
    }

    bool IsCompiledWithCha() const
    {
        return GetAotHeader()->with_cha != 0U;
    }

    bool IsBootPandaFile() const
    {
        return GetAotHeader()->boot_aot != 0U;
    }

    void InitializeGot(RuntimeInterface *runtime);

    void PatchTable(RuntimeInterface *runtime);

private:
    panda::os::library_loader::LibraryHandle handle_ {nullptr};
    Span<const uint8_t> aot_data_;
    Span<const uint8_t> code_;
};

class AotClass final {
public:
    AotClass() = default;
    AotClass(const AotFile *file, const ClassHeader *header) : aot_file_(file), header_(header) {}
    ~AotClass() = default;
    DEFAULT_COPY_SEMANTIC(AotClass);
    DEFAULT_MOVE_SEMANTIC(AotClass);

    const void *FindMethodCodeEntry(size_t index) const;
    Span<const uint8_t> FindMethodCodeSpan(size_t index) const;
    const MethodHeader *FindMethodHeader(size_t index) const;

    BitVectorSpan GetBitmap() const;

    auto GetMethodHeaders() const
    {
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return Span(aot_file_->GetMethodHeadersPtr() + header_->methods_offset, header_->methods_count);
    }

    static AotClass Invalid()
    {
        return AotClass();
    }

    bool IsValid() const
    {
        return header_ != nullptr;
    }

private:
    const AotFile *aot_file_ {nullptr};
    const ClassHeader *header_ {nullptr};
};

class AotPandaFile {
public:
    AotPandaFile() = default;
    AotPandaFile(AotFile *file, const PandaFileHeader *header) : aot_file_(file), header_(header)
    {
        LoadClassHashTable();
    }

    DEFAULT_MOVE_SEMANTIC(AotPandaFile);
    DEFAULT_COPY_SEMANTIC(AotPandaFile);
    ~AotPandaFile() = default;

    const AotFile *GetAotFile() const
    {
        return aot_file_;
    }
    const PandaFileHeader *GetHeader()
    {
        return header_;
    }
    const PandaFileHeader *GetHeader() const
    {
        return header_;
    }
    std::string GetFileName() const
    {
        return GetAotFile()->GetString(GetHeader()->file_name_str);
    }
    AotClass GetClass(uint32_t class_id) const;

    Span<const ClassHeader> GetClassHeaders() const
    {
        return aot_file_->GetClassHeaders(*header_);
    }

    CodeInfo GetMethodCodeInfo(const MethodHeader *method_header) const
    {
        return CodeInfo(GetAotFile()->GetMethodCode(method_header), method_header->code_size);
    }

    void LoadClassHashTable()
    {
        class_hash_table_ = GetAotFile()->GetClassHashTable(*header_);
    }

    panda::Span<const panda::panda_file::EntityPairHeader> GetClassHashTable() const
    {
        return class_hash_table_;
    }

private:
    AotFile *aot_file_ {nullptr};
    const PandaFileHeader *header_ {nullptr};
    panda::Span<const panda::panda_file::EntityPairHeader> class_hash_table_;
};
}  // namespace panda::compiler

#endif  // COMPILER_AOT_AOT_FILE_H
@ -1,83 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_AOT_AOT_HEADERS_H
#define COMPILER_AOT_AOT_HEADERS_H

#include <array>
#include <cstddef>
#include <cstdint>

namespace panda::compiler {

constexpr size_t AOT_HEADER_MAGIC_SIZE = 4;
constexpr size_t AOT_HEADER_VERSION_SIZE = 4;

struct AotHeader {
    alignas(alignof(uint32_t)) std::array<char, AOT_HEADER_MAGIC_SIZE> magic;
    alignas(alignof(uint32_t)) std::array<char, AOT_HEADER_VERSION_SIZE> version;
    uint32_t checksum;
    uint32_t environment_checksum;
    uint32_t arch;
    uint32_t gc_type;
    uint32_t files_count;
    uint32_t files_offset;
    uint32_t class_hash_tables_offset;
    uint32_t classes_offset;
    uint32_t methods_offset;
    uint32_t bitmap_offset;
    uint32_t strtab_offset;
    uint32_t file_name_str;
    uint32_t cmdline_str;
    uint32_t boot_aot;
    uint32_t with_cha;
    uint32_t class_ctx_str;
};

static_assert((sizeof(AotHeader) % sizeof(uint32_t)) == 0);
static_assert(alignof(AotHeader) == alignof(uint32_t));

struct PandaFileHeader {
    uint32_t class_hash_table_size;
    uint32_t class_hash_table_offset;
    uint32_t classes_count;
    uint32_t classes_offset;
    uint32_t methods_count;
    uint32_t methods_offset;
    uint32_t file_checksum;
    uint32_t file_offset;
    uint32_t file_name_str;
};

struct ClassHeader {
    uint32_t class_id;
    uint32_t pab_offset;
    uint32_t methods_count;
    uint32_t methods_offset;
    // Offset to the methods bitmap (aligned as uint32_t)
    uint32_t methods_bitmap_offset;
    // Size of the bitmap in bits
    uint32_t methods_bitmap_size;
};

struct MethodHeader {
    uint32_t method_id;
    uint32_t code_offset;
    uint32_t code_size;
};

}  // namespace panda::compiler

#endif  // COMPILER_AOT_AOT_HEADERS_H
@ -1,186 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "aot_manager.h"
#include "os/filesystem.h"
#include "events/events.h"

namespace panda::compiler {
class RuntimeInterface;

Expected<bool, std::string> AotManager::AddFile(const std::string &file_name, RuntimeInterface *runtime,
                                                uint32_t gc_type, bool force)
{
    if (GetFile(file_name) != nullptr) {
        LOG(DEBUG, AOT) << "Trying to add an already existing AOT file: '" << file_name << "'";
        return true;
    }
    auto aot_file = AotFile::Open(file_name, gc_type);
    if (!aot_file) {
        EVENT_AOT_MANAGER(file_name, events::AotManagerAction::OPEN_FAILED);
        return Unexpected("AotFile::Open failed: " + aot_file.Error());
    }
    if (runtime != nullptr) {
        aot_file.Value()->PatchTable(runtime);
        aot_file.Value()->InitializeGot(runtime);
    }

    LOG(DEBUG, AOT) << "AOT file '" << file_name << "' has been loaded, code=" << aot_file.Value()->GetCode()
                    << ", code_size=" << aot_file.Value()->GetCodeSize();
    LOG(DEBUG, AOT) << " It contains the following panda files:";
    for (auto header : aot_file.Value()->FileHeaders()) {
        LOG(DEBUG, AOT) << " " << aot_file.Value()->GetString(header.file_name_str);
    }
    aot_files_.push_back(std::move(aot_file.Value()));
    auto &aot_insert_file = aot_files_[aot_files_.size() - 1];
    for (auto header : aot_insert_file->FileHeaders()) {
        auto pf_name = aot_insert_file->GetString(header.file_name_str);
        auto file_header = aot_insert_file->FindPandaFile(pf_name);
        if (force) {
            files_map_[pf_name] = AotPandaFile(aot_insert_file.get(), file_header);
        } else {
            files_map_.emplace(std::make_pair(pf_name, AotPandaFile(aot_insert_file.get(), file_header)));
        }
    }
    EVENT_AOT_MANAGER(file_name, events::AotManagerAction::ADDED);
    return true;
}

const AotPandaFile *AotManager::FindPandaFile(const std::string &file_name)
{
    if (file_name.empty()) {
        return nullptr;
    }
    auto it = files_map_.find(file_name);
    if (it != files_map_.end()) {
        return &it->second;
    }
    return nullptr;
}

const AotFile *AotManager::GetFile(const std::string &file_name) const
{
    auto res = std::find_if(aot_files_.begin(), aot_files_.end(),
                            [&file_name](auto &file) { return file_name == file->GetFileName(); });
    return res == aot_files_.end() ? nullptr : (*res).get();
}

/* We need this piecewise printing because some tools limit line length to 4000 characters */
static void FancyClassContextPrint(std::string_view context)
{
    constexpr char DELIMITER = ':';
    size_t start = 0;
    size_t end = context.find(DELIMITER, start);
    while (end != std::string::npos) {
        LOG(ERROR, AOT) << "\t\t" << context.substr(start, end - start);
        start = end + 1;
        end = context.find(DELIMITER, start);
    }
    LOG(ERROR, AOT) << "\t\t" << context.substr(start);
}

static bool CheckFilesInClassContext(std::string_view context, std::string_view aot_context)
{
    constexpr char DELIMITER = ':';
    size_t start = 0;
    size_t end = aot_context.find(DELIMITER, start);
    while (end != std::string::npos) {
        auto file_context = aot_context.substr(start, end - start);
        if (context.find(file_context) == std::string::npos) {
            LOG(ERROR, AOT) << "Cannot find file " << file_context << " in the runtime context";
            return false;
        }
        start = end + 1;
        end = aot_context.find(DELIMITER, start);
    }
    return true;
}

void AotManager::VerifyClassHierarchy(bool only_boot)
{
    auto complete_context = boot_class_context_;
    if (!only_boot && !app_class_context_.empty()) {
        if (!complete_context.empty()) {
            complete_context.append(":");
        }
        complete_context.append(app_class_context_);
    }
    auto verify_aot = [this, &complete_context](auto &aot_file) {
        auto context = aot_file->IsBootPandaFile() ? boot_class_context_ : complete_context;
        bool is_check = true;

        if (aot_file->IsCompiledWithCha()) {
            // The AOT file context must be a prefix of the current runtime context
            if (context.rfind(aot_file->GetClassContext(), 0) != 0) {
                is_check = false;
                EVENT_AOT_MANAGER(aot_file->GetFileName(), events::AotManagerAction::CHA_VERIFY_FAILED);
            }
        } else {
            // The AOT file context must be contained in the current runtime context
            if (!CheckFilesInClassContext(context, aot_file->GetClassContext())) {
                is_check = false;
                EVENT_AOT_MANAGER(aot_file->GetFileName(), events::AotManagerAction::FILE_VERIFY_FAILED);
            }
        }
        if (!is_check) {
            auto boot_pref = aot_file->IsBootPandaFile() ? "boot " : "";
            LOG(ERROR, AOT) << "Cannot use " << boot_pref << "AOT file '" << aot_file->GetFileName() << '\'';
            LOG(ERROR, AOT) << "\tRuntime " << boot_pref << "class context: ";
            FancyClassContextPrint(context);
            LOG(ERROR, AOT) << "\tAOT class context: ";
            FancyClassContextPrint(aot_file->GetClassContext());
            LOG(FATAL, AOT) << "Aborting due to mismatched class hierarchy";
            return true;
        }
        EVENT_AOT_MANAGER(aot_file->GetFileName(), events::AotManagerAction::VERIFIED);
        return false;
    };

    for (auto &cur_aot_file : aot_files_) {
        verify_aot(cur_aot_file);
    }
}

void AotManager::RegisterAotStringRoot(ObjectHeader **slot, bool is_young)
{
    os::memory::LockHolder lock(aot_string_roots_lock_);
    aot_string_gc_roots_.push_back(slot);
    // Atomic with acquire order reason: data race with aot_string_gc_roots_count_ with dependencies on reads after
    // the load which should become visible
    size_t roots_count = aot_string_gc_roots_count_.load(std::memory_order_acquire);
    if (aot_string_young_set_.size() <= roots_count / MASK_WIDTH) {
        aot_string_young_set_.push_back(0);
    }
    if (is_young) {
        has_young_aot_string_refs_ = true;
        aot_string_young_set_[roots_count / MASK_WIDTH] |= 1ULL << (roots_count % MASK_WIDTH);
    }
    // Atomic with acq_rel order reason: data race with aot_string_gc_roots_count_ with dependencies on reads after
    // the load and on writes before the store
    aot_string_gc_roots_count_.fetch_add(1, std::memory_order_acq_rel);
}

bool AotClassContextCollector::operator()(const panda_file::File &pf)
{
    if (!acc_->empty()) {
        acc_->append(":");
    }
    acc_->append(os::GetAbsolutePath(pf.GetFilename()));
    acc_->append("*");
    acc_->append(std::to_string(pf.GetHeader()->checksum));
    return true;
}
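
// The accumulated class context is a ':'-separated list of entries of the form
// <absolute path>*<checksum>, e.g. (an illustrative value):
//   /system/framework/foo.abc*305419896:/system/framework/bar.abc*2271560481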

}  // namespace panda::compiler
@ -1,179 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_AOT_AOT_MANAGER_H
#define COMPILER_AOT_AOT_MANAGER_H

#include "aot_file.h"
#include "file.h"
#include "utils/arena_containers.h"
#include "runtime/include/mem/panda_containers.h"
#include "runtime/include/mem/panda_string.h"
#include "utils/expected.h"

namespace panda::compiler {
class RuntimeInterface;

class AotManager {
    using BitSetElement = uint32_t;
    static constexpr size_t MASK_WIDTH = BITS_PER_BYTE * sizeof(BitSetElement);

public:
    explicit AotManager() = default;

    NO_MOVE_SEMANTIC(AotManager);
    NO_COPY_SEMANTIC(AotManager);
    ~AotManager() = default;

    Expected<bool, std::string> AddFile(const std::string &file_name, RuntimeInterface *runtime, uint32_t gc_type,
                                        bool force = false);

    const AotFile *GetFile(const std::string &file_name) const;

    const AotPandaFile *FindPandaFile(const std::string &file_name);

    PandaString GetBootClassContext() const
    {
        return boot_class_context_;
    }

    void SetBootClassContext(PandaString context)
    {
        boot_class_context_ = std::move(context);
    }

    PandaString GetAppClassContext() const
    {
        return app_class_context_;
    }

    void SetAppClassContext(PandaString context)
    {
        app_class_context_ = std::move(context);
    }

    void VerifyClassHierarchy(bool only_boot = false);

    uint32_t GetAotStringRootsCount()
    {
        // Use the counter to get the roots count without acquiring the vector's lock.
        // Atomic with acquire order reason: data race with aot_string_gc_roots_count_ with dependencies on reads
        // after the load which should become visible
        return aot_string_gc_roots_count_.load(std::memory_order_acquire);
    }

    void RegisterAotStringRoot(ObjectHeader **slot, bool is_young);

    template <typename Callback>
    void VisitAotStringRoots(Callback cb, bool visit_only_young)
    {
        ASSERT(aot_string_gc_roots_.empty() ||
               (aot_string_young_set_.size() - 1) == (aot_string_gc_roots_.size() - 1) / MASK_WIDTH);
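        // aot_string_young_set_ is a bitmap with MASK_WIDTH bits per element:
        // bit i is set when root i may reference a young-generation object, so
        // a young-only visit can skip whole blocks whose mask is zero.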

        if (!visit_only_young) {
            for (auto root : aot_string_gc_roots_) {
                cb(root);
            }
            return;
        }

        if (!has_young_aot_string_refs_) {
            return;
        }

        // Atomic with acquire order reason: data race with aot_string_gc_roots_count_ with dependencies on reads
        // after the load which should become visible
        size_t total_roots = aot_string_gc_roots_count_.load(std::memory_order_acquire);
        for (size_t idx = 0; idx < aot_string_young_set_.size(); idx++) {
            auto mask = aot_string_young_set_[idx];
            if (mask == 0) {
                continue;
            }
            for (size_t offset = 0; offset < MASK_WIDTH && idx * MASK_WIDTH + offset < total_roots; offset++) {
                if ((mask & (1ULL << offset)) != 0) {
                    cb(aot_string_gc_roots_[idx * MASK_WIDTH + offset]);
                }
            }
        }
    }

    template <typename Callback, typename IsYoungPredicate>
    void UpdateAotStringRoots(Callback cb, IsYoungPredicate p)
    {
        ASSERT(aot_string_gc_roots_.empty() ||
               (aot_string_young_set_.size() - 1) == (aot_string_gc_roots_.size() - 1) / MASK_WIDTH);

        has_young_aot_string_refs_ = false;
        size_t idx = 0;
        for (auto root : aot_string_gc_roots_) {
            cb(root);
            uint64_t bitmask = 1ULL << (idx % MASK_WIDTH);

            if ((aot_string_young_set_[idx / MASK_WIDTH] & bitmask) != 0) {
                bool is_young = p(*root);
                has_young_aot_string_refs_ |= is_young;
                if (!is_young) {
                    aot_string_young_set_[idx / MASK_WIDTH] &= ~bitmask;
                }
            }

            idx++;
        }
    }

    bool InAotFileRange(uintptr_t pc)
    {
        for (auto &aot_file : aot_files_) {
            auto code = reinterpret_cast<uintptr_t>(aot_file->GetCode());
            if (pc >= code && pc < code + reinterpret_cast<uintptr_t>(aot_file->GetCodeSize())) {
                return true;
            }
        }
        return false;
    }

    bool HasAotFiles()
    {
        return !aot_files_.empty();
    }

private:
    PandaVector<std::unique_ptr<AotFile>> aot_files_;
    PandaUnorderedMap<std::string, AotPandaFile> files_map_;
    PandaString boot_class_context_;
    PandaString app_class_context_;

    os::memory::RecursiveMutex aot_string_roots_lock_;
    PandaVector<ObjectHeader **> aot_string_gc_roots_;
    std::atomic_uint32_t aot_string_gc_roots_count_ {0};
    bool has_young_aot_string_refs_ {false};
    PandaVector<BitSetElement> aot_string_young_set_;
};

class AotClassContextCollector {
public:
    explicit AotClassContextCollector(PandaString *acc) : acc_(acc) {}
    bool operator()(const panda_file::File &pf);

    DEFAULT_MOVE_SEMANTIC(AotClassContextCollector);
    DEFAULT_COPY_SEMANTIC(AotClassContextCollector);
    ~AotClassContextCollector() = default;

private:
    PandaString *acc_;
};
}  // namespace panda::compiler

#endif  // COMPILER_AOT_AOT_MANAGER_H
@ -1,106 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_AOT_COMPILED_METHOD_H
#define COMPILER_AOT_COMPILED_METHOD_H

#include "utils/arch.h"
#include "utils/span.h"
#include "compiler/code_info/code_info.h"
#include "compiler/optimizer/code_generator/callconv.h"

#include <cstdint>
#include <vector>

namespace panda {
class Method;
}  // namespace panda

namespace panda::compiler {
class CompiledMethod {
public:
    CompiledMethod(Arch arch, Method *method) : arch_(arch), method_(method) {}
    NO_COPY_OPERATOR(CompiledMethod);
    DEFAULT_COPY_CTOR(CompiledMethod)
    DEFAULT_MOVE_SEMANTIC(CompiledMethod);
    ~CompiledMethod() = default;

    void SetCode(Span<const uint8_t> data)
    {
        code_.reserve(data.size());
        std::copy(data.begin(), data.end(), std::back_inserter(code_));
    }

    void SetCodeInfo(Span<const uint8_t> data)
    {
        code_info_.reserve(data.size());
        std::copy(data.begin(), data.end(), std::back_inserter(code_info_));
    }

    Method *GetMethod()
    {
        return method_;
    }

    const Method *GetMethod() const
    {
        return method_;
    }

    Span<const uint8_t> GetCode() const
    {
        return Span(code_);
    }

    Span<const uint8_t> GetCodeInfo() const
    {
        return Span(code_info_);
    }

    size_t GetOverallSize() const
    {
        return RoundUp(CodePrefix::STRUCT_SIZE, GetCodeAlignment(arch_)) + RoundUp(code_.size(), CodeInfo::ALIGNMENT) +
               RoundUp(code_info_.size(), CodeInfo::SIZE_ALIGNMENT);
    }
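    // I.e. the prefix, code, and code-info chunks, each rounded up to its
    // alignment; this matches the method entry layout that
    // CodeDataProvider::FillData writes into the .text section.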

#ifdef PANDA_COMPILER_CFI
    CfiInfo &GetCfiInfo()
    {
        return cfi_info_;
    }

    const CfiInfo &GetCfiInfo() const
    {
        return cfi_info_;
    }

    void SetCfiInfo(const CfiInfo &cfi_info)
    {
        cfi_info_ = cfi_info;
    }
#endif

private:
    Arch arch_ {RUNTIME_ARCH};
    Method *method_ {nullptr};
    std::vector<uint8_t> code_;
    std::vector<uint8_t> code_info_;
#ifdef PANDA_COMPILER_CFI
    CfiInfo cfi_info_;
#endif
};
}  // namespace panda::compiler

#endif  // COMPILER_AOT_COMPILED_METHOD_H
@ -1,61 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

add_custom_target(benchmark_coverage DEPENDS benchmark_coverage_arm64 benchmark_coverage_x86_64 benchmark_coverage_arm)
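
# Typical usage (an illustrative invocation): build one of the per-arch targets
# below, e.g. `make benchmark_coverage_arm64_aot`, which builds pandasm/paoc and
# runs benchmark_coverage.sh for that architecture and paoc mode.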

# arm64 targets:
add_custom_target(benchmark_coverage_arm64 DEPENDS benchmark_coverage_arm64_aot benchmark_coverage_arm64_jit benchmark_coverage_arm64_osr)
add_custom_target(benchmark_coverage_arm64_aot DEPENDS benchmark_coverage_arm64_aot pandasm paoc)
add_custom_target(benchmark_coverage_arm64_jit DEPENDS benchmark_coverage_arm64_jit pandasm paoc)
add_custom_target(benchmark_coverage_arm64_osr DEPENDS benchmark_coverage_arm64_osr pandasm paoc)

# x86_64 targets:
add_custom_target(benchmark_coverage_x86_64 DEPENDS benchmark_coverage_x86_64_aot benchmark_coverage_x86_64_jit)
add_custom_target(benchmark_coverage_x86_64_aot DEPENDS benchmark_coverage_x86_64_aot pandasm paoc)
add_custom_target(benchmark_coverage_x86_64_jit DEPENDS benchmark_coverage_x86_64_jit pandasm paoc)

# arm32 targets:
add_custom_target(benchmark_coverage_arm DEPENDS benchmark_coverage_arm_jit)
add_custom_target(benchmark_coverage_arm_jit DEPENDS benchmark_coverage_arm_jit pandasm paoc)

# Build rules:
add_custom_command(TARGET benchmark_coverage_arm64_aot POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=arm64 --paoc-mode=aot 2>/dev/null
)

add_custom_command(TARGET benchmark_coverage_arm64_jit POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=arm64 --paoc-mode=jit 2>/dev/null
)

add_custom_command(TARGET benchmark_coverage_arm64_osr POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=arm64 --paoc-mode=osr 2>/dev/null
)

add_custom_command(TARGET benchmark_coverage_x86_64_jit POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=x86_64 --paoc-mode=jit 2>/dev/null
)

add_custom_command(TARGET benchmark_coverage_x86_64_aot POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=x86_64 --paoc-mode=aot 2>/dev/null
)

# TODO(asidorov): enable after AOT is supported on arm32
#add_custom_command(TARGET benchmark_coverage_arm_jit POST_BUILD
#    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
#    COMMAND bash ${PANDA_ROOT}/compiler/tools/benchmark_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT} --target-arch=arm --paoc-mode=jit
#)
@ -1,50 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

option(ENABLE_COMPILER_COVERAGE "Enable coverage calculation for the compiler" false)

find_program(
    LCOV
    NAMES "lcov"
    DOC "Path to lcov executable")
if(NOT LCOV)
    set(ENABLE_COMPILER_COVERAGE false)
endif()

find_program(
    GENHTML
    NAMES "genhtml"
    DOC "Path to genhtml executable")
if(NOT GENHTML)
    set(ENABLE_COMPILER_COVERAGE false)
endif()

if(ENABLE_COMPILER_COVERAGE)
    # Set coverage options
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage")
    add_custom_target(coverage DEPENDS cts-assembly tests benchmarks) # Execute tests
    set(ADD_COV_FLAGS --quiet --rc lcov_branch_coverage=1)
    add_custom_command(TARGET coverage POST_BUILD
        WORKING_DIRECTORY ${PANDA_BINARY_ROOT}
        # Update current coverage info
        COMMAND lcov --no-external -b ${PANDA_ROOT}/compiler -d ${CMAKE_CURRENT_BINARY_DIR} -c -o compiler_coverage.info ${ADD_COV_FLAGS}
        # Generate an HTML report
        COMMAND genhtml -o compiler_coverage_report compiler_coverage.info --ignore-errors source ${ADD_COV_FLAGS}
        COMMAND echo "Coverage report: ${PANDA_BINARY_ROOT}/compiler_coverage_report"
        # Delete temporary files used to collect statistics
        COMMAND rm compiler_coverage.info
        COMMAND find ${PANDA_BINARY_ROOT}/* -iname "*.gcda" -delete
    )
else()
    message(STATUS "Coverage will not be calculated (may be enabled by -DENABLE_COMPILER_COVERAGE=true).")
endif(ENABLE_COMPILER_COVERAGE)
@ -1,25 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

add_custom_target(ir_builder_coverage DEPENDS compiler_unit_tests)
add_custom_target(pbc_2_ir_doc_gen)

add_custom_command(TARGET ir_builder_coverage POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/ir_builder_coverage.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT}
)

add_custom_command(TARGET pbc_2_ir_doc_gen POST_BUILD
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
    COMMAND bash ${PANDA_ROOT}/compiler/tools/pbc_2_ir_doc_gen.sh --binary-dir=${PANDA_BINARY_ROOT} --root-dir=${PANDA_ROOT}
)
@ -1,60 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set(PANDA_COMPILER_MULTI_TARGET OFF)

# Define a function just to restrict the scope of variables
function(panda_check_compiler_multi_target)
    set(compiler_any_target OFF)
    foreach(target X86 X86_64 AARCH32 AARCH64)
        set(target_name "PANDA_COMPILER_TARGET_${target}")
        if(compiler_any_target AND ${target_name})
            set(PANDA_COMPILER_MULTI_TARGET ON PARENT_SCOPE)
        endif()
        if(${target_name})
            set(compiler_any_target ON)
        endif()
    endforeach()
endfunction()
panda_check_compiler_multi_target()

panda_promote_to_definitions(
    PANDA_COMPILER_MULTI_TARGET
    PANDA_COMPILER_CFI
)

if (PANDA_COMPILER_TARGET_AARCH32)
    if (PANDA_TARGET_ARM64)
        message(FATAL_ERROR "Unimplemented multi-build aarch32 on aarch64-target")
    endif()
    if (PANDA_TARGET_X86)
        message(FATAL_ERROR "Unimplemented multi-build aarch32 on x86-build-target")
    endif()
    # The ABI must be defined for builds on non-arm architectures
    if (NOT(PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP OR PANDA_TARGET_ARM32_ABI_HARD))
        panda_set_flag(PANDA_TARGET_ARM32_ABI_SOFTFP)
    endif()
endif()

if (PANDA_COMPILER_TARGET_AARCH64)
    if (PANDA_TARGET_ARM32)
        message(FATAL_ERROR "Unimplemented multi-build aarch64 on aarch32-target")
    endif()
    if (PANDA_TARGET_X86)
        message(FATAL_ERROR "Unimplemented multi-build aarch64 on x86-build-target")
    endif()
endif()

message(STATUS "Compiler backends status:")
message(STATUS "PANDA_COMPILER_MULTI_TARGET = ${PANDA_COMPILER_MULTI_TARGET}")
message(STATUS "PANDA_COMPILER_TARGET: X86=${PANDA_COMPILER_TARGET_X86} X86_64=${PANDA_COMPILER_TARGET_X86_64} AARCH32=${PANDA_COMPILER_TARGET_AARCH32} AARCH64=${PANDA_COMPILER_TARGET_AARCH64}")
@ -1,65 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_info.h"
#include "utils/bit_memory_region-inl.h"

namespace panda::compiler {

void CodeInfo::Dump(std::ostream &stream) const
{
    stream << "CodeInfo: vregs_num=" << GetHeader().GetVRegsCount() << ", frame_size=" << GetHeader().GetFrameSize()
           << std::endl;
    EnumerateTables([this, &stream](size_t index, auto member) {
        if (HasTable(index)) {
            const auto &table = this->*member;
            table.Dump(stream);
        }
    });
}

void CodeInfo::Dump(std::ostream &stream, const StackMap &stack_map, Arch arch) const
{
    stream << "Stackmap #" << stack_map.GetRow() << ": npc=0x" << std::hex << stack_map.GetNativePcUnpacked(arch)
           << ", bpc=0x" << std::hex << stack_map.GetBytecodePc();
    if (stack_map.HasInlineInfoIndex()) {
        stream << ", inline_depth=" << GetInlineDepth(stack_map) + 1;
    }
    if (stack_map.HasRootsRegMaskIndex() || stack_map.HasRootsStackMaskIndex()) {
        stream << ", roots=[";
        const char *sep = "";
        if (stack_map.HasRootsRegMaskIndex()) {
            stream << "r:0x" << std::hex << GetRootsRegMask(stack_map);
            sep = ",";
        }
        if (stack_map.HasRootsStackMaskIndex()) {
            auto region = GetRootsStackMask(stack_map);
            stream << sep << "s:" << region;
        }
        stream << "]";
    }
    if (stack_map.HasVRegMaskIndex()) {
        stream << ", vregs=" << GetVRegMask(stack_map);
    }
}

void CodeInfo::DumpInlineInfo(std::ostream &stream, const StackMap &stack_map, int depth) const
{
    auto ii = GetInlineInfo(stack_map, depth);
    stream << "InlineInfo #" << depth << ": bpc=0x" << std::hex << ii.GetBytecodePc() << std::dec
           << ", vregs_num: " << ii.GetVRegsCount();
}

}  // namespace panda::compiler
@ -1,610 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_CODE_INFO_H
#define PANDA_CODE_INFO_H

#include "code_info_tables.h"
#include "utils/bit_field.h"
#include "utils/bit_table.h"
#include "utils/cframe_layout.h"
#include "utils/small_vector.h"
#include "utils/span.h"

namespace panda::compiler {

/*
 *
 * Compiled code layout:
 * +-------------+
 * | CodePrefix  |
 * +-------------+ <- Method::CompiledCodeEntrypoint
 * | Code        |
 * +-------------+-----------------+
 * | CodeInfo    | CodeInfoHeader  |
 * |             |-----------------+----------------------+
 * |             |                 | StackMap             |
 * |             |                 | InlineInfo           |
 * |             |                 | Roots Reg Mask       |
 * |             |                 | Roots Stack Mask     |
 * |             |   Bit Tables    | Method indexes       |
 * |             |                 | VRegs mask           |
 * |             |                 | VRegs map            |
 * |             |                 | VRegs catalogue      |
 * |             |                 | Implicit Nullchecks  |
 * |             |                 | Constants            |
 * |-------------+-----------------+----------------------+
 */

struct CodePrefix {
    static constexpr uint32_t MAGIC = 0xaccadeca;
    uint32_t magic {MAGIC};
    uint32_t code_size {};
    uint32_t code_info_offset {};
    uint32_t code_info_size {};

    static constexpr size_t STRUCT_SIZE = 16;
};

static_assert(sizeof(CodePrefix) == CodePrefix::STRUCT_SIZE);

class CodeInfoHeader {
public:
    enum Elements { PROPERTIES, CALLEE_REG_MASK, CALLEE_FP_REG_MASK, TABLE_MASK, VREGS_COUNT, SIZE };

    void SetFrameSize(uint32_t size)
    {
        ASSERT(MinimumBitsToStore(size) <= FRAME_SIZE_FIELD_WIDTH);
        FieldFrameSize::Set(size, &data_[PROPERTIES]);
    }
    uint32_t GetFrameSize() const
    {
        return FieldFrameSize::Get(data_[PROPERTIES]);
    }

    void SetCalleeRegMask(uint32_t value)
    {
        data_[CALLEE_REG_MASK] = value;
    }
    uint32_t GetCalleeRegMask() const
    {
        return data_[CALLEE_REG_MASK];
    }

    void SetCalleeFpRegMask(uint32_t value)
    {
        data_[CALLEE_FP_REG_MASK] = value;
    }
    uint32_t GetCalleeFpRegMask() const
    {
        return data_[CALLEE_FP_REG_MASK];
    }

    void SetTableMask(uint32_t value)
    {
        data_[TABLE_MASK] = value;
    }
    uint32_t GetTableMask() const
    {
        return data_[TABLE_MASK];
    }

    void SetVRegsCount(uint32_t value)
    {
        data_[VREGS_COUNT] = value;
    }
    uint32_t GetVRegsCount() const
    {
        return data_[VREGS_COUNT];
    }

    void SetHasFloatRegs(bool value)
    {
        HasFloatRegsFlag::Set(value, &data_[PROPERTIES]);
    }
    bool HasFloatRegs() const
    {
        return HasFloatRegsFlag::Get(data_[PROPERTIES]);
    }

    template <typename Container>
    void Encode(BitMemoryStreamOut<Container> &out)
    {
        VarintPack::Write(out, data_);
    }
    void Decode(BitMemoryStreamIn *in)
    {
        data_ = VarintPack::Read<SIZE>(in);
    }

private:
    std::array<uint32_t, SIZE> data_;

    static constexpr size_t FRAME_SIZE_FIELD_WIDTH = 16;
    using FieldFrameSize = BitField<uint32_t, 0, FRAME_SIZE_FIELD_WIDTH>;
    using HasFloatRegsFlag = FieldFrameSize::NextFlag;
};

class CodeInfo final {
public:
    static constexpr size_t TABLES_COUNT = 10;
    static constexpr size_t VREG_LIST_STATIC_SIZE = 16;
    static constexpr size_t ALIGNMENT = sizeof(uint64_t);
    static constexpr size_t SIZE_ALIGNMENT = sizeof(uint64_t);

    template <typename Allocator>
    using VRegList = SmallVector<VRegInfo, VREG_LIST_STATIC_SIZE, Allocator, true>;

    NO_COPY_SEMANTIC(CodeInfo);
    NO_MOVE_SEMANTIC(CodeInfo);

    CodeInfo() = default;

    CodeInfo(const void *data, size_t size)
        : CodeInfo(Span<const uint8_t>(reinterpret_cast<const uint8_t *>(data), size))
    {
    }

    explicit CodeInfo(Span<const uint8_t> code) : CodeInfo(code.data())
    {
        ASSERT(GetDataSize() <= code.size());
    }

    explicit CodeInfo(Span<uint8_t> code) : CodeInfo(code.data())
    {
        ASSERT(GetDataSize() <= code.size());
    }

    explicit CodeInfo(const void *code_entry)
    {
        ASSERT(code_entry != nullptr);
        auto prefix = reinterpret_cast<const CodePrefix *>(code_entry);
        ASSERT(prefix->magic == CodePrefix::MAGIC);
        data_ = Span(reinterpret_cast<const uint8_t *>(code_entry), prefix->code_info_offset + prefix->code_info_size);
        auto code_info = Span<const uint8_t>(&data_[prefix->code_info_offset], prefix->code_info_size);
        Decode(code_info);
    }

    virtual ~CodeInfo() = default;

    static const void *GetCodeOriginFromEntryPoint(const void *data)
    {
        return reinterpret_cast<const void *>(reinterpret_cast<uintptr_t>(data) -
                                              CodeInfo::GetCodeOffset(RUNTIME_ARCH));
    }

    static CodeInfo CreateFromCodeEntryPoint(const void *data)
    {
        ASSERT(data != nullptr);
        // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
        return CodeInfo(reinterpret_cast<const uint8_t *>(data) - CodeInfo::GetCodeOffset(RUNTIME_ARCH));
    }

    void Decode(Span<const uint8_t> code_info)
    {
        BitMemoryStreamIn stream(const_cast<uint8_t *>(code_info.data()), code_info.size() * BITS_PER_BYTE);
        header_.Decode(&stream);
        EnumerateTables([this, &stream](size_t index, auto member) {
            if (HasTable(index)) {
                (this->*member).Decode(&stream);
            }
        });
    }

    const CodeInfoHeader &GetHeader() const
    {
        return header_;
    }
    CodeInfoHeader &GetHeader()
    {
        return header_;
    }

    const CodePrefix *GetPrefix() const
    {
        return reinterpret_cast<const CodePrefix *>(data_.data());
    }

    uint32_t GetFrameSize() const
    {
        return GetHeader().GetFrameSize();
    }

    const uint8_t *GetData()
    {
        return data_.data();
    }

    size_t GetDataSize()
    {
        return data_.size();
    }

    const uint8_t *GetCode() const
    {
        return &data_[CodeInfo::GetCodeOffset(RUNTIME_ARCH)];
    }

    size_t GetCodeSize() const
    {
        return GetPrefix()->code_size;
    }

    Span<const uint8_t> GetCodeSpan() const
    {
        return {&data_[CodeInfo::GetCodeOffset(RUNTIME_ARCH)], GetCodeSize()};
    }

    size_t GetInfoSize() const
    {
        return GetPrefix()->code_info_size;
    }

    bool HasTable(size_t index) const
    {
        return (GetHeader().GetTableMask() & (1U << index)) != 0;
    }

    std::variant<void *, uint32_t> GetMethod(const StackMap &stackmap, int inline_depth)
    {
        ASSERT(inline_depth >= 0);
        auto inline_info = inline_infos_.GetRow(stackmap.GetInlineInfoIndex() + inline_depth);
        if (inline_info.HasMethodLow()) {
            if constexpr (ArchTraits<RUNTIME_ARCH>::IS_64_BITS) {
                uintptr_t val =
                    inline_info.GetMethodLow() | (static_cast<uint64_t>(inline_info.GetMethodHi()) << BITS_PER_UINT32);
                return reinterpret_cast<void *>(val);
            } else {
                return reinterpret_cast<void *>(inline_info.GetMethodLow());
            }
        }
        return method_ids_.GetRow(inline_info.GetMethodIdIndex()).GetId();
    }

    uint64_t GetConstant(const VRegInfo &vreg) const
    {
        ASSERT(vreg.GetLocation() == VRegInfo::Location::CONSTANT);
        uint64_t low = constant_table_.GetRow(vreg.GetConstantLowIndex()).GetValue();
        uint64_t hi = constant_table_.GetRow(vreg.GetConstantHiIndex()).GetValue();
        return low | (hi << BITS_PER_UINT32);
    }

    static size_t GetCodeOffset(Arch arch)
    {
        return RoundUp(CodePrefix::STRUCT_SIZE, GetCodeAlignment(arch));
    }

    uint32_t GetSavedCalleeRegsMask(bool is_fp) const
    {
        return is_fp ? GetHeader().GetCalleeFpRegMask() : GetHeader().GetCalleeRegMask();
    }

    auto GetVRegMask(const StackMap &stack_map)
    {
        return stack_map.HasVRegMaskIndex() ? vreg_masks_.GetBitMemoryRegion(stack_map.GetVRegMaskIndex())
                                            : BitMemoryRegion<const uint8_t>();
    }

    auto GetVRegMask(const StackMap &stack_map) const
    {
        return const_cast<CodeInfo *>(this)->GetVRegMask(stack_map);
    }

    size_t GetVRegCount(const StackMap &stack_map) const
    {
        return GetVRegMask(stack_map).Popcount();
    }

    uint32_t GetRootsRegMask(const StackMap &stack_map) const
    {
        return stack_map.HasRootsRegMaskIndex() ? roots_reg_masks_.GetRow(stack_map.GetRootsRegMaskIndex()).GetMask()
                                                : 0;
    }

    auto GetRootsStackMask(const StackMap &stack_map) const
    {
        return stack_map.HasRootsStackMaskIndex()
                   ? roots_stack_masks_.GetBitMemoryRegion(stack_map.GetRootsStackMaskIndex())
                   : BitMemoryRegion<const uint8_t>();
    }

    auto GetInlineInfos(const StackMap &stack_map)
    {
        if (!stack_map.HasInlineInfoIndex()) {
            return inline_infos_.GetRangeReversed(0, 0);
        }
        auto index = stack_map.GetInlineInfoIndex();
        uint32_t size = index;
        for (; inline_infos_.GetRow(size).GetIsLast() == 0; size++) {
        }

        return inline_infos_.GetRangeReversed(index, helpers::ToSigned(size) + 1);
    }

    auto GetInlineInfo(const StackMap &stack_map, int inline_depth) const
    {
        ASSERT(stack_map.HasInlineInfoIndex());
        CHECK_GE(GetInlineDepth(stack_map), inline_depth);
        return inline_infos_.GetRow(stack_map.GetInlineInfoIndex() + inline_depth);
    }

    int GetInlineDepth(const StackMap &stack_map) const
    {
        if (!stack_map.HasInlineInfoIndex()) {
            return -1;
        }
        int index = stack_map.GetInlineInfoIndex();
        int depth = index;
        for (; inline_infos_.GetRow(depth).GetIsLast() == 0; depth++) {
        }
        return depth - index;
    }

    StackMap FindStackMapForNativePc(uint32_t pc, Arch arch = RUNTIME_ARCH) const
    {
        auto it =
            std::lower_bound(stack_maps_.begin(), stack_maps_.end(), pc, [arch](const auto &a, uintptr_t counter) {
                return a.GetNativePcUnpacked(arch) < counter;
            });
        return (it == stack_maps_.end() || it->GetNativePcUnpacked(arch) != pc) ? stack_maps_.GetInvalidRow() : *it;
    }

    StackMap FindOsrStackMap(uint32_t pc) const
    {
        auto it = std::find_if(stack_maps_.begin(), stack_maps_.end(),
                               [pc](const auto &a) { return a.GetBytecodePc() == pc && a.IsOsr(); });
        return it == stack_maps_.end() ? stack_maps_.GetInvalidRow() : *it;
    }

    auto GetStackMap(size_t index) const
    {
        return StackMap(&stack_maps_, index);
    }

    auto &GetStackMaps()
    {
        return stack_maps_;
    }

    auto &GetVRegCatalogue()
    {
        return vregs_catalogue_;
    }

    auto &GetVRegMapTable()
    {
        return vregs_map_;
    }

    auto &GetVRegMaskTable()
    {
        return vreg_masks_;
    }

    auto &GetInlineInfosTable()
    {
        return inline_infos_;
    }

    auto &GetConstantTable()
    {
        return constant_table_;
    }

    const auto &GetImplicitNullChecksTable() const
    {
        return implicit_nullchecks_;
    }

    bool HasFloatRegs() const
    {
        return GetHeader().HasFloatRegs();
    }

    template <typename Func>
    static void EnumerateTables(Func func)
    {
        size_t index = 0;
        func(index++, &CodeInfo::stack_maps_);
        func(index++, &CodeInfo::inline_infos_);
        func(index++, &CodeInfo::roots_reg_masks_);
        func(index++, &CodeInfo::roots_stack_masks_);
        func(index++, &CodeInfo::method_ids_);
        func(index++, &CodeInfo::vreg_masks_);
        func(index++, &CodeInfo::vregs_map_);
        func(index++, &CodeInfo::vregs_catalogue_);
        func(index++, &CodeInfo::implicit_nullchecks_);
        func(index++, &CodeInfo::constant_table_);
        ASSERT(index == TABLES_COUNT);
    }

    template <typename Callback>
    void EnumerateStaticRoots(const StackMap &stack_map, Callback callback)
    {
        return EnumerateRoots<Callback, false>(stack_map, callback);
    }

    template <typename Callback>
    void EnumerateDynamicRoots(const StackMap &stack_map, Callback callback)
    {
        return EnumerateRoots<Callback, true>(stack_map, callback);
    }

    template <typename Allocator>
    VRegList<Allocator> GetVRegList(StackMap stack_map, uint32_t first_vreg, uint32_t vregs_count,
                                    Allocator *allocator = nullptr) const
    {
        if (vregs_count == 0 || !stack_map.HasRegMap()) {
            return CodeInfo::VRegList<Allocator>(allocator);
        }
        VRegList<Allocator> vreg_list(allocator);
        vreg_list.resize(vregs_count, VRegInfo());
        ASSERT(!vreg_list[0].IsLive());
        std::vector<bool> reg_set(vregs_count);

        uint32_t remaining_registers = vregs_count;
        for (int sindex = stack_map.GetRow(); sindex >= 0 && remaining_registers > 0; sindex--) {
            stack_map = GetStackMap(sindex);
            if (!stack_map.HasVRegMaskIndex()) {
                continue;
            }
            // Skip stackmaps that are not in the same inline depth
            auto vreg_mask = GetVRegMask(stack_map);
            if (vreg_mask.Size() <= first_vreg) {
                continue;
            }
            ASSERT(stack_map.HasVRegMapIndex());
            uint32_t map_index = stack_map.GetVRegMapIndex();

            map_index += vreg_mask.Popcount(0, first_vreg);
            vreg_mask = vreg_mask.Subregion(first_vreg, vreg_mask.Size() - first_vreg);

            uint32_t end = std::min<uint32_t>(vreg_mask.Size(), vregs_count);
            for (size_t i = 0; i < end; i += BITS_PER_UINT32) {
                uint32_t mask = vreg_mask.Read(i, std::min<uint32_t>(end - i, BITS_PER_UINT32));
                while (mask != 0) {
                    uint32_t reg_idx = Ctz(mask);
                    if (!reg_set[i + reg_idx]) {
                        auto vreg_index = vregs_map_.GetRow(map_index);
                        if (vreg_index.GetIndex() != StackMap::NO_VALUE) {
                            ASSERT(!vreg_list[i + reg_idx].IsLive());
                            vreg_list[i + reg_idx] = vregs_catalogue_.GetRow(vreg_index.GetIndex()).GetVRegInfo();
                            vreg_list[i + reg_idx].SetIndex(i + reg_idx);
                        }
                        remaining_registers--;
                        reg_set[i + reg_idx] = true;
                    }
                    map_index++;
                    mask ^= 1U << reg_idx;
                }
            }
        }
        return vreg_list;
    }

    template <typename Allocator>
    VRegList<Allocator> GetVRegList(StackMap stack_map, int inline_depth, Allocator *allocator = nullptr) const
    {
        if (inline_depth < 0) {
            return GetVRegList<Allocator>(stack_map, 0, GetHeader().GetVRegsCount(), allocator);
        }
        ASSERT(stack_map.HasInlineInfoIndex());
        auto inline_info = GetInlineInfo(stack_map, inline_depth);
        if (inline_info.GetVRegsCount() == 0) {
            return VRegList<Allocator>(allocator);
        }
        auto depth = inline_info.GetRow() - stack_map.GetInlineInfoIndex();
        uint32_t first =
            depth == 0 ? GetHeader().GetVRegsCount() : inline_infos_.GetRow(inline_info.GetRow() - 1).GetVRegsCount();
        ASSERT(inline_info.GetVRegsCount() >= first);
        return GetVRegList<Allocator>(stack_map, first, inline_info.GetVRegsCount() - first, allocator);
    }

    template <typename Allocator>
    VRegList<Allocator> GetVRegList(StackMap stack_map, Allocator *allocator = nullptr) const
    {
        return GetVRegList<Allocator>(stack_map, -1, allocator);
    }

    static bool VerifyCompiledEntry(uintptr_t compiled_entry)
    {
        auto codeheader = compiled_entry - GetCodeOffset(RUNTIME_ARCH);
        return (*reinterpret_cast<const uint32_t *>(codeheader) == CodePrefix::MAGIC);
    }

    void Dump(std::ostream &stream) const;

    void Dump(std::ostream &stream, const StackMap &stack_map, Arch arch = RUNTIME_ARCH) const;

    void DumpInlineInfo(std::ostream &stream, const StackMap &stack_map, int depth) const;

    size_t CountSpillSlots()
    {
        auto frame_slots = GetFrameSize() / PointerSize(RUNTIME_ARCH);
        auto spills_count = frame_slots - (static_cast<size_t>(CFrameSlots::Start()) + GetRegsCount(RUNTIME_ARCH) + 1U);
        // Reverse 'CFrameLayout::AlignSpillCount' counting
        if (RUNTIME_ARCH == Arch::AARCH32) {
            spills_count = spills_count / 2U - 1;
        }
        if (spills_count % 2U != 0) {
            spills_count--;
        }
        return spills_count;
    }

private:
    template <typename Callback, bool is_dynamic>
    void EnumerateRoots(const StackMap &stack_map, Callback callback);

    BitTable<StackMap> stack_maps_;
    BitTable<InlineInfo> inline_infos_;
    BitTable<RegisterMask> roots_reg_masks_;
    BitTable<StackMask> roots_stack_masks_;
    BitTable<MethodId> method_ids_;
    BitTable<VRegisterInfo> vregs_catalogue_;
    BitTable<VRegisterCatalogueIndex> vregs_map_;
    BitTable<VRegisterMask> vreg_masks_;
    BitTable<ImplicitNullChecks> implicit_nullchecks_;
    BitTable<ConstantTable> constant_table_;

    CodeInfoHeader header_ {};

    Span<const uint8_t> data_;
};

template <typename Callback, bool is_dynamic>
void CodeInfo::EnumerateRoots(const StackMap &stack_map, Callback callback)
{
    auto root_type = is_dynamic ? VRegInfo::Type::ANY : VRegInfo::Type::OBJECT;

    if (stack_map.HasRootsRegMaskIndex()) {
        auto reg_mask = roots_reg_masks_.GetRow(stack_map.GetRootsRegMaskIndex()).GetMask();
        ArenaBitVectorSpan vec(&reg_mask, BITS_PER_UINT32);
        for (auto reg_idx : vec.GetSetBitsIndices()) {
            if (!callback(VRegInfo(reg_idx, VRegInfo::Location::REGISTER, root_type, false))) {
                return;
            }
        }
    }
    // Simplify after renumbering stack slots
    if (stack_map.HasRootsStackMaskIndex()) {
        auto stack_slots_count = CountSpillSlots();
        auto reg_mask = roots_stack_masks_.GetBitMemoryRegion(stack_map.GetRootsStackMaskIndex());
        for (auto reg_idx : reg_mask) {
            if (reg_idx >= stack_slots_count) {
                // Parameter slots' indexes are added to the root mask with a `stack_slots_count` offset to
                // distinguish them from spill slots
                auto param_slot_idx = reg_idx - stack_slots_count;
                reg_idx = static_cast<size_t>(CFrameLayout::StackArgSlot::Start()) - param_slot_idx -
                          static_cast<size_t>(CFrameSlots::Start());
            } else {
                if constexpr (!ArchTraits<RUNTIME_ARCH>::IS_64_BITS) {  // NOLINT
                    reg_idx = (reg_idx << 1U) + 1;
                }
                // Stack roots begin at the spill/fill stack origin, so the index is adjusted according to the
                // registers buffer
                reg_idx += GetRegsCount(RUNTIME_ARCH);
            }
            VRegInfo vreg(reg_idx, VRegInfo::Location::SLOT, root_type, false);
            if (!callback(vreg)) {
                return;
            }
        }
    }
}

}  // namespace panda::compiler

#endif  // PANDA_CODE_INFO_H
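A hedged read-side sketch of the class above: given a compiled entry point, recover the CodeInfo and dump the stack map for a native PC. The entry pointer and PC are hypothetical, and StackMap::IsValid() is assumed to come from the BitTableRow base (FindStackMapForNativePc returns GetInvalidRow() on a miss):

#include <ostream>
#include "code_info.h"  // assumes the header above is on the include path

namespace panda::compiler {
void InspectStackMap(const void *compiled_entry, uint32_t native_pc, std::ostream &os)
{
    // The entry point is preceded by CodePrefix, so the CodeInfo can be recovered from it.
    CodeInfo code_info = CodeInfo::CreateFromCodeEntryPoint(compiled_entry);
    StackMap map = code_info.FindStackMapForNativePc(native_pc);
    if (!map.IsValid()) {  // assumed BitTableRow API
        return;            // no safepoint recorded exactly at this PC
    }
    code_info.Dump(os, map);  // prints npc/bpc plus root and vreg masks
}
}  // namespace panda::compiler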
@ -1,232 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_info_builder.h"
#include "utils/bit_memory_region-inl.h"

namespace panda::compiler {

void CodeInfoBuilder::BeginMethod(uint32_t frame_size, uint32_t vregs_count)
{
#ifndef NDEBUG
    ASSERT(!was_method_begin_);
    ASSERT(!was_stack_map_begin_);
    ASSERT(!was_inline_info_begin_);
    was_method_begin_ = true;
#endif

    header_.SetFrameSize(frame_size);
    vregs_count_ = vregs_count;
    constant_table_.Add({0});
}

void CodeInfoBuilder::EndMethod()
{
#ifndef NDEBUG
    ASSERT(was_method_begin_);
    ASSERT(!was_stack_map_begin_);
    ASSERT(!was_inline_info_begin_);
    was_method_begin_ = false;
#endif
}

void CodeInfoBuilder::BeginStackMap(uint32_t bpc, uint32_t npc, ArenaBitVector *stack_roots, uint32_t regs_roots,
                                    bool require_vreg_map, bool is_osr)
{
#ifndef NDEBUG
    ASSERT(was_method_begin_);
    ASSERT(!was_stack_map_begin_);
    ASSERT(!was_inline_info_begin_);
    was_stack_map_begin_ = true;
#endif
    inline_info_stack_.clear();
    current_vregs_.clear();

    ASSERT(stack_maps_.GetSize() == 0 || npc >= stack_maps_.GetLast()[StackMap::COLUMN_NATIVE_PC]);

    current_vregs_count_ = require_vreg_map ? vregs_count_ : 0;

    current_stack_map_ = BitTableBuilder<StackMap>::Entry();
    current_stack_map_[StackMap::COLUMN_PROPERTIES] = StackMap::CreateProperties(is_osr, require_vreg_map);
    current_stack_map_[StackMap::COLUMN_BYTECODE_PC] = bpc;
    current_stack_map_[StackMap::COLUMN_NATIVE_PC] = StackMap::PackAddress(npc, arch_);
    if (regs_roots != 0) {
        current_stack_map_[StackMap::COLUMN_ROOTS_REG_MASK_INDEX] = roots_reg_masks_.Add({regs_roots});
    }
    if (stack_roots != nullptr && !stack_roots->empty()) {
        current_stack_map_[StackMap::COLUMN_ROOTS_STACK_MASK_INDEX] = roots_stack_masks_.Add(stack_roots->GetFixed());
    }
    // Ensure that stackmaps are inserted in sorted order
    if (stack_maps_.GetRowsCount() != 0) {
        ASSERT(current_stack_map_[StackMap::COLUMN_NATIVE_PC] >= stack_maps_.GetLast()[StackMap::COLUMN_NATIVE_PC]);
    }
}

void CodeInfoBuilder::EndStackMap()
{
#ifndef NDEBUG
    ASSERT(was_method_begin_);
    ASSERT(was_stack_map_begin_);
    ASSERT(!was_inline_info_begin_);
    was_stack_map_begin_ = false;
#endif
    if (!inline_info_stack_.empty()) {
        inline_info_stack_.back()[InlineInfo::COLUMN_IS_LAST] = static_cast<uint32_t>(true);
        current_stack_map_[StackMap::COLUMN_INLINE_INFO_INDEX] = inline_infos_.AddArray(Span(inline_info_stack_));
    }

    EmitVRegs();

    stack_maps_.Add(current_stack_map_);
}

void CodeInfoBuilder::DumpCurrentStackMap(std::ostream &stream) const
{
    stream << "Stackmap #" << stack_maps_.GetRowsCount() - 1 << ": npc=0x" << std::hex
           << StackMap::UnpackAddress(current_stack_map_[StackMap::COLUMN_NATIVE_PC], arch_) << ", bpc=0x" << std::hex
           << current_stack_map_[StackMap::COLUMN_BYTECODE_PC];
    if (current_stack_map_[StackMap::COLUMN_INLINE_INFO_INDEX] != StackMap::NO_VALUE) {
        stream << ", inline_depth=" << inline_info_stack_.size();
    }
    if (current_stack_map_[StackMap::COLUMN_ROOTS_REG_MASK_INDEX] != StackMap::NO_VALUE ||
        current_stack_map_[StackMap::COLUMN_ROOTS_STACK_MASK_INDEX] != StackMap::NO_VALUE) {
        stream << ", roots=[";
        const char *sep = "";
        if (current_stack_map_[StackMap::COLUMN_ROOTS_REG_MASK_INDEX] != StackMap::NO_VALUE) {
            auto &entry = roots_reg_masks_.GetEntry(current_stack_map_[StackMap::COLUMN_ROOTS_REG_MASK_INDEX]);
            stream << "r:0x" << std::hex << entry[RegisterMask::COLUMN_MASK];
            sep = ",";
        }
        if (current_stack_map_[StackMap::COLUMN_ROOTS_STACK_MASK_INDEX] != StackMap::NO_VALUE) {
            auto region = roots_stack_masks_.GetEntry(current_stack_map_[StackMap::COLUMN_ROOTS_STACK_MASK_INDEX]);
            stream << sep << "s:" << region;
        }
        stream << "]";
    }
    if (current_stack_map_[StackMap::COLUMN_VREG_MASK_INDEX] != StackMap::NO_VALUE) {
        stream << ", vregs=" << vreg_masks_.GetEntry(current_stack_map_[StackMap::COLUMN_VREG_MASK_INDEX]);
    }
}

void CodeInfoBuilder::BeginInlineInfo(void *method, uint32_t method_id, uint32_t bpc, uint32_t vregs_count)
{
#ifndef NDEBUG
    ASSERT(was_method_begin_);
    ASSERT(was_stack_map_begin_);
    was_inline_info_begin_ = true;
#endif
    BitTableBuilder<InlineInfo>::Entry inline_info;
    current_vregs_count_ += vregs_count;

    inline_info[InlineInfo::COLUMN_IS_LAST] = static_cast<uint32_t>(false);
    inline_info[InlineInfo::COLUMN_BYTECODE_PC] = bpc;
    inline_info[InlineInfo::COLUMN_VREGS_COUNT] = current_vregs_count_;
    if (method != nullptr) {
        inline_info[InlineInfo::COLUMN_METHOD_HI] = High32Bits(method);
        inline_info[InlineInfo::COLUMN_METHOD_LOW] = Low32Bits(method);
    } else {
        ASSERT(method_id != 0);
        inline_info[InlineInfo::COLUMN_METHOD_ID_INDEX] = method_ids_.Add({method_id});
    }

    inline_info_stack_.push_back(inline_info);
}

void CodeInfoBuilder::EndInlineInfo()
{
#ifndef NDEBUG
    ASSERT(was_method_begin_);
    ASSERT(was_stack_map_begin_);
    ASSERT(was_inline_info_begin_);
    was_inline_info_begin_ = false;
#endif
    ASSERT(current_vregs_.size() == current_vregs_count_);
}

void CodeInfoBuilder::AddConstant(uint64_t value, VRegInfo::Type type, bool is_acc)
{
    VRegInfo vreg(0, VRegInfo::Location::CONSTANT, type, is_acc);
    uint32_t low = value & ((1LLU << BITS_PER_UINT32) - 1);
    uint32_t hi = (value >> BITS_PER_UINT32) & ((1LLU << BITS_PER_UINT32) - 1);
    vreg.SetConstantIndices(constant_table_.Add({low}), constant_table_.Add({hi}));
    current_vregs_.push_back(vreg);
}

void CodeInfoBuilder::EmitVRegs()
{
    ASSERT(current_vregs_.size() == current_vregs_count_);
    if (current_vregs_.empty()) {
        return;
    }

    if (current_vregs_.size() > last_vregs_.size()) {
        last_vregs_.resize(current_vregs_.size(), VRegInfo::Invalid());
        vregs_last_change_.resize(current_vregs_.size());
    }

    ArenaVector<BitTableBuilder<VRegisterCatalogueIndex>::Entry> &vregs_map = vregs_map_storage_;
    ArenaBitVector &vregs_mask = vregs_mask_storage_;
    vregs_map.clear();
    vregs_mask.clear();

    for (size_t i = 0; i < current_vregs_.size(); i++) {
        auto &vreg = current_vregs_[i];
        uint32_t distance = stack_maps_.GetRowsCount() - vregs_last_change_[i];
        if (last_vregs_[i] != vreg || distance > MAX_VREG_LIVE_DISTANCE) {
            BitTableBuilder<VRegisterInfo>::Entry vreg_entry;
            vreg_entry[VRegisterInfo::COLUMN_INFO] = vreg.GetInfo();
            vreg_entry[VRegisterInfo::COLUMN_VALUE] = vreg.GetValue();
            uint32_t index = vreg.IsLive() ? vregs_catalogue_.Add(vreg_entry) : decltype(vregs_catalogue_)::NO_VALUE;
            vregs_map.push_back({index});
            vregs_mask.SetBit(i);
            last_vregs_[i] = vreg;
            vregs_last_change_[i] = stack_maps_.GetRowsCount();
        }
    }

    BitMemoryRegion rgn(vregs_mask.data(), vregs_mask.size());
    ASSERT(vregs_mask.PopCount() == vregs_map.size());
    if (vregs_mask.PopCount() != 0) {
        current_stack_map_[StackMap::COLUMN_VREG_MASK_INDEX] = vreg_masks_.Add(vregs_mask.GetFixed());
    }
    if (!current_vregs_.empty()) {
        current_stack_map_[StackMap::COLUMN_VREG_MAP_INDEX] = vregs_map_.AddArray(Span(vregs_map));
    }
}

void CodeInfoBuilder::Encode(ArenaVector<uint8_t> *stream, size_t offset)
{
    BitMemoryStreamOut out(stream, offset);

    uint32_t tables_mask = 0;
    EnumerateTables([&tables_mask](size_t index, const auto &table) {
        if (table->GetRowsCount() != 0) {
            tables_mask |= (1U << index);
        }
    });

    header_.SetTableMask(tables_mask);
    header_.SetVRegsCount(vregs_count_);
    header_.Encode(out);

    EnumerateTables([&out]([[maybe_unused]] size_t index, const auto &table) {
        if (table->GetRowsCount() != 0) {
            table->Encode(out);
        }
    });
    stream->resize(RoundUp(stream->size(), CodeInfo::SIZE_ALIGNMENT));
}

}  // namespace panda::compiler
@ -1,158 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_CODE_INFO_BUILDER_H
#define PANDA_CODE_INFO_BUILDER_H

#include "code_info.h"
#include "utils/arena_containers.h"
#include "utils/bit_vector.h"

namespace panda::compiler {

class CodeInfoBuilder {
public:
    CodeInfoBuilder(Arch arch, ArenaAllocator *allocator)
        : arch_(arch),
          stack_maps_(allocator),
          inline_infos_(allocator),
          roots_reg_masks_(allocator),
          roots_stack_masks_(allocator),
          method_ids_(allocator),
          vregs_catalogue_(allocator),
          vregs_map_(allocator),
          vreg_masks_(allocator),
          implicit_nullchecks_(allocator),
          constant_table_(allocator),
          current_vregs_(allocator->Adapter()),
          last_vregs_(allocator->Adapter()),
          vregs_last_change_(allocator->Adapter()),
          inline_info_stack_(allocator->Adapter()),
          vregs_map_storage_(allocator->Adapter()),
          vregs_mask_storage_(allocator)
    {
    }

    NO_COPY_SEMANTIC(CodeInfoBuilder);
    NO_MOVE_SEMANTIC(CodeInfoBuilder);
    ~CodeInfoBuilder() = default;

    void BeginMethod(uint32_t frame_size, uint32_t vregs_count);

    void EndMethod();

    void BeginStackMap(uint32_t bpc, uint32_t npc, ArenaBitVector *stack_roots, uint32_t regs_roots,
                       bool require_vreg_map, bool is_osr);

    void EndStackMap();

    void BeginInlineInfo(void *method, uint32_t method_id, uint32_t bpc, uint32_t vregs_count);

    void EndInlineInfo();

    void AddVReg(VRegInfo reg)
    {
        // Constants should be added via the `AddConstant` method
        ASSERT(reg.GetLocation() != VRegInfo::Location::CONSTANT);
        current_vregs_.push_back(reg);
    }

    void AddConstant(uint64_t value, VRegInfo::Type type, bool is_acc);

    void SetFrameSize(uint32_t size)
    {
        header_.SetFrameSize(size);
    }

    void Encode(ArenaVector<uint8_t> *stream, size_t offset = 0);

    void SetSavedCalleeRegsMask(uint32_t mask, uint32_t vmask)
    {
        header_.SetCalleeRegMask(mask);
        header_.SetCalleeFpRegMask(vmask);
    }

    void AddImplicitNullCheck(uint32_t instruction_native_pc, uint32_t offset)
    {
        implicit_nullchecks_.Add({instruction_native_pc, offset});
    }

    void SetHasFloatRegs(bool has)
    {
        header_.SetHasFloatRegs(has);
    }

    template <typename Func>
    constexpr void EnumerateTables(Func func)
    {
        size_t index = 0;
        func(index++, &stack_maps_);
        func(index++, &inline_infos_);
        func(index++, &roots_reg_masks_);
        func(index++, &roots_stack_masks_);
        func(index++, &method_ids_);
        func(index++, &vreg_masks_);
        func(index++, &vregs_map_);
        func(index++, &vregs_catalogue_);
        func(index++, &implicit_nullchecks_);
        func(index++, &constant_table_);
        ASSERT(index == CodeInfo::TABLES_COUNT);
    }

    void DumpCurrentStackMap(std::ostream &stream) const;

private:
    void EmitVRegs();

private:
    Arch arch_;
    uint32_t vregs_count_ {0};
    uint32_t current_vregs_count_ {0};

    CodeInfoHeader header_ {};

    // Tables
    BitTableBuilder<StackMap> stack_maps_;
    BitTableBuilder<InlineInfo> inline_infos_;
    BitTableBuilder<RegisterMask> roots_reg_masks_;
    BitmapTableBuilder roots_stack_masks_;
    BitTableBuilder<MethodId> method_ids_;
    BitTableBuilder<VRegisterInfo> vregs_catalogue_;
    BitTableBuilder<VRegisterCatalogueIndex> vregs_map_;
    BitmapTableBuilder vreg_masks_;
    BitTableBuilder<ImplicitNullChecks> implicit_nullchecks_;
    BitTableBuilder<ConstantTable> constant_table_;

    // Auxiliary containers
    BitTableBuilder<StackMap>::Entry current_stack_map_;
    ArenaVector<VRegInfo> current_vregs_;
    ArenaVector<VRegInfo> last_vregs_;
    ArenaVector<uint32_t> vregs_last_change_;
    ArenaVector<BitTableBuilder<InlineInfo>::Entry> inline_info_stack_;
    ArenaVector<BitTableBuilder<VRegisterCatalogueIndex>::Entry> vregs_map_storage_;
    ArenaBitVector vregs_mask_storage_;

#ifndef NDEBUG
    bool was_method_begin_ {false};
    bool was_stack_map_begin_ {false};
    bool was_inline_info_begin_ {false};
#endif

    static constexpr size_t MAX_VREG_LIVE_DISTANCE = 32;
};

}  // namespace panda::compiler

#endif  // PANDA_CODE_INFO_BUILDER_H
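A hedged sketch of the builder's call protocol, inferred from the NDEBUG assertions in code_info_builder.cpp above: BeginMethod/EndMethod bracket the whole method, BeginStackMap/EndStackMap bracket each safepoint, and vregs are recorded inside the stack map. All concrete values below are made up for illustration:

#include "code_info_builder.h"  // assumes the header above is on the include path

namespace panda::compiler {
void BuildExample(ArenaAllocator *allocator, ArenaVector<uint8_t> *out)
{
    CodeInfoBuilder builder(RUNTIME_ARCH, allocator);
    builder.BeginMethod(/* frame_size */ 64U, /* vregs_count */ 2U);

    builder.BeginStackMap(/* bpc */ 0U, /* npc */ 0x10U, /* stack_roots */ nullptr,
                          /* regs_roots */ 0U, /* require_vreg_map */ true, /* is_osr */ false);
    // With require_vreg_map set, exactly vregs_count records must be supplied.
    builder.AddVReg(VRegInfo(0U, VRegInfo::Location::REGISTER, VRegInfo::Type::INT32, false));
    builder.AddConstant(42U, VRegInfo::Type::INT64, /* is_acc */ false);
    builder.EndStackMap();

    builder.EndMethod();
    builder.Encode(out);  // serializes the header and all non-empty bit tables into *out
}
}  // namespace panda::compiler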
@ -1,214 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_CODE_INFO_TABLES_H
#define PANDA_CODE_INFO_TABLES_H

#include "utils/bit_field.h"
#include "utils/bit_table.h"
#include "utils/arch.h"
#include "vreg_info.h"

namespace panda::compiler {

class StackMap : public BitTableRow<8U, StackMap> {
public:
    BIT_TABLE_HEADER(8, StackMap)
    BIT_TABLE_COLUMN(0, Properties, PROPERTIES)
    BIT_TABLE_COLUMN(1, NativePc, NATIVE_PC)
    BIT_TABLE_COLUMN(2, BytecodePc, BYTECODE_PC)
    BIT_TABLE_COLUMN(3, RootsRegMaskIndex, ROOTS_REG_MASK_INDEX)
    BIT_TABLE_COLUMN(4, RootsStackMaskIndex, ROOTS_STACK_MASK_INDEX)
    BIT_TABLE_COLUMN(5, InlineInfoIndex, INLINE_INFO_INDEX)
    BIT_TABLE_COLUMN(6, VRegMaskIndex, VREG_MASK_INDEX)
    BIT_TABLE_COLUMN(7, VRegMapIndex, VREG_MAP_INDEX)

    DEFAULT_MOVE_SEMANTIC(StackMap);
    DEFAULT_COPY_SEMANTIC(StackMap);
    ~StackMap() = default;

    std::string GetColumnStr(size_t column) const
    {
        if (column != COLUMN_NATIVE_PC) {
            return Base::GetColumnStr(column);
        }
        if (Get(column) == NO_VALUE) {
            return "-";
        }
        return std::to_string(GetNativePcUnpacked());
    }

    static constexpr uintptr_t PackAddress(uintptr_t address, Arch arch)
    {
        ASSERT(IsAligned(address, GetInstructionAlignment(arch)));
        return address / GetInstructionAlignment(arch);
    }

    uint32_t GetNativePcUnpacked(Arch arch = RUNTIME_ARCH) const
    {
        return UnpackAddress(GetNativePc(), arch);
    }

    static constexpr uintptr_t UnpackAddress(uintptr_t address, Arch arch)
    {
        return address * GetInstructionAlignment(arch);
    }

    static constexpr uint32_t CreateProperties(bool is_osr, bool has_regmap)
    {
        return FieldIsOsr::Encode(is_osr) | FieldHasRegMap::Encode(has_regmap);
    }

    bool IsOsr() const
    {
        return FieldIsOsr::Get(GetProperties());
    }

    bool HasRegMap() const
    {
        return FieldHasRegMap::Get(GetProperties());
    }

private:
    using FieldIsOsr = BitField<bool, 0, 1>;
    using FieldHasRegMap = FieldIsOsr::NextFlag;
};

class InlineInfo : public BitTableRow<6U, InlineInfo> {
public:
    BIT_TABLE_HEADER(6, InlineInfo)
    BIT_TABLE_COLUMN(0, IsLast, IS_LAST)
    BIT_TABLE_COLUMN(1, BytecodePc, BYTECODE_PC)
    BIT_TABLE_COLUMN(2, MethodIdIndex, METHOD_ID_INDEX)
    BIT_TABLE_COLUMN(3, MethodHi, METHOD_HI)
    BIT_TABLE_COLUMN(4, MethodLow, METHOD_LOW)
    BIT_TABLE_COLUMN(5, VRegsCount, VREGS_COUNT)

    DEFAULT_MOVE_SEMANTIC(InlineInfo);
    DEFAULT_COPY_SEMANTIC(InlineInfo);
    ~InlineInfo() = default;
};

class RegisterMask : public BitTableRow<1, RegisterMask> {
public:
    BIT_TABLE_HEADER(1, RegisterMask)
    BIT_TABLE_COLUMN(0, Mask, MASK)

    DEFAULT_MOVE_SEMANTIC(RegisterMask);
    DEFAULT_COPY_SEMANTIC(RegisterMask);
    ~RegisterMask() = default;
};

class StackMask : public BitTableRow<1, StackMask> {
public:
    BIT_TABLE_HEADER(1, StackMask)
    BIT_TABLE_COLUMN(0, Mask, MASK)

    DEFAULT_MOVE_SEMANTIC(StackMask);
    DEFAULT_COPY_SEMANTIC(StackMask);
    ~StackMask() = default;
};

class VRegisterMask : public BitTableRow<1, VRegisterMask> {
public:
    BIT_TABLE_HEADER(1, VRegisterMask)
    BIT_TABLE_COLUMN(0, Mask, MASK)

    DEFAULT_MOVE_SEMANTIC(VRegisterMask);
    DEFAULT_COPY_SEMANTIC(VRegisterMask);
    ~VRegisterMask() = default;
};

class MethodId : public BitTableRow<1, MethodId> {
public:
    BIT_TABLE_HEADER(1, MethodId)
    BIT_TABLE_COLUMN(0, Id, ID)

    DEFAULT_MOVE_SEMANTIC(MethodId);
    DEFAULT_COPY_SEMANTIC(MethodId);
    ~MethodId() = default;
};

class VRegisterCatalogueIndex : public BitTableRow<1, VRegisterCatalogueIndex> {
public:
    BIT_TABLE_HEADER(1, VRegisterCatalogueIndex)
    BIT_TABLE_COLUMN(0, Index, INDEX)

    DEFAULT_MOVE_SEMANTIC(VRegisterCatalogueIndex);
    DEFAULT_COPY_SEMANTIC(VRegisterCatalogueIndex);
    ~VRegisterCatalogueIndex() = default;
};

class VRegisterInfo : public BitTableRow<2U, VRegisterInfo> {
public:
    BIT_TABLE_HEADER(2, VRegisterInfo)
    BIT_TABLE_COLUMN(0, Info, INFO)
    BIT_TABLE_COLUMN(1, Value, VALUE)

    DEFAULT_MOVE_SEMANTIC(VRegisterInfo);
    DEFAULT_COPY_SEMANTIC(VRegisterInfo);
    ~VRegisterInfo() = default;

    std::string GetColumnStr(size_t column) const
    {
        if (column != COLUMN_INFO || Get(column) == NO_VALUE) {
            return Base::GetColumnStr(column);
        }
        auto vreg = GetVRegInfo();
        return std::string(vreg.GetLocationString()) + ":" + vreg.GetTypeString();
    }

    VRegInfo GetVRegInfo() const
    {
        return VRegInfo(GetValue(), GetInfo());
    }

    uint32_t GetConstantLowIndex() const
    {
        ASSERT(GetVRegInfo().GetLocation() == VRegInfo::Location::CONSTANT);
        return GetValue() & ((1U << BITS_PER_UINT16) - 1);
    }

    uint32_t GetConstantHiIndex() const
    {
        ASSERT(GetVRegInfo().GetLocation() == VRegInfo::Location::CONSTANT);
        return (GetValue() >> BITS_PER_UINT16) & ((1U << BITS_PER_UINT16) - 1);
    }
};

class ImplicitNullChecks : public BitTableRow<2U, ImplicitNullChecks> {
public:
    BIT_TABLE_HEADER(2, ImplicitNullChecks)
    BIT_TABLE_COLUMN(0, InstNativePc, INST_NATIVE_PC)
    BIT_TABLE_COLUMN(1, Offset, OFFSET)

    DEFAULT_MOVE_SEMANTIC(ImplicitNullChecks);
    DEFAULT_COPY_SEMANTIC(ImplicitNullChecks);
    ~ImplicitNullChecks() = default;
};

class ConstantTable : public BitTableRow<1, ConstantTable> {
public:
    BIT_TABLE_HEADER(1, ConstantTable)
    BIT_TABLE_COLUMN(0, Value, VALUE)

    DEFAULT_MOVE_SEMANTIC(ConstantTable);
    DEFAULT_COPY_SEMANTIC(ConstantTable);
    ~ConstantTable() = default;
};

}  // namespace panda::compiler

#endif  // PANDA_CODE_INFO_TABLES_H
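A standalone illustration of the StackMap::PackAddress/UnpackAddress pair above: native PCs are divided by the instruction alignment before being stored, so each bit-table entry needs fewer bits. The alignment value is an assumption:

#include <cstdint>

int main()
{
    constexpr uintptr_t kInstructionAlignment = 4;  // assumed GetInstructionAlignment(arch) result
    constexpr uintptr_t kNativePc = 0x1234;         // already aligned, as PackAddress asserts
    constexpr uintptr_t kPacked = kNativePc / kInstructionAlignment;  // PackAddress
    constexpr uintptr_t kUnpacked = kPacked * kInstructionAlignment;  // UnpackAddress
    static_assert(kUnpacked == kNativePc);  // the round trip is lossless for aligned PCs
    return 0;
}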
@ -1,220 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_CODE_INFO_VREG_INFO_H
#define COMPILER_CODE_INFO_VREG_INFO_H

#include "utils/bit_field.h"

namespace panda::compiler {

class VRegInfo final {
public:
    enum class Location : int8_t { NONE, SLOT, REGISTER, FP_REGISTER, CONSTANT, COUNT = CONSTANT };

    static constexpr size_t INVALID_LOCATION = (1U << MinimumBitsToStore(static_cast<size_t>(Location::COUNT))) - 1;

    enum class Type : uint8_t { UNDEFINED, OBJECT, INT32, INT64, FLOAT32, FLOAT64, BOOL, ANY, COUNT = ANY };

    VRegInfo()
    {
        FieldLocation::Set(Location::NONE, &info_);
        ASSERT(!IsLive());
    }
    VRegInfo(uint32_t value, VRegInfo::Location location, Type type, bool is_acc)
        : value_(value),
          info_(FieldLocation::Encode(location) | FieldType::Encode(type) | FieldIsAccumulator::Encode(is_acc))
    {
    }
    VRegInfo(uint32_t value, VRegInfo::Location location, Type type, bool is_acc, uint32_t index)
        : VRegInfo(value, location, type, is_acc)
    {
        FieldVRegIndex::Set(index, &info_);
    }
    VRegInfo(uint32_t value, uint32_t packed_info) : value_(value), info_(packed_info) {}

    static VRegInfo Invalid()
    {
        return VRegInfo(0, static_cast<Location>(INVALID_LOCATION), Type::UNDEFINED, false);
    }

    ~VRegInfo() = default;

    DEFAULT_COPY_SEMANTIC(VRegInfo);
    DEFAULT_MOVE_SEMANTIC(VRegInfo);

    uint32_t GetValue() const
    {
        return value_;
    }

    void SetValue(uint32_t value)
    {
        value_ = value;
    }

    Location GetLocation() const
    {
        return FieldLocation::Get(info_);
    }

    Type GetType() const
    {
        return FieldType::Get(info_);
    }

    uint16_t GetIndex() const
    {
        return FieldVRegIndex::Get(info_);
    }
    void SetIndex(uint16_t value)
    {
        FieldVRegIndex::Set(value, &info_);
    }

    bool IsAccumulator() const
    {
        return FieldIsAccumulator::Get(info_);
    }

    bool IsLive() const
    {
        return GetLocation() != Location::NONE;
    }

    bool IsObject() const
    {
        return GetType() == Type::OBJECT;
    }

    bool IsFloat() const
    {
        return GetType() == Type::FLOAT32 || GetType() == Type::FLOAT64;
    }

    bool Has64BitValue() const
    {
        return GetType() == VRegInfo::Type::FLOAT64 || GetType() == VRegInfo::Type::INT64;
    }

    bool IsLocationRegister() const
    {
        auto location = GetLocation();
        return location == Location::REGISTER || location == Location::FP_REGISTER;
    }

    uint32_t GetConstantLowIndex() const
    {
        ASSERT(GetLocation() == Location::CONSTANT);
        return GetValue() & ((1U << BITS_PER_UINT16) - 1);
    }

    uint32_t GetConstantHiIndex() const
    {
        ASSERT(GetLocation() == Location::CONSTANT);
        return (GetValue() >> BITS_PER_UINT16) & ((1U << BITS_PER_UINT16) - 1);
    }

    void SetConstantIndices(uint16_t low, uint16_t hi)
    {
        value_ = low | (static_cast<uint32_t>(hi) << BITS_PER_UINT16);
    }

    bool operator==(const VRegInfo &rhs) const
    {
        return value_ == rhs.value_ && info_ == rhs.info_;
    }
    bool operator!=(const VRegInfo &rhs) const
    {
        return !(*this == rhs);
    }

    uint32_t GetInfo()
    {
        return info_;
    }

    const char *GetTypeString() const
    {
        switch (GetType()) {
            case Type::OBJECT:
                return "OBJECT";
            case Type::INT64:
                return "INT64";
            case Type::INT32:
                return "INT32";
            case Type::FLOAT32:
                return "FLOAT32";
            case Type::FLOAT64:
                return "FLOAT64";
            case Type::BOOL:
                return "BOOL";
            case Type::ANY:
                return "ANY";
            default:
                break;
        }
        UNREACHABLE();
    }

    const char *GetLocationString() const
    {
        switch (GetLocation()) {
            case Location::NONE:
                return "NONE";
            case Location::SLOT:
                return "SLOT";
            case Location::REGISTER:
                return "REGISTER";
            case Location::FP_REGISTER:
                return "FP_REGISTER";
            case Location::CONSTANT:
                return "CONSTANT";
            default:
                break;
        }
        UNREACHABLE();
    }

    void Dump(std::ostream &os) const
    {
        os << "VReg #" << GetIndex() << ":" << GetTypeString() << ", " << GetLocationString() << "="
           << helpers::ToSigned(GetValue());
        if (IsAccumulator()) {
            os << ", ACC";
        }
    }

private:
    uint32_t value_ {0};
    uint32_t info_ {0};

    using FieldLocation = BitField<Location, 0, MinimumBitsToStore(static_cast<uint32_t>(Location::COUNT))>;
    using FieldType = FieldLocation::NextField<Type, MinimumBitsToStore(static_cast<uint32_t>(Type::COUNT))>;
    using FieldIsAccumulator = FieldType::NextFlag;
    using FieldVRegIndex = FieldIsAccumulator::NextField<uint16_t, BITS_PER_UINT16>;
};

static_assert(sizeof(VRegInfo) <= sizeof(uint64_t));

inline std::ostream &operator<<(std::ostream &os, const VRegInfo &vreg)
{
    vreg.Dump(os);
    return os;
}

}  // namespace panda::compiler

#endif  // COMPILER_CODE_INFO_VREG_INFO_H
@ -1,345 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer_run.h"
#include "mem/pool_manager.h"
#include "mem/code_allocator.h"
#include "include/class.h"
#include "include/method.h"
#include "optimizer/ir/ir_constructor.h"
#include "optimizer/ir/runtime_interface.h"
#include "optimizer/analysis/loop_analyzer.h"
#include "optimizer/pass.h"
#include "optimizer/ir_builder/ir_builder.h"
#include "utils/logger.h"
#include "code_info/code_info.h"
#include "events/events.h"
#include "trace/trace.h"
#include "optimizer/code_generator/codegen.h"
#include "compile_method.h"

namespace panda::compiler {

#ifdef PANDA_COMPILER_CFI
static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *code_allocator, ArenaAllocator *gdb_debug_info_allocator,
                             const std::string &method_name);
#endif

static Arch ChooseArch(Arch arch)
{
    if (arch != Arch::NONE) {
        return arch;
    }

    arch = RUNTIME_ARCH;
    if (RUNTIME_ARCH == Arch::X86_64 && options.WasSetCompilerCrossArch()) {
        arch = GetArchFromString(options.GetCompilerCrossArch());
    }

    return arch;
}

static bool CheckSingleImplementation(Graph *graph)
{
    // Check that all methods that were inlined due to their single-implementation property still have this property,
    // otherwise we must drop the compiled code.
    // TODO(compiler): we need to reset the hotness counter here to avoid yet another warmup phase.
    auto cha = graph->GetRuntime()->GetCha();
    for (auto si_method : graph->GetSingleImplementationList()) {
        if (!cha->IsSingleImplementation(si_method)) {
            LOG(WARNING, COMPILER)
                << "Method lost single-implementation property after compilation, so we need to drop "
                   "whole compiled code: "
                << graph->GetRuntime()->GetMethodFullName(si_method);
            return false;
        }
    }
    return true;
}
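
// Copies the generated machine code and its code info into executable memory,
// laid out as [CodePrefix | machine code | aligned CodeInfo].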
static Span<uint8_t> EmitCode(const Graph *graph, CodeAllocator *allocator)
{
    size_t code_offset = RoundUp(CodePrefix::STRUCT_SIZE, GetCodeAlignment(graph->GetArch()));
    CodePrefix prefix;
    prefix.code_size = graph->GetData().size();
    prefix.code_info_offset = code_offset + RoundUp(graph->GetData().size(), sizeof(uint32_t));
    prefix.code_info_size = graph->GetCodeInfoData().size();
    size_t code_size = prefix.code_info_offset + prefix.code_info_size;
    auto mem_range = allocator->AllocateCodeUnprotected(code_size);
    if (mem_range.GetSize() == 0) {
        return Span<uint8_t> {};
    }

    auto data = reinterpret_cast<uint8_t *>(mem_range.GetData());
    memcpy_s(data, sizeof(CodePrefix), &prefix, sizeof(CodePrefix));
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    memcpy_s(&data[code_offset], graph->GetData().size(), graph->GetData().data(), graph->GetData().size());
    // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
    memcpy_s(&data[prefix.code_info_offset], graph->GetCodeInfoData().size(), graph->GetCodeInfoData().data(),
             graph->GetCodeInfoData().size());

    allocator->ProtectCode(mem_range);

    return Span<uint8_t>(reinterpret_cast<uint8_t *>(mem_range.GetData()), code_size);
}

uint8_t *GetEntryPoint(Graph *graph, [[maybe_unused]] Method *method, const std::string &method_name,
                       [[maybe_unused]] bool is_osr, CodeAllocator *code_allocator,
                       [[maybe_unused]] ArenaAllocator *gdb_debug_info_allocator)
{
#ifdef PANDA_COMPILER_CFI
    auto generated_data = options.IsCompilerEmitDebugInfo()
                              ? EmitElf(graph, code_allocator, gdb_debug_info_allocator, method_name)
                              : EmitCode(graph, code_allocator);
#else
    auto generated_data = EmitCode(graph, code_allocator);
#endif
    if (generated_data.Empty()) {
        LOG(INFO, COMPILER) << "Compilation failed due to memory allocation failure: " << method_name;
        return nullptr;
    }
    CodeInfo code_info(generated_data);
    LOG(INFO, COMPILER) << "Compiled code for '" << method_name << "' has been installed to "
                        << bit_cast<void *>(code_info.GetCode()) << ", code size " << code_info.GetCodeSize();

    auto entry_point = const_cast<uint8_t *>(code_info.GetCode());
    EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), reinterpret_cast<uintptr_t>(entry_point),
                      code_info.GetCodeSize(), code_info.GetInfoSize(), events::CompilationStatus::COMPILED);
    return entry_point;
}

bool JITCompileMethod(RuntimeInterface *runtime, Method *method, bool is_osr, CodeAllocator *code_allocator,
                      ArenaAllocator *allocator, ArenaAllocator *local_allocator,
                      ArenaAllocator *gdb_debug_info_allocator)
{
    std::string method_name = runtime->GetMethodFullName(method, false);
    SCOPED_TRACE_STREAM << "JIT compiling " << method_name;

    if (!options.MatchesRegex(method_name)) {
        LOG(DEBUG, COMPILER) << "Skip the method due to regexp mismatch: " << method_name;
        return false;
    }

    Graph *graph {nullptr};
    auto finalizer = [&graph]([[maybe_unused]] void *ptr) {
        if (graph != nullptr) {
            graph->~Graph();
        }
    };
    std::unique_ptr<void, decltype(finalizer)> fin(&finalizer, finalizer);

    auto arch {Arch::NONE};
    bool is_dynamic = panda::panda_file::IsDynamicLanguage(method->GetClass()->GetSourceLang());

    if (!CompileInGraph(runtime, method, is_osr, allocator, local_allocator, is_dynamic, &arch, method_name, &graph)) {
        return false;
    }
    ASSERT(graph != nullptr && graph->GetData().data() != nullptr);

    if (!is_dynamic && !CheckSingleImplementation(graph)) {
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0,
                          events::CompilationStatus::FAILED_SINGLE_IMPL);
        return false;
    }

    // Drop non-native code in any case
    if (arch != RUNTIME_ARCH) {
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::DROPPED);
        return false;
    }

    auto entry_point = GetEntryPoint(graph, method, method_name, is_osr, code_allocator, gdb_debug_info_allocator);
    if (entry_point == nullptr) {
        return false;
    }
    if (is_osr) {
        if (runtime->HasCompiledCode(method)) {
            runtime->SetOsrCode(method, entry_point);
            ASSERT(runtime->GetOsrCode(method) != nullptr);
        } else {
            // Compiled code has been deoptimized, so we shouldn't install OSR code.
            // TODO(compiler): release compiled code memory, when CodeAllocator supports freeing the memory.
            return false;
        }
    } else {
        runtime->SetCompiledEntryPoint(method, entry_point);
    }
    ASSERT(graph != nullptr);
    return true;
}

bool CompileInGraph(RuntimeInterface *runtime, Method *method, bool is_osr, ArenaAllocator *allocator,
                    ArenaAllocator *local_allocator, bool is_dynamic, Arch *arch, const std::string &method_name,
                    Graph **graph)
{
    LOG(INFO, COMPILER) << "Compile method" << (is_osr ? "(OSR)" : "") << ": " << method_name << " ("
                        << runtime->GetFileName(method) << ')';
    *arch = ChooseArch(*arch);
    if (*arch == Arch::NONE || !BackendSupport(*arch)) {
        LOG(DEBUG, COMPILER) << "Compilation unsupported for this platform!";
        return false;
    }

    ASSERT(*graph == nullptr);
    *graph = allocator->New<Graph>(allocator, local_allocator, *arch, method, runtime, is_osr, nullptr, is_dynamic);
    if (*graph == nullptr) {
        LOG(ERROR, COMPILER) << "Creating graph failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    if (!(*graph)->RunPass<IrBuilder>()) {
        if (!compiler::options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "IrBuilder failed!";
        }
        LOG(WARNING, COMPILER) << "IrBuilder failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    // Run compiler optimizations over the created graph
    bool res = RunOptimizations(*graph);
    if (!res) {
        if (!compiler::options.IsCompilerIgnoreFailures()) {
            LOG(FATAL, COMPILER) << "RunOptimizations failed!";
        }
        LOG(WARNING, COMPILER) << "RunOptimizations failed!";
        EVENT_COMPILATION(method_name, is_osr, method->GetCodeSize(), 0, 0, 0, events::CompilationStatus::FAILED);
        return false;
    }

    LOG(DEBUG, COMPILER) << "The method is compiled";

    return true;
}
}  // namespace panda::compiler

#ifdef PANDA_COMPILER_CFI

#include "optimizer/ir/aot_data.h"
#include "tools/debug/jit_writer.h"

// The following "C" code is needed to enable interaction with gdb.
// Please read "JIT Compilation Interface" in the gdb documentation for more information.
extern "C" {
// Gdb will replace the implementation of this function
void NO_INLINE __jit_debug_register_code(void)
{
    // NOLINTNEXTLINE(hicpp-no-assembler)
    asm("");
}

// Default version of the descriptor (may be checked before code registration)
// NOLINTNEXTLINE(modernize-use-nullptr)
jit_descriptor __jit_debug_descriptor = {1, JIT_NOACTION, NULL, NULL};
}  // extern "C"

namespace panda::compiler {

// NOLINTNEXTLINE(fuchsia-statically-constructed-objects)
static os::memory::Mutex jit_debug_lock;

// Registers a jit-elf description in the gdb linked list
static void RegisterJitCode(jit_code_entry *entry)
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(jit_debug_lock);
    // Re-link the list
    entry->next_entry = __jit_debug_descriptor.first_entry;
    if (__jit_debug_descriptor.first_entry != nullptr) {
        __jit_debug_descriptor.first_entry->prev_entry = entry;
    }
    __jit_debug_descriptor.first_entry = entry;

    // Fill the last entry
    __jit_debug_descriptor.relevant_entry = entry;
    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;

    // Call the gdb callback
    __jit_debug_register_code();
    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = nullptr;
}

// When the code allocator is cleaned, the entries are cleaned as well
void CleanJitDebugCode()
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    os::memory::LockHolder lock(jit_debug_lock);
    __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;

    while (__jit_debug_descriptor.first_entry != nullptr) {
        __jit_debug_descriptor.first_entry->prev_entry = nullptr;
        __jit_debug_descriptor.relevant_entry = __jit_debug_descriptor.first_entry;
        // Call the gdb callback
        __jit_debug_register_code();

        __jit_debug_descriptor.first_entry = __jit_debug_descriptor.first_entry->next_entry;
    }

    __jit_debug_descriptor.action_flag = JIT_NOACTION;
    __jit_debug_descriptor.relevant_entry = nullptr;
}

// For each jit code: generates a small elf description and puts it in the gdb-special linked list.
static Span<uint8_t> EmitElf(Graph *graph, CodeAllocator *code_allocator, ArenaAllocator *gdb_debug_info_allocator,
                             const std::string &method_name)
{
    ASSERT(options.IsCompilerEmitDebugInfo());

    if (graph->GetData().Empty()) {
        return {};
    }

    JitDebugWriter jit_writer(graph->GetArch(), graph->GetRuntime(), code_allocator, method_name);

    jit_writer.Start();

    auto method = reinterpret_cast<Method *>(graph->GetMethod());
    auto klass = reinterpret_cast<Class *>(graph->GetRuntime()->GetClass(method));
    jit_writer.StartClass(*klass);

    CompiledMethod compiled_method(graph->GetArch(), method);
    compiled_method.SetCode(graph->GetData().ToConst());
    compiled_method.SetCodeInfo(graph->GetCodeInfoData());
    compiled_method.SetCfiInfo(graph->GetCallingConvention()->GetCfiInfo());

    jit_writer.AddMethod(compiled_method, 0);
    jit_writer.EndClass();
    jit_writer.End();
    if (!jit_writer.Write()) {
        return {};
    }

    auto gdb_entry {gdb_debug_info_allocator->New<jit_code_entry>()};
    if (gdb_entry == nullptr) {
        return {};
    }

    auto elf_file {jit_writer.GetElf()};
    // Pointer to the elf-file entry
    gdb_entry->symfile_addr = reinterpret_cast<const char *>(elf_file.Data());
    // Size of the elf-in-memory file
    gdb_entry->symfile_size = elf_file.Size();
    gdb_entry->prev_entry = nullptr;

    RegisterJitCode(gdb_entry);
    return jit_writer.GetCode();
}

}  // namespace panda::compiler
#endif
@ -1,38 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_COMPILE_METHOD_H_
#define COMPILER_COMPILE_METHOD_H_

#include "compiler_options.h"
#include "mem/arena_allocator.h"
#include "mem/code_allocator.h"
#include "include/method.h"
#include "utils/arch.h"

namespace panda::compiler {
class Graph;
class RuntimeInterface;

bool JITCompileMethod(RuntimeInterface *runtime, Method *method, bool is_osr, CodeAllocator *code_allocator,
                      ArenaAllocator *allocator, ArenaAllocator *local_allocator,
                      ArenaAllocator *gdb_debug_info_allocator);
bool CompileInGraph(RuntimeInterface *runtime, Method *method, bool is_osr, ArenaAllocator *allocator,
                    ArenaAllocator *local_allocator, bool is_dynamic, Arch *arch, const std::string &method_name,
                    Graph **graph);
bool CheckMethodInLists(const std::string &method_name);
}  // namespace panda::compiler

#endif  // COMPILER_COMPILE_METHOD_H_
@ -1,145 +0,0 @@
# Expression balancing

## Overview
**Expression balancing** - optimization that reorganises the computation of algebraic expressions.
The optimization is applied to expressions of the form of a long chain of the same binary associative and commutative operator, such as `ADD`, `MUL`, `AND`, `OR`, etc. It calculates the expression's critical path and, if it can be decreased, reorganises the expression so that it becomes optimal. For example: the critical path of `(((a + b) + c) + d)` is 3, whereas the critical path of `((a + b) + (c + d))` is 2.

## Rationality
Increases instruction-level parallelism.

## Dependence
* RPO analysis.

## Algorithm

Visit all basic blocks in RPO order.

For each block, iterate over the instructions in reverse order looking for a suitable instruction (i.e. an operator).

If such an instruction is found, it is the last one in an expression, and it is necessary to determine the whole chain and its critical path by recursive [analysis](#operator-analysis) of the operator inputs (`lhs` and `rhs`).

If the critical path isn't [optimal](#optimal-critical-path), delete the expression's operators from the basic block, allocate them to the expression's terms in an [optimal way](#operators-allocation) and insert the new operators into the basic block.


### Note
#### Operator analysis
`Analysis of operator inputs` is a check whether `lhs` (`rhs`) has the same opcode and has a single user (the operator itself). If so, the input is an operator of the expression, the analysis is called for it too, and it is saved to the array of operators (`operators_`); otherwise it is a term of the expression and it is saved to the array of terms (`sources_`).
If inputs belong to different basic blocks but satisfy the conditions above (have a single user and are arithmetic operations), they should be moved to a single (dominatee) basic block (similar to CodeSink), as the sketch below illustrates.
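
A minimal sketch of this recursive classification; the helper and member names are illustrative, not taken from the actual implementation:

```cpp
// Classify an input either as part of the operator chain or as a term.
void AnalyzeInput(Inst *input, Opcode expr_opcode)
{
    if (input->GetOpcode() == expr_opcode && input->HasSingleUser()) {
        operators_.push_back(input);                    // part of the chain
        AnalyzeInput(input->GetInput(0), expr_opcode);  // lhs
        AnalyzeInput(input->GetInput(1), expr_opcode);  // rhs
    } else {
        sources_.push_back(input);  // a term of the expression
    }
}
```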

#### Optimal critical path
The `optimal critical path` of an expression is `ceil[log2(n_terms)]` (`ceil[x]` is rounding `x` up).
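
For example, a chain of 8 terms can be computed with depth `ceil[log2(8)] = 3`, and 5 terms also need `ceil[log2(5)] = 3`. A tiny helper showing the arithmetic (an illustration, not part of the pass):

```cpp
#include <cmath>
#include <cstddef>

// Depth of a balanced binary tree over n_terms leaves.
size_t OptimalCriticalPath(size_t n_terms)
{
    return static_cast<size_t>(std::ceil(std::log2(n_terms)));
}
```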

#### Operators allocation
`Allocation in an optimal way` is an algorithm that creates an expression and guarantees that it has an [optimal](#optimal-critical-path) critical path:
Assume `terms[]` is an array of expression terms; the algorithm, called for two arguments `first_idx` and `last_idx`, creates an expression with terms in the range from `terms[first_idx]` to `terms[last_idx]`.
The algorithm is:
- If the range `first_idx:last_idx` covers `1` element, return this element.
- If the range `first_idx:last_idx` covers `2` elements, create and return an operator with `lhs` and `rhs` equal to `terms[first_idx]` and `terms[last_idx]`.
- Else calculate `split_idx` so that `split_idx` is strictly less than `last_idx` and the size of `first_idx:split_idx` is the greatest possible power of 2; create and return an operator with `lhs` and `rhs` equal to the results of the recursive calls `allocate(first_idx, split_idx)` and `allocate(split_idx + 1, last_idx)`.


## Pseudocode
```
for (auto basic_block : GetGraph()->GetBlocksRPO()) {
    for (auto instruction : basic_block->InstsReverse()) {
        if (instruction is a suitable operator) {

            // Recursively check and save inputs, determine critical_path_length;
            ...

            // Calculate optimal_critical_path_length;
            ...

            if (optimal < current) {
                AllocateSources(0, size(terms[]) - 1);
                insert new expression to the basic block;
            }
        }
    }
}

auto AllocateSources(size_t first_idx, size_t last_idx) {
    if (first_idx == last_idx) {
        return terms[first_idx];
    }
    if (last_idx == first_idx + 1) {
        auto operator = operators[free_op_idx++];
        operator->GetBasicBlock()->RemoveInst(operator);

        operator.lhs = terms[first_idx];
        operator.rhs = terms[last_idx];

        basic_block.Insert(operator);
        return operator;
    }
    else {
        size_t split_idx = calculate split_idx;
        auto lhs = AllocateSources(first_idx, split_idx);
        auto rhs = AllocateSources(split_idx + 1, last_idx);

        auto operator = operators[free_op_idx++];
        basic_block->RemoveInst(operator);

        operator.lhs = lhs;
        operator.rhs = rhs;

        basic_block.Insert(operator);
        return operator;
    }
}
```
## Examples
Before expression balancing:
```
0.i64 Parameter -> v8  // a
1.i64 Parameter -> v8  // b
2.i64 Parameter -> v9  // c
3.i64 Parameter -> v10 // d
4.i64 Parameter -> v11 // e
5.i64 Parameter -> v12 // f
6.i64 Parameter -> v12 // g

8. Add v0, v1 -> v9        // a + b
9. Add v2, v8 -> v10       // c + (a + b)
// Since v10 has more than one user, it has side-effects, so the algorithm considers it as a term:
10. Add v3, v9 -> v11, v14 // s10 = d + (c + (a + b))

11. Add v4, v10 -> v13     // e + s10
12. Add v5, v6 -> v13      // f + g
13. Add v11, v12 -> v14    // (e + s10) + (f + g)
14. Add v10, v13 -> v15    // s10 + ((e + s10) + (f + g))

15. Return v14
```
The code above contains two expressions: `v8-v10` (critical path is 3) and `v11-v14` (critical path is 3). Moreover, `v11-v14` is [optimal](#optimal-critical-path).
After expression balancing:
```
0.i64 Parameter -> v8  // a
1.i64 Parameter -> v8  // b
2.i64 Parameter -> v9  // c
3.i64 Parameter -> v10 // d
4.i64 Parameter -> v11 // e
5.i64 Parameter -> v12 // f
6.i64 Parameter -> v12 // g

8. Add v3, v2 -> v10       // d + c
9. Add v0, v1 -> v10       // a + b
// Since v10 has more than one user, it has side-effects, so the algorithm considers it as a term:
10. Add v8, v9 -> v11, v14 // s10 = ((d + c) + (a + b))

11. Add v4, v10 -> v13     // e + s10
12. Add v5, v6 -> v13      // f + g
13. Add v11, v12 -> v14    // (e + s10) + (f + g)
14. Add v10, v13 -> v15    // s10 + ((e + s10) + (f + g))

15. Return v14
```
## Links
Source code:
[balance_expressions.cpp](../optimizer/optimizations/balance_expressions.cpp)
[balance_expressions.h](../optimizer/optimizations/balance_expressions.h)

Tests:
[balance_expressions_test.cpp](../tests/balance_expressions_test.cpp)
@ -1,231 +0,0 @@
# Code Sink
## Overview

The optimization moves instructions into successor blocks, when possible, so that they are not executed on paths where their results are not needed.

## Rationality

This optimization avoids the execution of statements that are not used on the taken execution path. This should speed up execution.

Motivational example:
```
BB 3
0.i32 Parameter arg 0 -> (v5, v8, v7)
succs: [bb 0]

BB 0 preds: [bb 3]
8.i32 AddI v0, 0x1 -> (v6)
5. IfImm GT i32 v0, 0x0
succs: [bb 1, bb 2]

BB 2 preds: [bb 0]
6.i32 Return v8

BB 1 preds: [bb 0]
7.i32 Return v0
```

In this example `v8` is used only in one branch; however, it is always executed. The code sinking optimization suggests moving `v8` into `BB 2`.

## Dependence

* AliasAnalysis
* DominatorsTree
* LoopAnalysis
* Reverse Post Order (RPO)

## Algorithm

The iterative approach is used. On each iteration, the optimization tries to sink each instruction to one of its immediately dominated blocks. It is possible if all users of the instruction are dominated by the block that the instruction is sunk into. Instructions in a basic block are iterated in reverse order to decrease the number of iterations. Iterating finishes when no instruction was sunk.

Instructions that cannot be sunk:

* Instructions allocating memory
* Control flow instructions
* Instructions that can throw an exception
* Barrier instructions (calls, monitors, volatile, SafePoints, etc.)
* Store instructions
* Load instructions, if within the current basic block they dominate:
  * an aliased store instruction
  * a Monitor instruction
  * a volatile store instruction

To determine which load instructions can be sunk, we keep a list of store instructions that have been met so far (we are iterating in reverse order; therefore, when we meet a load instruction, we have already collected all stores after this load and can easily check for aliases).

Blocks that an instruction cannot be sunk into:
* Blocks that do not dominate all users of the instruction
* Loads cannot be sunk into blocks with more than one predecessor (because other predecessors might have aliased stores)
* Instructions are not sunk into loops

## Pseudocode

```
void CodeSink::RunImpl() {
    // Iteratively sink instructions. On each iteration an instruction can be
    // sunk to its basic block dominatee. Iterate sinking until no changes
    // happen.
    bool changed = true;
    while (changed) {
        changed = false;
        for (auto block : GetGraph()->GetBlocksRPO()) {
            bool barriered = false;
            ArenaVector<Inst *> stores;
            for (auto inst : block->InstsSafeReverse()) {
                barriered |= inst->IsMonitor() || (inst->IsStore() && inst->IsVolatile());
                auto candidate = SinkInstruction(inst, &stores, barriered);
                if (candidate != nullptr) {
                    block->EraseInst(inst);
                    candidate->PrependInst(inst);
                    changed = true;
                }
            }
        }
    }
}

BasicBlock *CodeSink::SinkInstruction(Inst *inst, ArenaVector<Inst *> *stores, bool barriered) {
    // Save stores to be sure we do not sink a load instruction that may be aliased
    if (inst->IsStore()) {
        stores->push_back(inst);
        return nullptr;
    }
    // Check that the instruction can be sunk
    if (inst->IsAllocation() || inst->IsControlFlow() || inst->CanThrow() || inst->IsBarrier()) {
        return nullptr;
    }
    if (inst->IsLoad()) {
        // Do not sink over a monitor
        // Do not sink over a volatile store
        if (barriered) {
            return nullptr;
        }
        for (auto store : *stores) {
            if (GetGraph()->CheckInstAlias(inst, store) != AliasType::NO_ALIAS) {
                return nullptr;
            }
        }
    }

    // Iterate over dominated blocks
    for (auto cand : inst->GetBasicBlock()->GetDominatedBlocks()) {
        if (IsAcceptableTarget(inst, cand)) {
            return cand;
        }
    }
    return nullptr;
}

bool CodeSink::IsAcceptableTarget(Inst *inst, BasicBlock *candidate) {
    BasicBlock *block = inst->GetBasicBlock();
    Loop *loop = block->GetLoop();
    Loop *cand_loop = candidate->GetLoop();
    if (candidate->GetPredsBlocks().size() > 1) {
        // Do not sink loads across a critical edge: there may be stores in other code paths.
        if (inst->IsLoad()) {
            return false;
        }
        // Do not sink into loops
        if (loop != cand_loop) {
            return false;
        }
    }

    // Check that all uses are dominated by the candidate
    for (auto &user : inst->GetUsers()) {
        Inst *uinst = user.GetInst();
        if (!candidate->IsDominate(uinst->GetBasicBlock())) {
            return false;
        }
    }
    return true;
}
```

## Examples
### Regular sinking
```
BB 2 preds: [bb 0]
5.i32 Add v3, v2 -> (v15)
8.i64 LoadObject 243 v0 -> (v9, v13)
9.b Compare NE i64 v8, v4 -> (v10)
10. IfImm NE b v9, 0x0
succs: [bb 3, bb 4]

BB 4 preds: [bb 2]
13.i64 StoreObject 243 v1, v8
succs: [bb 3]

BB 3 preds: [bb 2, bb 4]
15.i32 Return v5
succs: [bb 1]
```
The arithmetic operation `v5` is sunk, but `v8` is not, because `BB 3` has several predecessors:
```
BB 2 preds: [bb 0]
8.i64 LoadObject 243 v0 -> (v9, v13)
9.b Compare NE i64 v8, v4 -> (v10)
10. IfImm NE b v9, 0x0
succs: [bb 3, bb 4]

BB 4 preds: [bb 2]
13.i64 StoreObject 243 v1, v8
succs: [bb 3]

BB 3 preds: [bb 2, bb 4]
5.i32 Add v3, v2 -> (v15)
15.i32 Return v5
succs: [bb 1]
```
### Loop Sinking
```
BB 2 preds: [bb 0]
6.i64 Add v1, v5 -> (v21)
succs: [bb 3]

BB 3 preds: [bb 2, bb 3]
prop: head, loop 1
10p.i64 Phi v4(bb2), v22(bb3) -> (v23, v22, v20)
20.i64 LoadArray v2, v10p -> (v21)
21.i64 Add v20, v6 -> (v26, v22)
22.i64 Add v21, v10p -> (v10p, v26)
23.i32 Add v10p, v3 -> (v24)
26.i64 Add v21, v22 -> (v27)
24.b Compare LT i32 v23, v0 -> (v25)
25. IfImm NE b v24, 0x0
succs: [bb 3, bb 4]

BB 4 preds: [bb 3]
27.i64 Return v26
succs: [bb 1]
```
Sinking `v6` into the loop and `v26` out of the loop:
```
BB 2 preds: [bb 0]
6.i64 Add v1, v5 -> (v21)
succs: [bb 3]

BB 3 preds: [bb 2, bb 3]
prop: head, loop 1
10p.i64 Phi v4(bb2), v22(bb3) -> (v23, v20, v22)
20.i64 LoadArray v2, v10p -> (v21)
21.i64 Add v20, v6 -> (v22, v26)
22.i64 Add v21, v10p -> (v10p, v26)
23.i32 Add v10p, v3 -> (v24)
24.b Compare LT i32 v23, v0 -> (v25)
25. IfImm NE b v24, 0x0
succs: [bb 3, bb 4]

BB 4 preds: [bb 3]
26.i64 Add v21, v22 -> (v27)
27.i64 Return v26
succs: [bb 1]
```
## Links

Source code:
[code_sink.cpp](../optimizer/optimizations/code_sink.cpp)
[code_sink.h](../optimizer/optimizations/code_sink.h)

Tests:
[code_sink_test.cpp](../tests/code_sink_test.cpp)
@ -1,153 +0,0 @@
# Code generator
## Overview
Codegen is the backend of the current compiler implementation. It was built around one main idea: to be independent of the specific encoder architecture and to be able to work with different calling conventions. To achieve this independence from the target architecture, a special compiler-independent library was created - the encoder ([encoder.md](../optimizer/code_generator/encoder.md)).

## Dependence

Codegen needs the RegAlloc pass and the LinearOrder analysis: the first because a register must be assigned to each operand, the second because jump instructions must be generated for the major edges.

## Implementation details

Major codegen dependencies:
1. Encoder library (calling convention, encoder, and register description).
2. CodeBuilder - contains the binary data and holds a header with information for the stack-walker.
3. CFrameLayout - responsible for the stack-frame layout in the runtime (the class is described in `libpandabase/utils`).
4. SlowPath - a class responsible for side exits.

The codegen internals are responsible for:
1. Conversions from the IR to encoder structures (conditions, immediates, registers, etc.).
2. Filling the meta-info needed to recover frame information during exceptions (state stamps, which are used by the stack-walker).
3. Getting information from the runtime about the current methods and objects.
4. Supporting logic for building AOT code (runtime calls are needed during AOT code execution).
5. Correct parameter passing.

## Logic of work

The constructor does additional work: it walks through the call instructions and calculates the parameter count (needed to reserve stack slots).

The main logic is in the `RunImpl` method:
1. Generate the calling-convention prologue.
2. Create the encode-visitor and visit each instruction in linear order by blocks.
3. After that, side exits (SlowPath) are emitted (also for OSR).
4. Generate the epilogue for a correct exit from the method.
5. At the end, the encoder is finalized (real offsets are emitted for each branch) and the CodeBuilder header is filled.

## Additional features

There are also ScopedDisasmPrinter (to print disassembly) and EncodeVisitor (to generate specific opcodes).

Example of a disasm dump:
```
- START_METHOD
  0000: stp x0, x20, [sp, #-32]!
  0004: stp x29, x30, [sp, #16]
  0008: add x29, sp, #0x10 (16)
  ...
- CallRuntime
  0014: mov x16, #0xcbd4
  0018: movk x16, #0x52, lsl #16
  001c: blr x16
  0020: ldp x30, xzr, [sp], #16
  ...
```

## Pseudocode
codegen.cpp:
```
void Codegen::RunImpl() {
    GetCallingConvention()->BeginMethod(); // emit moves from parameters to dst-regs etc.

    for (auto bb : GetGraph()->GetBlocksLinearOrder()) {
        GetEncoder()->BindLabel(bb->GetId());
        for (auto inst : bb->AllInsts()) {
            visitor.VisitInstruction(inst);
        }
    }

    EmitSlowPaths(); // Emit the code responsible for side exits.
    GetEncoder()->Finalize(); // After this, the generated code can be used.
    GetGraph()->SetData(EncodeDataType(code_entry, code_size)); // This data (entry and code) is used in the code cache
}

void Codegen::CreateCall(inst) {
    auto callconv = GetCallingConvention();
    auto dst_reg = ConvertRegister(call_inst->GetDstReg(), call_inst->GetType());
    // 1st parameter register
    Reg param = GetRegfile()->GetSpecialRegister(SpecialReg::DEFAULT_PARAMETER);
    if (GetGraph()->IsAotMode()) {
        ... // AOT code
    } else {
        GetEncoder()->SaveCallerRegisters();
        SetCallParameters(call_inst);
        auto method = ...->GetMethodById(...->GetMethod(), call_inst->GetCallMethodId());
        // Move the immediate value to the parameter
        GetEncoder()->EncodeMov(param_0, Imm(method));
    }
    // Get the entry point offset
    size_t entry_point_offset = ...->GetCompiledEntryPointOffset();
    // Major call instruction
    GetEncoder()->MakeCall(MemRef(param_0, entry_point_offset));
    ...
    GetEncoder()->LoadCallerRegisters(dst_reg);
}

class EncodeVisitor : public GraphVisitor {
    /* VisitAdd, VisitSub, VisitMul etc... */
    VisitInstruction(inst) {
        switch (inst->Opc()) {
            case (Opcode::Add) VisitAdd(inst); break;
            case (Opcode::Cmp) VisitCmp(inst); break;
            case (Opcode::Call) VisitCallStatic(inst); break;
            ...
        }
    }

    // Full logic for generating one instruction:
    void VisitAdd(inst) {
        auto dst = GetCodegen()->ConvertRegister(inst->GetDstReg(), type);
        auto src0 = GetCodegen()->ConvertRegister(inst->GetSrcReg(0), type);
        GetEncoder()->EncodeAdd(dst, src0);
    }

    void EncodeVisitor::VisitCallStatic(inst) {
        GetCodegen()->CreateCall(inst);
    }

    void EncodeVisitor::VisitCmp(GraphVisitor* visitor, Inst* inst) {
        auto* enc = static_cast<EncodeVisitor*>(visitor);
        auto cmp_inst = inst->CastToCmp();
        auto dst = GetCodegen()->ConvertRegister(inst->GetDstReg());
        auto src0 = GetCodegen()->ConvertRegister(inst->GetSrcReg(0));
        auto src1 = GetCodegen()->ConvertRegister(inst->GetSrcReg(1));
        Condition cc = ...
        GetEncoder()->EncodeCmp(dst, src0, src1, cc);
    }

}
```

And they will have a different encoding for each architecture:
```
target/aarch32/encode.cpp:
void Aarch32Encoder::EncodeCompare(Reg dst, Reg src0, Reg src1, Condition cc) {
    CompareHelper(src0, src1, &cc); // Helper that calculates the flags for the source types
    __ Mov(Convert(cc), VixlReg(dst), 0x1);
    __ Mov(Convert(cc).Negate(), VixlReg(dst), 0x0);
}

target/aarch64/encode.cpp:
void Aarch64Encoder::EncodeCompare(Reg dst, Reg src0, Reg src1, Condition cc) {
    __ Cmp(VixlReg(src0), VixlReg(src1));
    __ Cset(VixlReg(dst), Convert(cc));
}
```

## Links

[codegen.cpp](../optimizer/code_generator/codegen.cpp)
[codegen.h](../optimizer/code_generator/codegen.h)
[slow_path.cpp](../optimizer/code_generator/slow_path.cpp)
[slow_path.h](../optimizer/code_generator/slow_path.h)

Tests:
[codegen_test.cpp](../tests/codegen_test.cpp), [inst_generator_test.cpp](../tests/inst_generator_test.cpp) - tests that generate each instruction.
@ -1,155 +0,0 @@
# IfConversion
## Overview

`IfConversion` tries to remove branches in executable code by creating linear sections with predicated instructions.

## Rationality

Hardware executes the program speculatively. It tries to predict the branch outcome (true or false) and starts executing instructions after the branch speculatively, before executing the branch itself. If the prediction is incorrect (a branch misprediction), the pipeline stalls and the state is restored. As a result, several dozen cycles are lost. `IfConversion` may add several instructions but removes branch mispredictions.

## Dependence

* Dead Code Elimination (DCE)
* Remove Empty Blocks
* Remove Linear Blocks
* Reverse Post Order (RPO)

## Algorithm

The optimization makes a pass through the blocks by post order traversal.
Two patterns are checked for each block: `Triangle` and `Diamond`.

### Triangle

The pattern:

```
   [BB]
   |  \
   |  [JBB]
   |  /
  [PBB]
```
`BB` -- the basic block the recognition starts from
`JBB` (Join BB) -- the true or false successor of `BB`, which will be joined to `BB`
`PBB` (Phi BB) -- the false or true successor of `BB`, which contains Phi instructions for `BB` and `JBB`

### Diamond

```
      [BB]
     /    \
  [JBB] [JBB 2]
     \    /
     [PBB]
```

`BB` -- the basic block the recognition starts from
`JBB` (Join BB), `JBB 2` -- the true and false successors of `BB`, which will be joined to `BB`
`PBB` (Phi BB) -- the successor of `JBB` and `JBB 2`, which contains Phi instructions for `JBB` and `JBB 2`


### Conditions to check

1. `JBB` (and `JBB 2` for Diamond) must have only one predecessor and one successor
2. `PBB` must have 2 or more predecessors
3. `JBB` (and `JBB 2` for Diamond) is a predecessor of `PBB`
4. `JBB` (and `JBB 2` for Diamond) doesn't contain instructions with the `no_ifcvt` property (for example memory instructions, call instructions, instructions that call into the runtime)
5. The number of instructions in `JBB` (and `JBB 2` for Diamond) is less than the limit (set by the option `--compiler-if-conversion-limit=N`, with the default value 2)
6. The number of Phi instructions in `PBB` that have different inputs from the corresponding predecessor blocks should also be less than the limit (each of them would be converted into a Select)
7. `PBB` doesn't contain a float Phi with different inputs for `JBB` and `BB` (`JBB 2` for Diamond)


### Transformation

1. The `If` instruction is removed from `BB` (the necessary information, such as the condition code, is saved)
2. The edges `BB` -> `JBB` and `JBB` -> `PBB` are removed
3. All instructions from `JBB` are copied to `BB`
4. Select instructions are constructed at the end of `BB` (`JBB 2` for Diamond)
5. All Phi instructions in `PBB` are edited:
   a. If `PBB` has other predecessors, we check whether the inputs from `JBB` and `BB` (`JBB 2` for Diamond) are equal; if so, the input from `JBB` is removed. Otherwise it is also removed, but the input from `BB` (`JBB 2` for Diamond) is changed to the corresponding Select instruction.
   b. If `PBB` doesn't have other predecessors, all Phi inputs are copied to Select instructions and the Phi instruction is deleted.
6. For Diamond, `BB` and `JBB 2` are merged
7. If `PBB` doesn't have other predecessors, `BB` and `PBB` are merged
8. Loop information is fixed

## Pseudocode

TODO
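
Until the section above is filled in, here is a rough sketch inferred from the transformation steps; the helper names are illustrative, not the actual implementation:

```
for (auto bb : GetGraph()->GetBlocksRPO()) {  // the pass visits blocks by post order traversal
    if (MatchTriangle(bb) || MatchDiamond(bb)) {
        // 1. Save the condition code and remove the If instruction from bb
        // 2. Remove the edges bb->jbb and jbb->pbb
        // 3. Copy all instructions from jbb into bb
        // 4. Build Select instructions at the end of bb
        // 5. Rewrite or delete the Phi instructions in pbb
        // 6.-7. Merge bb with jbb2 (Diamond) and with pbb when pbb has no other predecessors
        // 8. Fix loop information
    }
}
```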

## Examples

**Triangle**:

Before:

```
BB 2 preds: [bb 0]
3.b Compare B u64 v0, v1 -> (v4)
4. IfImm NE b v3, 0x0
succs: [bb 3, bb 4]

BB 3 preds: [bb 2]
5.u64 Mul v0, v2 -> (v6p)
succs: [bb 4]

BB 4 preds: [bb 2, bb 3]
6p.u64 Phi v0(bb2), v5(bb3) -> (v7)
7.u64 Return v6p
succs: [bb 1]
```
After:

```
BB 2 preds: [bb 0]
3.b Compare B u64 v0, v1 -> (v8)
5.u64 Mul v0, v2 -> (v8)
8.u64 SelectImm NE b v5, v0, v3, 0x0 -> (v7)
7.u64 Return v8
succs: [bb 1]
```

**Diamond**:

Before:

```
BB 2 preds: [bb 0]
3.b Compare EQ u32 v1, v2 -> (v4)
4. IfImm NE b v3, 0x0
succs: [bb 3, bb 4]

BB 4 preds: [bb 2]
5.u32 Add v0, v1 -> (v8p)
succs: [bb 5]

BB 3 preds: [bb 2]
7.u32 Sub v0, v1 -> (v8p)
succs: [bb 5]

BB 5 preds: [bb 4, bb 3]
8p.u32 Phi v5(bb4), v7(bb3) -> (v9)
9.u32 Return v8p
succs: [bb 1]
```

After:

```
BB 2 preds: [bb 0]
3.b Compare EQ u32 v1, v2 -> (v10)
7.u32 Sub v0, v1 -> (v10)
5.u32 Add v0, v1 -> (v10)
10.u32 SelectImm NE b v7, v5, v3, 0x0 -> (v9)
9.u32 Return v10
succs: [bb 1]
```

## Links

Source code:
[if_conversion.cpp](../optimizer/optimizations/if_conversion.cpp)
[if_conversion.h](../optimizer/optimizations/if_conversion.h)

Tests:
[if_conversion_test.cpp](../tests/if_conversion_test.cpp)
@ -1,179 +0,0 @@
# Loop Peeling
## Overview

`Loop peeling` modifies loops with the exit point at the loop-header into loops with the exit point at the loop-backedge.

## Rationality

Simplifies the loop and enables further loop optimizations.

## Dependence

* Loop Analysis
* Dominators Tree
* Reverse Post Order (RPO)

## Algorithm

`Loop peeling` modifies loops with the following requirements:
- the loop is not irreducible;
- the loop-header is not an OSR-entry;
- there is only 1 back-edge;
- the loop-header is the single loop-exit point;
- there are no inner loops;

```cpp
  [pre-header]
       |
       v
/---->[header]--------\
|        |            |
|        v            v
\----[back-edge]   [outer]
```

There are two stages of the algorithm:

### 1. Insert pre-loop
```cpp
  [pre-header]
       |
       v
   [pre-loop]--------\
       |             |
       v             v
/---->[header]------>|
|        |           |
|        v           v
\----[back-edge] [resolver]
                     |
                     v
                  [outer]
```
The pre-loop basic block is a clone of the loop-header with all its instructions, excluding `SafePoint`.

### 2. Move the exit point from the loop-header to the loop-backedge block
```cpp
  [pre-header]
       |
       v
   [pre-loop]--------\
       |             |
       v             v
/---->[header]       |
|        |           |
|        v           v
\----[back-edge]-->[resolver]
                     |
                     v
                  [outer]
```
All instructions from the loop-header are moved to the loop-backedge block. The control-flow edge between the loop-header and the resolver-block is also moved to the loop-backedge.

## Pseudocode

```cpp
auto header = loop->GetHeader();
auto pre_loop = LoopPeeling->CreatePreLoop();
for (auto inst : header->GetInstructions()) {
    auto clone_inst = Clone(inst);
    pre_loop.AppendInst(clone_inst);
}
auto exit_block = LoopPeeling->CreateExitBlock();
for (auto inst : header->GetInstructionsReverse()) {
    header->EraseInst(inst);
    exit_block->PrependInst(inst);
}
```

## Examples

```
      [0]
       |
       v
/---->[2]-----\
|      |      |
|      v      v
\-----[3]    [4]
              |
           [exit]
```

```cpp
GRAPH(GetGraph()) {
    PARAMETER(0, 0).u64();
    PARAMETER(1, 1).u64();
    PARAMETER(2, 2).u64();
    BASIC_BLOCK(2, 3, 4) {
        INST(3, Opcode::Phi).u64().Inputs(1, 5);
        INST(4, Opcode::Phi).u64().Inputs(2, 10);
        INST(5, Opcode::Sub).u64().Inputs(3, 2);
        INST(6, Opcode::SafePoint).Inputs(0, 3, 4).SrcVregs({0, 1, 2});
        INST(7, Opcode::Compare).CC(CC_EQ).b().Inputs(5, 0);
        INST(8, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(7);
    }
    BASIC_BLOCK(3, 2) {
        INST(9, Opcode::And).u64().Inputs(4, 5);
        INST(10, Opcode::Add).u64().Inputs(9, 4);
    }
    BASIC_BLOCK(4, -1) {
        INST(11, Opcode::Return).u64().Inputs(4);
    }
}
```

`Loop peeling` transforms it to:

```
      [0]
       |
       v
  [pre-loop]---------\
       |             |
/---->[2]            |
|      |             |
|      v             |
|     [3]            |
|      |             |
|      v             v
\--[loop-exit]--->[loop-outer]
                      |
                      v
                     [4]
                      |
                      v
                   [exit]
```

```cpp
GRAPH(expected_graph) {
    PARAMETER(0, 0).u64();
    PARAMETER(1, 1).u64();
    PARAMETER(2, 2).u64();
    BASIC_BLOCK(5, 2, 4) {
        INST(12, Opcode::Sub).u64().Inputs(1, 2);
        INST(13, Opcode::Compare).CC(CC_EQ).b().Inputs(12, 0);
        INST(14, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(13);
    }
    BASIC_BLOCK(2, 2, 4) {
        INST(3, Opcode::Phi).u64().Inputs({{5, 12}, {2, 5}});
        INST(4, Opcode::Phi).u64().Inputs({{5, 2}, {2, 10}});
        INST(15, Opcode::Phi).u64().Inputs({{5, 12}, {2, 5}});
        INST(9, Opcode::And).u64().Inputs(4, 15);
        INST(10, Opcode::Add).u64().Inputs(9, 4);
        INST(5, Opcode::Sub).u64().Inputs(3, 2);
        INST(6, Opcode::SafePoint).Inputs(0, 5, 10).SrcVregs({0, 1, 2});
        INST(7, Opcode::Compare).CC(CC_EQ).b().Inputs(5, 0);
        INST(8, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(7);
    }
    BASIC_BLOCK(4, -1) {
        INST(16, Opcode::Phi).u64().Inputs({{5, 2}, {2, 10}});
        INST(11, Opcode::Return).u64().Inputs(16);
    }
}
```

## Links
Source code:

[loop_peeling.cpp](../optimizer/optimizations/loop_peeling.cpp)

[loop_peeling.h](../optimizer/optimizations/loop_peeling.h)

Tests:

[loop_peeling_test.cpp](../tests/loop_peeling_test.cpp)
@ -1,212 +0,0 @@
# Loop Unrolling
## Overview

The `Loop unrolling` optimization increases the loop body by copying the instructions of the original loop body.

## Rationality

Increases the number of instructions executed per loop iteration and reduces the branch penalty.

## Dependence

* Loop Analysis
* Dominators Tree
* Reverse Post Order (RPO)
* Loop Peeling (to make the loop exit from the backedge)


## Algorithm

`Loop unrolling` modifies loops with the following requirements:

* the loop is not irreducible;
* the loop-header is not an OSR-entry;
* there is only 1 back-edge;
* the loop-backedge is the single loop-exit point;
* there are no inner loops;

Optimization settings:

**Instructions limit** - the maximum number of loop instructions after unrolling;

**Unroll factor** - the number of loop body copies, including the original one;

There are two types of unrolling: with side-exits and without them. Unrolling without side-exits is applied to countable loops, as the sketch after this section illustrates.

### Countable loops

A loop is countable if it contains a compare between the loop-index instruction and a test instruction defined outside the loop.
The loop index should be incremented or decremented by a constant. Currently only signed integer indexes are supported.

```
[Loop-header]
    Phi(init, update)
    ...

[Loop-backedge]
    update(phi, constant)
    Compare(update, test)
    ...
where `update` is an Add or Sub instruction
```
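
For illustration, a source-level loop of the following shape (a C++ sketch, not IR) is countable under these rules:

```cpp
// `i` is the loop index: Phi(init, update) with update = Add(phi, constant).
// `n` is the test input, defined outside the loop.
int Sum(int n)
{
    int sum = 0;
    for (int i = 0; i < n; i += 2) {  // Compare(update, test) at the backedge
        sum += i;
    }
    return sum;
}
```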
### Unrolling without side-exits

There are 3 stages of unrolling a loop without side-exits:

1. Clone the loop-body without the loop-backedge `factor` times;
2. Fix the loop-backedge compare by incrementing/decrementing its test input with a constant computed as `(factor - 1) * loop_step` (with factor 2 and step 1 this subtracts 1, as the `SubI` in the example below shows). If the compare has a `not-equal` condition code, replace it with `less-than`/`greater-than`;
3. Clone the loop-body with the original loop-backedge `factor` times, but replace the edge to the loop-header with an edge to the loop-outer block;

Here `factor` means the number of cloned loop bodies.

```cpp
/---->[header]
|        |
|        v
|    [loop-body]
|        |
|        v
\-----[backedge]----> ...
```
```
/---->[header]
|        |
|        v
|    [loop-body]
|        |
|        v
|    [loop-body']
|        |
|        v
|    [loop-body'']
|        |
|        v
\-----[backedge]----> ...
         |
         v
     [loop-body]
         |
         v
     [backedge]------\
         |           |
         v           |
     [loop-body]     |
         |           |
         v           |
    [outer-block]<---/
```


### Unrolling with side-exits

For this case, both the loop-body and the loop-backedge are cloned:
```cpp
/---->[header]
|        |
|        v
|    [loop-body]
|        |
|        v
|    [backedge]------------\    << exit-block
|        |                 |
|        v                 |
|    [loop-body-clone]     |
|        |                 |
|        v                 |
\-----[backedge-clone]---->|    << last-block
                           |
                           v
                        [outer]-----> ...
```
## Pseudocode
```cpp
if (IsLoopCountable(loop)) {
    auto clone_loop = CloneLoop(loop);
    UnrollLoopBodyWithoutSideExits(loop);
    FixCompareInst(loop);
    UnrollLoopBodyWithSideExits(clone_loop);
    RemoveEdgeToLoopHeader(clone_loop);
} else {
    UnrollLoopBodyWithSideExits(loop);
}
```
## Examples

Countable loop unrolling:
```cpp
auto graph = CreateEmptyGraph();
GRAPH(graph) {
    CONSTANT(0, stop);
    CONSTANT(1, 0);  // a = 0, b = 0
    CONSTANT(2, step);
    BASIC_BLOCK(2, 3, 4) {
        INST(3, Opcode::Compare).b().SrcType(DataType::INT32).CC(CC_LT).Inputs(1, 0);
        INST(4, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(3);  // if a < stop
    }
    BASIC_BLOCK(3, 3, 4) {
        INST(5, Opcode::Phi).s32().Inputs(1, 7);  // a
        INST(6, Opcode::Phi).s32().Inputs(1, 8);  // b
        INST(7, Opcode::Add).s32().Inputs(5, 2);  // a += step
        INST(8, Opcode::Add).s32().Inputs(6, 7);  // b += a
        INST(9, Opcode::Compare).b().SrcType(DataType::INT32).CC(CC_LT).Inputs(7, 0);
        INST(10, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(9);  // if a < stop
    }
    BASIC_BLOCK(4, -1) {
        INST(11, Opcode::Phi).s32().Inputs(1, 6);
        INST(12, Opcode::Return).s32().Inputs(11);  // return b;
    }
}
return graph;
```
```cpp
uint32_t UNROLL_FACTOR = 2;

GRAPH(graph_unroll) {
    CONSTANT(0, 10);
    CONSTANT(1, 0);  // a = 0, b = 0
    CONSTANT(2, 1);
    BASIC_BLOCK(2, 3, 5) {
        INST(20, Opcode::SubI).s32().Inputs(0).Imm(UNROLL_FACTOR - 1);
        INST(3, Opcode::Compare).b().SrcType(DataType::INT32).CC(CC_LT).Inputs(1, 20);  // if (a < 10 - (UNROLL_FACTOR - 1))
        INST(4, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(3);
    }
    BASIC_BLOCK(3, 3, 5) {
        INST(5, Opcode::Phi).s32().Inputs(1, 21);  // a
        INST(6, Opcode::Phi).s32().Inputs(1, 22);  // b
        INST(7, Opcode::Add).s32().Inputs(5, 2);   // a + 1
        INST(8, Opcode::Add).s32().Inputs(6, 7);   // b + 1
        INST(21, Opcode::Add).s32().Inputs(7, 2);  // a + 1
        INST(22, Opcode::Add).s32().Inputs(8, 21); // b + 1
        INST(9, Opcode::Compare).b().SrcType(DataType::INT32).CC(CC_LT).Inputs(21, 20);
        INST(10, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(9);  // if (a < 10 - (UNROLL_FACTOR - 1))
    }
    BASIC_BLOCK(5, 6, 4) {
        INST(11, Opcode::Phi).s32().Inputs(1, 8);
        INST(25, Opcode::Phi).s32().Inputs(1, 21);  // a
        INST(26, Opcode::Phi).s32().Inputs(1, 22);  // b
        INST(27, Opcode::Compare).b().SrcType(DataType::INT32).CC(CC_LT).Inputs(25, 0);  // if (a < 10)
        INST(28, Opcode::IfImm).SrcType(DataType::BOOL).CC(CC_NE).Imm(0).Inputs(27);
    }
    BASIC_BLOCK(6, 4) {
        INST(29, Opcode::Add).s32().Inputs(25, 2);   // a + 1
        INST(30, Opcode::Add).s32().Inputs(26, 29);  // b + 1
    }
    BASIC_BLOCK(4, -1) {
        INST(31, Opcode::Phi).s32().Inputs(11, 26);
        INST(12, Opcode::Return).s32().Inputs(31);  // return b
    }
}
```
## Links
Source code:

[loop_unroll.cpp](../optimizer/optimizations/loop_unroll.cpp)

[loop_unroll.h](../optimizer/optimizations/loop_unroll.h)

Tests:

[loop_unroll_test.cpp](../tests/loop_unroll_test.cpp)
@ -1,104 +0,0 @@

# Optimize memory barriers

## Overview

We need to encode barriers after the instructions NewArray, NewObject and NewMultiArray, so that if the created objects are used in another thread, their initialization is fully completed.
We can remove a barrier if we prove that the created object cannot be passed to another thread before the next barrier.
This can happen if we store the object to memory or pass it to another method.

## Rationality

Reduces the number of instructions and speeds up execution.

## Dependence

RPO analysis

## Algorithm

There is an instruction flag `MEM_BARRIER`. The flag is set to `true` for the instructions NewObject, NewArray and NewMultiArray.
The pass `OptimizeMemoryBarriers` tries to remove the flag (set it to `false`) from instructions where the barrier is redundant.
We pass through all instructions in RPO order. If an instruction has the flag `MEM_BARRIER`, we add it to the special vector `barriers_insts_`.
If we visit an instruction that can pass an object to another thread (a Store instruction, a Call instruction, etc.), we check the instruction's inputs.
If the instruction has an input from `barriers_insts_`, we call the function `MergeBarriers`.
The function clears the `MEM_BARRIER` flag from all instructions in the vector except the last one.
So we only set the barrier on the last instruction before the created objects can potentially be passed to another thread.

The function `MergeBarriers` is also called at the end of each basic block.

Codegen checks the flag `MEM_BARRIER` for the instructions NewObject, NewArray and NewMultiArray, and encodes a memory barrier if the flag is `true`.
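
The pseudocode below refers to a helper `InstHasBarrierInput`. A minimal sketch of such a check, assuming the `InstVector` type and input accessors used elsewhere in this document (illustrative, not the actual implementation):

```cpp
// Sketch: returns true when any input of `inst` is one of the
// not-yet-merged barrier instructions collected so far.
bool InstHasBarrierInput(Inst *inst, const InstVector &barriers_insts)
{
    for (auto &input : inst->GetInputs()) {
        auto *input_inst = input.GetInst();
        if (std::find(barriers_insts.begin(), barriers_insts.end(), input_inst) != barriers_insts.end()) {
            return true;
        }
    }
    return false;
}
```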

## Pseudocode

```
bool OptimizeMemoryBarriers::RunImpl()
{
    barriers_insts.clear();
    for (auto bb : GetGraph()->GetBlocksRPO()) {
        for (auto inst : bb->Insts()) {
            if (inst->GetFlag(inst_flags::MEM_BARRIER)) {
                barriers_insts.push_back(inst);
            }
            if (InstCanMoveObjectInAnotherThread(inst) && InstHasBarrierInput(inst, barriers_insts)) {
                MergeBarriers(barriers_insts);
            }
        }
        MergeBarriers(barriers_insts);
    }
    return true;
}

void MemoryBarriersVisitor::MergeBarriers(InstVector& barriers_insts)
{
    if (barriers_insts.empty()) {
        return;
    }
    auto last_barrier_inst = barriers_insts.back();
    for (auto inst : barriers_insts) {
        inst->ClearFlag(inst_flags::MEM_BARRIER);
    }
    last_barrier_inst->SetFlag(inst_flags::MEM_BARRIER);
    barriers_insts.clear();
}
```

## Examples

```
BB 0
prop: start
0.i64 Constant 0x2a -> (v6, v3, v1, v2, v8, v11)
succs: [bb 2]

BB 2 preds: [bb 0]
1. SaveState v0(vr0) -> (v2)
2.ref NewArray 1 v0, v1 -> (v6, v3, v8, v11, v12)
3. SaveState v0(vr0), v2(vr1) -> (v5, v4)
4.ref LoadAndInitClass 'A' v3 -> (v5)
5.ref NewObject 2 v4, v3 -> (v6, v8, v11, v12)
6. SaveState v0(vr0), v2(vr1), v5(vr2) -> (v7, v12)
7.void CallStatic 3 v6
8. SaveState v0(vr0), v2(vr1), v5(vr2) -> (v9, v10)
9.ref LoadAndInitClass 'B' v8 -> (v10)
10.ref NewObject 4 v9, v8 -> (v11, v13)
11. SaveState v0(vr0), v2(vr1), v5(vr2), v10(vr3)
12.i64 CallVirtual 5 v2, v5, v6
13.ref Return v10
succs: [bb 1]

BB 1 preds: [bb 2]
prop: end
```

The instructions `2.ref NewArray`, `5.ref NewObject` and `10.ref NewObject` have the flag `MEM_BARRIER` by default.
`7.void CallStatic` does not have the instructions `2.ref NewArray` and `5.ref NewObject` among its inputs, so the barriers are not merged there. `12.i64 CallVirtual` does take them as inputs, so the pass `OptimizeMemoryBarriers` removes the flag from these instructions and keeps it only on `10.ref NewObject`.

## Links

Source code:
[memory_barriers.cpp](../optimizer/optimizations/memory_barriers.cpp)
[memory_barriers.h](../optimizer/optimizations/memory_barriers.h)

Tests:
[memory_barriers_test.cpp](../tests/memory_barriers_test.cpp)
@ -1,250 +0,0 @@

# Memory Coalescing

## Overview

The optimization is based on the fact that some architectures (`AArch64` in particular) support load and store operations that access two consecutive addresses at once, instead of several separate operations.

## Rationality

Replacing two memory operations with one generally reduces the number of long-latency memory instructions.

| Code | Regular | Optimized |
| ------ | ------ | ------|
| `num[0] * num[1];` | `ldr x1, [x0]` <br> `ldr x0, [x0, 8]` <br> `mul x0, x1, x0` | `ldp x1, x0, [x0]` <br> `mul x0, x1, x0` |

## Dependence

* DominatorsTree
* LoopAnalyzer
* AliasAnalysis
* Reverse Post Order (RPO)

## Assumptions

The optimization is implemented for array accesses on the `AArch64` architecture.

Array accesses cannot be volatile.

`AArch64` has `32`-bit and `64`-bit versions of the coalescing operations `ldp` and `stp`. As a result, coalescing is possible only for the following Panda types: `INT32`, `UINT32`, `INT64`, `UINT64`, `REFERENCE`.
## Algorithm

This kind of optimization requires extra support from the IR.

### IR Support

The following actions are required:

* Separate instructions that represent coalesced memory accesses from regular accesses.
* Handle multiple outputs from one instruction in terms of SSA.

The case with a coalesced store is quite straightforward: having two consecutive stores, we replace them with one instruction that accepts an index and two values to store.

| Consecutive Stores | Coalesced Store |
| --- | --- |
| `248.i64 StoreArrayI v2, 0x0, v53` <br> `250.i64 StoreArrayI v2, 0x1, v53` | `251.i64 StoreArrayPairI v2, 0x0, v53, v53` |

The problem occurs with a coalesced load, because a load instruction of multiple values produces a multiple assignment, which is not part of SSA form. For this reason we need additional pseudo instructions such as `LoadPairPart` to split the multiple values into single values. The type of a `LoadArrayPair` instruction corresponds to the type of a single element, so there is an assumption that `LoadArrayPair` loads only multiple values of the same type.

| Consecutive Loads | Coalesced Load |
| --- | --- |
| `58.i64 LoadArrayI v2, 0x0 -> (v37)` <br> `61.i64 LoadArrayI v2, 0x1 -> (v43)` | `62.i64 LoadArrayPairI v2, 0x0 -> (v63, v64)` <br> `63.i64 LoadPairPart v62, 0x0 -> (v37)` <br> `64.i64 LoadPairPart v62, 0x1 -> (v43)` |

### Transformation

The optimization tries to coalesce memory operations within the scope of a basic block. It requires two consecutive memory operations to be placed next to each other without intermediate instructions. For this reason we need to find a place to sink the upper memory operation and hoist the lower one according to reordering rules.

While hoisting and sinking memory operations we use the rules of memory instruction scheduling: do not move them over monitors, calls, save states, safe points, etc.

Memory coalescing is implemented for array accesses. We process the instructions of a basic block in order. To find accesses to consecutive memory addresses we keep a queue of candidates. Each instruction that may be coalesced is inserted into this queue. A candidate is marked as invalid under the following conditions:
* it has been paired already;
* store candidates become invalid when a SaveState instruction is met;
* all candidates become invalid when a barrier is met: calls, control flow instructions, monitors, exceptions, intrinsics, etc.

To track indices we use a basic implementation of scalar evolution that tracks how variables evolve: the basic value (a variable or a constant), the difference from the basic value (if the basic value is a variable), and the evolution (if the basic value is a variable incremented on each iteration of a loop). It is a simple graph traversal that collects assignments, including Phi evolutions (only addition and subtraction are supported); a minimal sketch of such index tracking follows.
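
For illustration only; the structure and accessor names here (`IndexDescription`, `GetImm`) are assumptions made for this example, not the actual implementation:

```cpp
// Illustrative sketch: an index is described as a base instruction plus an
// accumulated constant offset, collected by walking AddI/SubI chains.
struct IndexDescription {
    Inst *base;    // the variable or constant the index derives from
    int64_t diff;  // accumulated constant offset from the base
};

IndexDescription DescribeIndex(Inst *index)
{
    int64_t diff = 0;
    while (index->GetOpcode() == Opcode::AddI || index->GetOpcode() == Opcode::SubI) {
        auto imm = static_cast<int64_t>(index->GetImm());  // assumed accessor for the immediate
        diff += (index->GetOpcode() == Opcode::AddI) ? imm : -imm;
        index = index->GetInput(0).GetInst();
    }
    return {index, diff};
}

// Accesses a[i] and a[j] are candidates for coalescing when DescribeIndex(i)
// and DescribeIndex(j) share the same base and their diffs differ by one.
```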

Processing each instruction in the basic block, we do the following:
1) If the instruction cannot be coalesced:
   1) If the instruction is a barrier, invalidate all candidates.
   2) If the instruction is a SaveState, invalidate all store candidates.
   3) If the instruction is a memory operation, add it as an invalid candidate.
2) If we can't determine anything about the index variable, add this instruction as a candidate and move on to the next instruction.
3) Iterate candidates in backward order:
   1) If a candidate is invalid **or** the candidate cannot be coalesced with the instruction **or** they refer to different objects **or** we have no information about the candidate's index, move on to the next candidate.
   2) If the indices differ by one and there is a place to sink the candidate instruction and hoist the currently processed instruction, add this candidate and the instruction as a pair for coalescing and invalidate both.
4) Add the instruction as a candidate.

Finally, we replace the collected pairs with coalesced instructions.

To find a place for the candidate and the current instruction:
1) find the lowest position the candidate can be sunk to;
2) find the highest position the instruction can be hoisted to;
3) the place can be any position between the highest and the lowest one. If the intersection is empty, coalescing is not possible.

## Pseudocode

```
void MemoryCoalescing::RunImpl() {
    VariableAnalysis variables(GetGraph());
    for (auto block : GetGraph()->GetBlocksRPO()) {
        for (auto inst : block->Insts()) {
            if (IsArrayAccess(inst)) {
                HandleArrayAccess(inst, variables);
            } else if (inst->IsMemory()) {
                inst->SetMarker(mrk_invalid_);
                candidates.push_back(inst);
            } else if (inst->IsBarrier()) {
                // Remove all candidates -- do not move anything across barriers
                candidates.clear();
            }
        }
        // Work in scope of basic block
        candidates.clear();
    }

    for (auto pair : pairs) {
        // Replace a pair of instructions by a coalesced instruction
        ReplacePair(pair);
    }
}

void HandleArrayAccess(Inst *inst, VariableAnalysis &vars) {
    Inst *obj = inst->GetObject();
    Inst *idx = inst->GetIndex();
    // If we don't know anything about index, do nothing
    if (!vars.IsAnalyzed(idx)) {
        candidates.push_back(inst);
        return;
    }
    // Last candidates are more likely to be coalesced
    for (auto iter = candidates.rbegin(); iter != candidates.rend(); iter++) {
        auto cand = *iter;
        // Skip uninteresting candidates: invalid ones and those that cannot be coalesced with the current inst
        if (cand->IsMarked(mrk_invalid_) || cand->GetOpcode() != inst->GetOpcode()) {
            continue;
        }

        Inst *cand_obj = cand->GetObject();
        auto cand_idx = cand->GetIndex();
        // We need info about the candidate's index, and the array objects must alias each other
        if (!vars.IsAnalyzed(cand_idx) || obj->IsAlias(cand_obj) != MUST_ALIAS) {
            continue;
        }
        // If both indices differ by one
        Inst *position = FindBetterPlace(cand, inst);
        if (position && vars.DiffersByConst(idx, cand_idx, 1)) {
            pairs.push_back({cand, inst, position});
            cand->SetMarker(mrk_invalid_);
            inst->SetMarker(mrk_invalid_);
        }
    }

    candidates.push_back(inst);
}
```

## Examples

### Loads and Stores with immediate indices
Before optimization
```
BB 0
prop: start
0.i64 Constant 0x2a -> (v3)
succs: [bb 2]

BB 2 preds: [bb 0]
3.ref NewArray 77 v0 -> (v42, v41)
41. SaveState v3(vr7) -> (v42)
42.ref NullCheck v3, v41 -> (v225, v229, v227, v230)
225.i64 LoadArrayI v42, 0x0 -> (v51)
227.i64 LoadArrayI v42, 0x1 -> (v51)
51.i64 Add v225, v227 -> (v229, v40, v230)
229.i64 StoreArrayI v42, 0x0, v51
230.i64 StoreArrayI v42, 0x1, v51
40.i64 Return v51
succs: [bb 1]
```
After optimization
```
BB 0
prop: start
0.i64 Constant 0x2a -> (v3)
succs: [bb 2]

BB 2 preds: [bb 0]
3.ref NewArray 77 v0 -> (v41, v42)
41. SaveState v3(vr7) -> (v42)
42.ref NullCheck v3, v41 -> (v231, v234)
231.i64 LoadArrayPairI v42, 0x0 -> (v232, v233)
232.i64 LoadPairPart v231, 0x0 -> (v51)
233.i64 LoadPairPart v231, 0x1 -> (v51)
51.i64 Add v232, v233 -> (v234, v234, v40)
234.i64 StoreArrayPairI v42, 0x0, v51, v51
40.i64 Return v51
succs: [bb 1]
```
### Coalescing inside loop
Before optimization
```
BB 2 preds: [bb 0]
3.i32 LenArray v0 -> (v35)
succs: [bb 3]

BB 3 preds: [bb 2, bb 3]
prop: head, loop 1
6p.i32 Phi v4(bb2), v34(bb3) -> (v33, v17, v34)
7p.i32 Phi v5(bb2), v24(bb3) -> (v24, v17)
8p.i32 Phi v5(bb2), v25(bb3) -> (v25, v23, v24)
17.i32 StoreArray v0, v6p, v7p
33.i32 AddI v6p, 0x1 -> (v23)
23.i32 StoreArray v0, v33, v8p
24.i32 Add v7p, v8p -> (v7p, v25)
25.i32 Add v8p, v24 -> (v8p)
34.i32 AddI v6p, 0x2 -> (v6p, v35)
35. If LT i32 v34, v3
succs: [bb 3, bb 4]

BB 4 preds: [bb 3]
29.void ReturnVoid
succs: [bb 1]
```
After optimization
```
BB 2 preds: [bb 0]
3.i32 LenArray v0 -> (v35)
succs: [bb 3]

BB 3 preds: [bb 2, bb 3]
prop: head, loop 1
6p.i32 Phi v4(bb2), v34(bb3) -> (v33, v36, v34)
7p.i32 Phi v5(bb2), v24(bb3) -> (v36, v24)
8p.i32 Phi v5(bb2), v25(bb3) -> (v36, v24, v25)
33.i32 AddI v6p, 0x1
36.i32 StoreArrayPair v0, v6p, v7p, v8p
24.i32 Add v7p, v8p -> (v7p, v25)
25.i32 Add v8p, v24 -> (v8p)
34.i32 AddI v6p, 0x2 -> (v6p, v35)
35. If LT i32 v34, v3
succs: [bb 3, bb 4]

BB 4 preds: [bb 3]
29.void ReturnVoid
succs: [bb 1]
```

## Options

| Option | Description | Default value |
| --- | --- | --- |
| `--compiler-memory-coalescing` | Enables the optimization | `true` |
| `--compiler-memory-coalescing-objects` | Allows coalescing of operations with `ref`s | `true` |
| `--compiler-memory-coalescing-aligned` | Coalesces only aligned accesses (starting with even indices, e.g. 0-1, 4-5, etc.) | `false` |

## Links

Source code:
[memory_coalescing.cpp](../optimizer/optimizations/memory_coalescing.cpp)
[memory_coalescing.h](../optimizer/optimizations/memory_coalescing.h)

Tests:
[memory_coalescing_test.cpp](../tests/memory_coalescing_test.cpp)
@ -1,57 +0,0 @@

# Object Type Check Elimination
## Overview
**Object Type Check Elimination** is an optimization that tries to reduce the number of `IsInstance`/`CheckCast` instructions.

## Rationality
Reduces the number of instructions and removes unnecessary data-flow dependencies.

## Dependences
* RPO
* ObjectTypePropagation

## Algorithm
Visit `IsInstance` and `CheckCast` instructions in RPO order and try to eliminate them.
If an instruction cannot be eliminated, the `ObjectTypeInfo` for its input is dropped.

### IsInstance

`IsInstance` is replaced by 1 if the input object can be cast to the resolved type, and by 0 otherwise. A `null` object is not an instance of any class.
`IsInstanceVisitor` is also used in `Peephole` optimizations.

### CheckCast

If the input object can't be cast to the resolved type, `CheckCast` is replaced by deoptimization; otherwise it is removed. A `null` object reference can be cast to every type.
`CheckCastVisitor` is also used in `CheckCast` optimizations.

## Pseudocode
TODO
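
In the meantime, the flow described in the Algorithm section can be sketched as follows (illustrative only; the helper names are assumptions, not the actual implementation):

```cpp
// Sketch: visit IsInstance/CheckCast in RPO order and try to eliminate
// them based on propagated object type information.
bool ObjectTypeCheckElimination::RunImpl()
{
    for (auto bb : GetGraph()->GetBlocksRPO()) {
        for (auto inst : bb->InstsSafe()) {
            if (inst->GetOpcode() == Opcode::IsInstance) {
                // Replace by constant 1/0 when provable; otherwise drop
                // ObjectTypeInfo for the input (assumed helper).
                TryEliminateIsInstance(inst);
            } else if (inst->GetOpcode() == Opcode::CheckCast) {
                // Remove when the cast always succeeds; replace by
                // Deoptimize when it always fails (assumed helper).
                TryEliminateCheckCast(inst);
            }
        }
    }
    return true;
}
```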

## Examples

```
.record A {}
.record B <extends=A> {}
.record C {}

...
newobj v0, B
lda.obj v0
isinstance A // will be replaced by 1
newobj v0, C
lda.obj v0
isinstance A // will be replaced by 0
...
newobj v0, B
lda.obj v0
checkcast A // will be removed
checkcast C // will be replaced by Deoptimize
```

## Links
Source code:
[object_type_check_elimination.h](../optimizer/optimizations/object_type_check_elimination.h)
[object_type_check_elimination.cpp](../optimizer/optimizations/object_type_check_elimination.cpp)

Tests:
[isinstance_elimination_test.pa](../../tests/checked/isinstance_elimination_test.pa)
[checkcast_elimination_test.pa](../../tests/checked/checkcast_elimination_test.pa)
@ -1,81 +0,0 @@

# Redundant Loop Elimination
## Overview
**Redundant Loop Elimination (RLE)** is an optimization that finds and removes useless loops.
## Rationality
Reduces the number of basic blocks and instructions.
## Dependence
* Loop analysis
## Algorithm
Visit loops in LRN order (first the children, then the parent).
For each loop check that:
* RLE has already been applied to all child loops.
* The loop doesn't contain instructions with side effects (e.g. call instructions).
* The loop doesn't contain instructions with users outside the loop.

If all checks pass, the loop is removed:
1. The loop pre-header is connected to the loop's outer block.
2. The loop's inner blocks are disconnected from the graph.
## Pseudocode
```
LoopVisitLRN(Loop* loop) {
    for (auto inner_loop : loop->GetInnerLoops()) {
        LoopVisitLRN(inner_loop);
    }
    if (Check(loop)) {
        Remove(loop);
    }
}
```
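
A possible sketch of the `Check` helper following the conditions above (illustrative; the actual predicates in the implementation differ in details):

```cpp
// Sketch: a loop is redundant when nothing in it has side effects and no
// value computed inside the loop is used outside of it.
bool Check(Loop *loop)
{
    for (auto block : loop->GetBlocks()) {
        for (auto inst : block->Insts()) {
            // Instructions with side effects (e.g. calls, stores) keep the loop alive.
            if (inst->IsCall() || inst->IsStore()) {
                return false;
            }
            // A user outside the loop means the loop produces a live value.
            for (auto &user : inst->GetUsers()) {
                if (user.GetInst()->GetBasicBlock()->GetLoop() != loop) {
                    return false;
                }
            }
        }
    }
    return true;
}
```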

## Examples
Before RLE:
```
BB 0
prop: start
0.i64 Constant 0x0 -> (v4p)
1.i64 Constant 0x1 -> (v10)
2.i64 Constant 0xa -> (v5)
succs: [bb 3]

BB 3 preds: [bb 0, bb 4]
prop: head, loop 1
4p.i32 Phi v0(bb8), v10(bb4) -> (v5, v10)
5.b Compare LT i32 v4p, v2 -> (v6)
6. IfImm NE b v5, 0x0
succs: [bb 4, bb 5]

BB 5 preds: [bb 3]
12. ReturnVoid
succs: [bb 1]

BB 1 preds: [bb 5]
prop: end

BB 4 preds: [bb 3]
prop: loop 1
10.i32 Add v4p, v1 -> (v4p)
succs: [bb 3]
```
After RLE:
```
BB 0
prop: start
0.i64 Constant 0x0
1.i64 Constant 0x1
2.i64 Constant 0xa
succs: [bb 5]

BB 5 preds: [bb 0]
12. ReturnVoid
succs: [bb 1]

BB 1 preds: [bb 5]
prop: end
```
## Links

Source code:
[redundant_loop_elimination.cpp](../optimizer/optimizations/redundant_loop_elimination.cpp)
[redundant_loop_elimination.h](../optimizer/optimizations/redundant_loop_elimination.h)

Tests:
[redundant_loop_elimination_test.cpp](../tests/redundant_loop_elimination_test.cpp)
@ -1,272 +0,0 @@

# Instruction scheduling
## Overview

Rearrange adjacent instructions for better performance.

## Rationality

When instructions are executed on a CPU, they may stall the processor pipeline if their input registers are not ready yet because they are written by one of the previous instructions. Scheduling reduces the number of such pipeline stalls.

## Dependence

* Dead Code Elimination (DCE)
* Remove Empty Blocks
* Remove Linear Blocks
* Reverse Post Order (RPO)

## Algorithm

Current decisions/limitations:
* The scheduler pass is placed immediately before register allocation.
* It rearranges instructions only inside a basic block, not between blocks.
* No liveness analysis; dependencies are calculated using only barrier/user/alias information.
* No CPU pipeline/resource modeling; only dependency costs are used.
* Forward list scheduling algorithm with the standard critical-path-based priority.

For each basic block we first scan the instructions in reverse order, marking barriers and calculating the dependencies.
Together with the dependencies we calculate each instruction's priority as the longest (critical) path to leaf instructions in the basic block dependency graph.

Then we schedule each interval between barriers using the following algorithm.
There are two priority queues, `waiting` and `ready`. The `ready` queue is sorted by the previously calculated priority, while the `waiting` queue is sorted by the so-called `ASAP` (as soon as possible) values. At initialization, `ready` is empty and `waiting` contains all leaf instructions (those without incoming dependencies); their `ASAP` is 1.

The `ASAP` value of each instruction is changed only before it enters the `waiting` queue and remains unchanged from that moment on.
The algorithm starts at tick `cycle` = 1. If the `ready` queue is empty, we look at the "soonest" instruction in the `waiting` queue and, when some ticks must pass without scheduling any instruction, adjust the `cycle` value accordingly.
Next, we move all already available instructions (`ASAP` <= `cycle`) from the `waiting` queue into the `ready` queue.

Finally, we extract the top instruction from the `ready` queue and add it to the new schedule. At this point we adjust the `ASAP` values of all dependent instructions and push those of them that depend only on already scheduled instructions into the `waiting` queue.

## Pseudocode

```c++
Scheduler::RunImpl() {
    for (auto bb : GetGraph()->GetBlocksRPO())
        ScheduleBasicBlock(bb);
}

// Dependency helper function
void Scheduler::AddDep(uint32_t* prio, Inst* from, Inst* to, uint32_t latency, Inst* barrier) {
    // Update instruction priority - "how high instruction is in dependency tree"
    *prio = std::max(*prio, latency + prio_[to]);
    // Do not add cross-barrier dependencies into deps_
    if (barrier == nullptr || old_[to] > old_[barrier]) {
        if (deps_.at(from).count(to) == 1) {
            uint32_t old_latency = deps_.at(from).at(to);
            if (old_latency >= latency)
                return;
        } else
            num_deps_[to]++;
        deps_.at(from)[to] = latency;
    }
}

// Rearranges instructions in the basic block using list scheduling algorithm.
Scheduler::ScheduleBasicBlock(BasicBlock* bb) {
    // Calculate priority and dependencies
    uint32_t num_inst = 0;
    Inst* last_barrier = nullptr;

    for (auto inst : bb->InstsSafeReverse()) {
        uint32_t prio = 0;
        bool barrier = inst->IsBarrier();
        old_.insert({inst, num_inst++});
        num_deps_.insert({inst, 0U});
        deps_.emplace(inst, GetGraph()->GetLocalAllocator()->Adapter());

        // Dependency to the barrier
        if (last_barrier != nullptr)
            AddDep(&prio, inst, last_barrier, 1U, last_barrier);
        // Dependency from barrier
        if (barrier) {
            Inst* old_last_barrier = last_barrier;
            last_barrier = inst;
            num_barriers++;
            for (auto user = inst->GetNext(); user != old_last_barrier; user = user->GetNext())
                AddDep(&prio, inst, user, 1U, last_barrier);
        }

        // Users
        for (auto& user_item : inst->GetUsers()) {
            auto user = user_item.GetInst();
            AddDep(&prio, inst, user, inst_latency, last_barrier);
        }

        ....  // Memory dependencies calculation
        ...   // CanThrow or SaveState can't be rearranged, and stores can't be moved over them

        prio_.insert({inst, prio});
    }

    // Schedule intervals between barriers
    uint32_t cycle = 0;
    num_inst = 0;
    Inst* first = nullptr;
    for (auto inst = bb->GetFirstInst(); inst != nullptr; inst = inst->GetNext()) {
        bool barrier = inst->IsBarrier();
        num_inst++;
        if (first == nullptr)
            first = inst;
        if (barrier || inst == bb->GetLastInst()) {
            Inst* last = nullptr;
            if (barrier) {
                last = inst->GetPrev();
                num_inst--;
            } else
                last = inst;
            if (num_inst > 1)
                cycle += ScheduleInstsBetweenBarriers(first, last);
            else if (num_inst == 1) {
                sched_.push_back(first);
                cycle++;
            }
            if (barrier) {
                sched_.push_back(inst);
                cycle++;
            }
            num_inst = 0;
            first = nullptr;
        }
    }
    ...  // Here we rearrange instructions in basic block according to sched_
}

// Schedule instructions between [first..last] inclusive, none of them are barriers.
uint32_t Scheduler::ScheduleInstsBetweenBarriers(Inst* first, Inst* last) {
    // Compare function for 'waiting' queue
    auto cmp_asap = [this](Inst* left, Inst* right) {
        return asap_[left] > asap_[right] || (asap_[left] == asap_[right] && old_[left] < old_[right]);
    };
    // Queue of instructions, which dependencies are scheduled already, but they are still not finished yet
    std::priority_queue<Inst*, ArenaVector<Inst*>, decltype(cmp_asap)> waiting(
        cmp_asap, GetGraph()->GetLocalAllocator()->Adapter());

    // Compare function for 'ready' queue
    auto cmp_prio = [this](Inst* left, Inst* right) {
        return prio_[left] < prio_[right] || (prio_[left] == prio_[right] && old_[left] < old_[right]);
    };
    // Queue of ready instructions
    std::priority_queue<Inst*, ArenaVector<Inst*>, decltype(cmp_prio)> ready(
        cmp_prio, GetGraph()->GetLocalAllocator()->Adapter());

    // Initialization, add leafs into 'waiting' queue
    uint32_t num_inst = 0;
    for (auto inst = first; inst != last->GetNext(); inst = inst->GetNext()) {
        asap_.insert({inst, 1U});
        if (num_deps_[inst] == 0)
            waiting.push(inst);
        num_inst++;
    }
    // Scheduling
    uint32_t cycle = 1;
    while (num_inst > 0) {
        if (ready.empty()) {
            uint32_t nearest = asap_[waiting.top()];
            // Skipping cycles where we can't schedule any instruction
            if (nearest > cycle)
                cycle = nearest;
        }
        // Move from 'waiting' to 'ready'
        while (!waiting.empty()) {
            Inst* soonest = waiting.top();
            if (asap_[soonest] <= cycle) {
                waiting.pop();
                ready.push(soonest);
            } else
                break;
        }
        // Extract top 'ready' instruction
        auto cur = ready.top();
        ready.pop();
        // Adjust all dependent instructions
        for (auto pair : deps_.at(cur)) {
            // Adjust asap
            uint32_t asap = asap_[pair.first];
            asap = std::max(asap, cycle + pair.second);
            asap_[pair.first] = asap;
            // Adjust num_deps
            uint32_t num_deps = num_deps_[pair.first];
            num_deps--;
            num_deps_[pair.first] = num_deps;
            if (num_deps == 0 && pair.first->GetOpcode() != Opcode::LoadPairPart)
                waiting.push(pair.first);
        }
        // Add into schedule
        sched_.push_back(cur);
        num_inst--;
        cycle++;
    }
    asap_.clear();
    return cycle;
}
```

## Examples

IR before optimization:
```
BB 0
prop: start
0.i64 Constant 0x2a -> (v8)
1.i64 Constant 0x2b -> (v8)
2.i64 Constant 0x2c -> (v9)
3.i64 Constant 0x2d -> (v9)
4.i64 Constant 0x2e -> (v11)
5.i64 Constant 0x2f -> (v11)
6.i64 Constant 0x30 -> (v12)
7.i64 Constant 0x31 -> (v12)
succs: [bb 2]

BB 2 preds: [bb 0]
8.u64 Add v0, v1 -> (v10)
9.u64 Add v2, v3 -> (v10)
10.u64 Add v8, v9 -> (v14)
11.u64 Add v4, v5 -> (v13)
12.u64 Add v6, v7 -> (v13)
13.u64 Add v11, v12 -> (v14)
14.u64 Add v10, v13 -> (v15)
15.u64 Return v14
succs: [bb 1]

BB 1 preds: [bb 2]
prop: end
```

IR after optimization:
```
BB 0
prop: start
0.i64 Constant 0x2a -> (v8)
1.i64 Constant 0x2b -> (v8)
2.i64 Constant 0x2c -> (v9)
3.i64 Constant 0x2d -> (v9)
4.i64 Constant 0x2e -> (v11)
5.i64 Constant 0x2f -> (v11)
6.i64 Constant 0x30 -> (v12)
7.i64 Constant 0x31 -> (v12)
succs: [bb 2]

BB 2 preds: [bb 0]
8.u64 Add v0, v1 -> (v10)
9.u64 Add v2, v3 -> (v10)
11.u64 Add v4, v5 -> (v13)
12.u64 Add v6, v7 -> (v13)
10.u64 Add v8, v9 -> (v14)
13.u64 Add v11, v12 -> (v14)
14.u64 Add v10, v13 -> (v15)
15.u64 Return v14
succs: [bb 1]

BB 1 preds: [bb 2]
prop: end
```

Instruction 10 was moved down.

## Links

Algorithm: [article](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.211.7673&rep=rep1&type=pdf)

Source code:
[scheduler.cpp](../optimizer/optimizations/scheduler.cpp)
[scheduler.h](../optimizer/optimizations/scheduler.h)

Tests:
[scheduler_test.cpp](../tests/scheduler_test.cpp)
1134
compiler/intrinsics.yaml
Normal file
File diff suppressed because it is too large
@ -1,342 +0,0 @@

/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/ir/basicblock.h"
#include "optimizer/ir/graph.h"
#include "compiler_logger.h"
#include "optimizer/analysis/alias_analysis.h"

/**
 * See "Efficient Field-sensitive pointer analysis for C" by David J. Pearce,
 * Paul H. J. Kelly and Chris Hankin.
 *
 * We treat each IR Inst as a constraint that may be applied to a set of
 * aliases of some virtual register. Virtual registers are used as constraint
 * variables as well.
 *
 * In order to solve the system of set constraints, the following is done:
 *
 * 1. Each constraint variable x has a solution set associated with it, Sol(x).
 * Implemented through AliasAnalysis::points_to_ that contains mapping of
 * virtual register to possible aliases.
 *
 * 2. Constraints are separated into direct, copy.
 *
 * - Direct constraints are constraints that require no extra processing, such
 * as P = &Q.
 *
 * - Copy constraints are those of the form P = Q. Such semantic can be
 * obtained through NullCheck, Mov, and Phi instructions.
 *
 * 3. All direct constraints of the form P = &Q are processed, such that Q is
 * added to Sol(P).
 *
 * 4. A directed graph is built out of the copy constraints. Each constraint
 * variable is a node in the graph, and an edge from Q to P is added for each
 * copy constraint of the form P = Q.
 *
 * 5. The graph is then walked, and solution sets are propagated along the copy
 * edges, such that an edge from Q to P causes Sol(P) <- Sol(P) union Sol(Q).
 *
 * 6. The process of walking the graph is iterated until no solution sets
 * change.
 *
 * To add new instructions to alias analysis please consider the following:
 * - AliasAnalysis class: add a visitor for a new instruction that should be analyzed
 *
 * TODO(Evgenii Kudriashov): Prior to walking the graph in steps 5 and 6, we
 * need to perform static cycle elimination on the constraint graph, as well as
 * off-line variable substitution.
 *
 * TODO(Evgenii Kudriashov): To add flow-sensitivity the "Flow-sensitive
 * pointer analysis for millions of lines of code" by Ben Hardekopf and Calvin
 * Lin may be considered.
 *
 * TODO(Evgenii Kudriashov): After implementing VRP and SCEV the "Loop-Oriented
 * Array- and Field-Sensitive Pointer Analysis for Automatic SIMD
 * Vectorization" by Yulei Sui, Xiaokang Fan, Hao Zhou, and Jingling Xue may be
 * considered to add advanced analysis of array indices.
 */

namespace panda::compiler {

AliasAnalysis::AliasAnalysis(Graph *graph) : Analysis(graph), points_to_(graph->GetAllocator()->Adapter()) {}

const ArenaVector<BasicBlock *> &AliasAnalysis::GetBlocksToVisit() const
{
    return GetGraph()->GetBlocksRPO();
}

bool AliasAnalysis::RunImpl()
{
    Init();

    VisitGraph();

    // Initialize solution sets
    for (auto pair : *direct_) {
        auto it = points_to_.try_emplace(pair.first, GetGraph()->GetAllocator()->Adapter());
        ASSERT(pair.first.GetBase() == nullptr || pair.first.GetBase()->GetOpcode() != Opcode::NullCheck);
        ASSERT(pair.second.GetBase() == nullptr || pair.second.GetBase()->GetOpcode() != Opcode::NullCheck);
        it.first->second.insert(pair.second);
    }

    SolveConstraints();

#ifndef NDEBUG
    if (CompilerLogger::IsComponentEnabled(CompilerLoggerComponents::ALIAS_ANALYSIS)) {
        std::ostringstream out;
        DumpChains(&out);
        Dump(&out);
        COMPILER_LOG(DEBUG, ALIAS_ANALYSIS) << out.str();
    }
#endif
    return true;
}

void AliasAnalysis::Init()
{
    auto allocator = GetGraph()->GetLocalAllocator();
    chains_ = allocator->New<PointerMap<ArenaVector<Pointer>>>(allocator->Adapter());
    direct_ = allocator->New<PointerPairVector>(allocator->Adapter());
    inputs_set_ = allocator->New<ArenaSet<Inst *>>(allocator->Adapter());
    ASSERT(chains_ != nullptr);
    ASSERT(direct_ != nullptr);
    ASSERT(inputs_set_ != nullptr);
    points_to_.clear();
}

void Pointer::Dump(std::ostream *out) const
{
    switch (type_) {
        case OBJECT:
            (*out) << "v" << base_->GetId();
            break;
        case STATIC_FIELD:
            (*out) << "SF #" << imm_;
            break;
        case POOL_CONSTANT:
            (*out) << "PC #" << imm_;
            break;
        case OBJECT_FIELD:
            (*out) << "v" << base_->GetId() << " #" << imm_;
            break;
        case ARRAY_ELEMENT:
            (*out) << "v" << base_->GetId() << "[";
            if (idx_ != nullptr) {
                (*out) << "v" << idx_->GetId();
                if (imm_ != 0) {
                    (*out) << "+" << imm_;
                }
            } else {
                (*out) << imm_;
            }
            (*out) << "]";
            break;
        default:
            UNREACHABLE();
    }
    if (local_) {
        (*out) << "(local)";
    }
    if (volatile_) {
        (*out) << "(v)";
    }
}

static bool PointerLess(const Pointer &lhs, const Pointer &rhs)
{
    if (lhs.GetBase() == rhs.GetBase()) {
        return lhs.GetImm() < rhs.GetImm();
    }
    if (lhs.GetBase() == nullptr) {
        return true;
    }
    if (rhs.GetBase() == nullptr) {
        return false;
    }
    return lhs.GetBase()->GetId() < rhs.GetBase()->GetId();
}

void AliasAnalysis::DumpChains(std::ostream *out) const
{
    ArenaVector<Pointer> sorted_keys(GetGraph()->GetLocalAllocator()->Adapter());
    for (auto &pair : *chains_) {
        sorted_keys.push_back(pair.first);
    }
    std::sort(sorted_keys.begin(), sorted_keys.end(), PointerLess);

    (*out) << "The chains are the following:" << std::endl;
    for (auto &p : sorted_keys) {
        (*out) << "\t";
        p.Dump(out);
        (*out) << ": {";

        // Sort by instruction ID to add more readability to logs
        ArenaVector<Pointer> sorted(chains_->at(p), GetGraph()->GetLocalAllocator()->Adapter());
        std::sort(sorted.begin(), sorted.end(), PointerLess);
        auto edge = sorted.begin();
        if (edge != sorted.end()) {
            edge->Dump(out);
            while (++edge != sorted.end()) {
                (*out) << ", ";
                edge->Dump(out);
            }
        }
        (*out) << "}" << std::endl;
    }
}

void AliasAnalysis::Dump(std::ostream *out) const
{
    ArenaVector<Pointer> sorted_keys(GetGraph()->GetLocalAllocator()->Adapter());
    for (auto &pair : points_to_) {
        sorted_keys.push_back(pair.first);
    }
    std::sort(sorted_keys.begin(), sorted_keys.end(), PointerLess);

    (*out) << "The solution set is the following:" << std::endl;
    for (auto &p : sorted_keys) {
        (*out) << "\t";
        p.Dump(out);
        (*out) << ": {";

        // Sort by instruction ID to add more readability to logs
        auto values = points_to_.at(p);
        ArenaVector<Pointer> sorted(values.begin(), values.end(), GetGraph()->GetLocalAllocator()->Adapter());
        std::sort(sorted.begin(), sorted.end(), PointerLess);
        auto iter = sorted.begin();
        if (iter != sorted.end()) {
            iter->Dump(out);
            while (++iter != sorted.end()) {
                (*out) << ", ";
                iter->Dump(out);
            }
        }
        (*out) << "}" << std::endl;
    }
}

AliasType AliasAnalysis::CheckInstAlias(Inst *mem1, Inst *mem2) const
{
    return MAY_ALIAS;
}

/**
 * Here we propagate solutions obtained from direct constraints through copy
 * constraints e.g: we have a node A with solution {a} and the node A was
 * copied to B and C (this->chains_ maintains these links), and C was copied to
 * D.
 *
 *     A{a} -> B
 *        \-> C -> D
 *
 * After first iteration (iterating A node) we will obtain
 *
 *     A{a} -> B{a}
 *        \-> C{a} -> D
 *
 * After second iteration (iterating B node) nothing changes
 *
 * After third iteration (iterating C node):
 *
 *     A{a} -> B{a}
 *        \-> C{a} -> D{a}
 *
 * For complex nodes (OBJECT_FIELD, ARRAY_ELEMENT) we create auxiliary nodes e.g.
 * if a field F was accessed from object A then we have two nodes:
 *
 *     A{a} -> A.F
 *
 * And solutions from A would be propagated as following:
 *
 *     A{a} -> A.F{a.F}
 *
 * The function works using worklist to process only updated nodes.
 */
void AliasAnalysis::SolveConstraints()
{
    ArenaQueue<Pointer> worklist(GetGraph()->GetLocalAllocator()->Adapter());
    for (auto &pair : *direct_) {
        if (chains_->find(pair.first) != chains_->end()) {
            worklist.push(pair.first);
        }
    }

    while (!worklist.empty()) {
        Pointer &ref = worklist.front();
        ASSERT(ref.GetBase() == nullptr || ref.GetBase()->GetOpcode() != Opcode::NullCheck);
        for (auto &edge : chains_->at(ref)) {
            // POOL_CONSTANT cannot be assignee
            ASSERT(edge.GetType() != POOL_CONSTANT);
            auto &sols = points_to_.try_emplace(edge, GetGraph()->GetAllocator()->Adapter()).first->second;
            bool added = false;
            for (auto &alias : points_to_.at(ref)) {
                ASSERT(alias.GetBase() == nullptr || alias.GetBase()->GetOpcode() != Opcode::NullCheck);
                if (edge.GetType() == OBJECT_FIELD && ref.GetBase() == edge.GetBase()) {
                    // Propagating from object to fields: A{a} -> A.F{a.f}
                    if (alias.GetType() == OBJECT) {
                        Pointer p = Pointer::CreateObjectField(alias.GetBase(), edge.GetImm(), edge.GetTypePtr());
                        p.SetLocalVolatile(alias.IsLocal(), edge.IsVolatile());

                        added |= sols.insert(p).second;
                        continue;
                    }
                    // In case A{a.g} -> A.F we propagate symbolic name: A{a.g} -> A.F{A.F}
                    Pointer p = Pointer::CreateObjectField(ref.GetBase(), edge.GetImm(), edge.GetTypePtr());
                    p.SetLocalVolatile(alias.IsLocal(), edge.IsVolatile());

                    added |= sols.insert(p).second;
                    continue;
                }
                if (edge.GetType() == ARRAY_ELEMENT && ref.GetBase() == edge.GetBase()) {
                    // Propagating from object to elements: A{a} -> A[i]{a[i]}
                    if (alias.GetType() == OBJECT) {
                        Pointer p = Pointer::CreateArrayElement(alias.GetBase(), edge.GetIdx(), edge.GetImm());
                        p.SetLocalVolatile(alias.IsLocal(), edge.IsVolatile());

                        added |= sols.insert(p).second;
                        continue;
                    }
                    // In case A{a[j]} -> A[i] we propagate symbolic name: A{a[j]} -> A[i]{A[i]}
                    Pointer p = Pointer::CreateArrayElement(ref.GetBase(), edge.GetIdx(), edge.GetImm());
                    p.SetLocalVolatile(alias.IsLocal(), edge.IsVolatile());

                    added |= sols.insert(p).second;
                    continue;
                }
                added |= sols.insert(alias).second;
            }
            if (added && chains_->find(edge) != chains_->end()) {
                worklist.push(edge);
            }
            ASSERT(!sols.empty());
        }
        worklist.pop();
    }
}

/**
 * Instructions that introduce aliases.
 */

void AliasAnalysis::VisitCastAnyTypeValue(GraphVisitor *v, Inst *inst)
{
    if (inst->GetType() == DataType::REFERENCE) {
        static_cast<AliasAnalysis *>(v)->AddDirectEdge(Pointer::CreateObject(inst));
    }
}

}  // namespace panda::compiler
@ -1,235 +0,0 @@

/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_ANALYSIS_ALIAS_ANALYSIS_H_
#define COMPILER_OPTIMIZER_ANALYSIS_ALIAS_ANALYSIS_H_

#include <unordered_map>
#include "optimizer/ir/graph_visitor.h"
#include "optimizer/pass.h"
#include "utils/arena_containers.h"
#include "utils/hash.h"

namespace panda::compiler {
class BasicBlock;
class Graph;

enum AliasType : uint8_t {
    // Proved that references are not aliases
    NO_ALIAS,
    // References may or may not alias each other (cannot be proven statically)
    MAY_ALIAS,
    // References are proven aliases
    MUST_ALIAS
};

enum PointerType {
    // Reference to unknown object.
    // Valid fields: base
    OBJECT,
    // Constant from pool
    // Valid fields: imm
    POOL_CONSTANT,
    // Object's field
    // Valid fields: base, imm, type_ptr
    OBJECT_FIELD,
    // Static field of the object
    // Valid fields: imm, type_ptr
    STATIC_FIELD,
    // Array pointer
    // Valid fields: base, idx
    ARRAY_ELEMENT
};

class Pointer {
public:
    Pointer() = default;
    Pointer(PointerType type, const Inst *base, const Inst *idx, uint64_t imm, const void *type_ptr)
        : type_(type), base_(base), idx_(idx), imm_(imm), type_ptr_(type_ptr), volatile_(false)
    {
        local_ = false;
    }

    static Pointer CreateObject(const Inst *base)
    {
        return Pointer(OBJECT, base, nullptr, 0, nullptr);
    }

    static Pointer CreateObjectField(const Inst *base, uint32_t type_id, const void *type_ptr = nullptr)
    {
        return Pointer(OBJECT_FIELD, base, nullptr, type_id, type_ptr);
    }

    static Pointer CreateArrayElement(const Inst *array, const Inst *idx, uint64_t imm = 0)
    {
        return Pointer(ARRAY_ELEMENT, array, idx, imm, nullptr);
    }

    PointerType GetType() const
    {
        return type_;
    }

    const Inst *GetBase() const
    {
        return base_;
    }

    const Inst *GetIdx() const
    {
        return idx_;
    }

    uint64_t GetImm() const
    {
        return imm_;
    }

    const void *GetTypePtr() const
    {
        return type_ptr_;
    }

    bool IsLocal() const
    {
        return local_;
    }

    void SetLocalVolatile(bool local, bool is_volatile)
    {
        local_ = local;
        volatile_ = is_volatile;
    }

    bool IsVolatile() const
    {
        return volatile_;
    }

    void Dump(std::ostream *out) const;

    bool HasSameOffset(const Pointer &p) const
    {
        if (type_ptr_ == nullptr && p.type_ptr_ == nullptr) {
            return imm_ == p.imm_;
        }
        return type_ptr_ == p.type_ptr_;
    }

private:
    PointerType type_;
    const Inst *base_;
    const Inst *idx_;
    uint64_t imm_;
    const void *type_ptr_;
    bool local_;
    bool volatile_;
};

struct PointerEqual {
    bool operator()(Pointer const &p1, Pointer const &p2) const
    {
        return p1.GetType() == p2.GetType() && p1.GetBase() == p2.GetBase() && p1.GetIdx() == p2.GetIdx() &&
               p1.HasSameOffset(p2);
    }
};

struct PointerHash {
    uint32_t operator()(Pointer const &p) const
    {
        auto inst_hasher = std::hash<const Inst *> {};
        uint32_t hash = inst_hasher(p.GetBase());
        hash += inst_hasher(p.GetIdx());
        if (p.GetTypePtr() == nullptr) {
            hash += std::hash<uint64_t> {}(p.GetImm());
        } else {
            hash += std::hash<const void *> {}(p.GetTypePtr());
        }
        return hash;
    }
};

// NOLINTNEXTLINE(fuchsia-multiple-inheritance)
class AliasAnalysis : public Analysis, public GraphVisitor {
public:
    enum class Trilean {
        TRUE,
        UNKNOWN,
        FALSE,
    };

    using PointerPairVector = ArenaVector<std::pair<Pointer, Pointer>>;

    explicit AliasAnalysis(Graph *graph);
    NO_MOVE_SEMANTIC(AliasAnalysis);
    NO_COPY_SEMANTIC(AliasAnalysis);
    ~AliasAnalysis() override = default;

    bool RunImpl() override;

    const char *GetPassName() const override
    {
        return "AliasAnalysis";
    }

    AliasType CheckInstAlias(Inst *mem1, Inst *mem2) const;
    void Dump(std::ostream *out) const;

    /**
     * Sort IR instructions into two constraint groups:
     *     Direct: introduce the alias
     *     Copy: copy one alias to another
     */
    const ArenaVector<BasicBlock *> &GetBlocksToVisit() const override;

    /**
     * Instructions that introduce aliases.
     */
    static void VisitCastAnyTypeValue(GraphVisitor *v, Inst *inst);

    void AddDirectEdge(const Pointer &p)
    {
        direct_->push_back({p, p});
    }

    ArenaSet<Inst *> *GetClearInputsSet()
    {
        inputs_set_->clear();
        return inputs_set_;
    }

#include "optimizer/ir/visitor.inc"

private:
    void Init();
    using PointerSet = ArenaUnorderedSet<Pointer, PointerHash, PointerEqual>;
    template <class T>
    using PointerMap = ArenaUnorderedMap<Pointer, T, PointerHash, PointerEqual>;

    void SolveConstraints();

    void DumpChains(std::ostream *out) const;

private:
    PointerMap<PointerSet> points_to_;

    // Local containers:
    PointerMap<ArenaVector<Pointer>> *chains_ {nullptr};
    PointerPairVector *direct_ {nullptr};
    ArenaSet<Inst *> *inputs_set_ {nullptr};
};
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_ANALYSIS_ALIAS_ANALYSIS_H_
@ -1,537 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) 2021-2022 Huawei Device Co., Ltd.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "bounds_analysis.h"
|
||||
#include "dominators_tree.h"
|
||||
#include "optimizer/ir/graph.h"
|
||||
#include "optimizer/ir/graph_visitor.h"
|
||||
#include "optimizer/ir/basicblock.h"
|
||||
#include "optimizer/ir/inst.h"
|
||||
#include "compiler/optimizer/ir/analysis.h"
|
||||
#include "optimizer/analysis/loop_analyzer.h"
|
||||
|
||||
namespace panda::compiler {
|
||||
BoundsRange::BoundsRange(int64_t val, DataType::Type type) : BoundsRange(val, val, nullptr, type) {}
|
||||
|
||||
BoundsRange::BoundsRange(int64_t left, int64_t right, const Inst *inst, [[maybe_unused]] DataType::Type type)
|
||||
: left_(left), right_(right), len_array_(inst)
|
||||
{
|
||||
ASSERT(inst == nullptr);
|
||||
ASSERT(left <= right);
|
||||
ASSERT(GetMin(type) <= left);
|
||||
ASSERT(right <= GetMax(type));
|
||||
}
|
||||
|
||||
int64_t BoundsRange::GetLeft() const
|
||||
{
|
||||
return left_;
|
||||
}
|
||||
|
||||
int64_t BoundsRange::GetRight() const
|
||||
{
|
||||
return right_;
|
||||
}
|
||||
|
||||
bool BoundsRange::IsConst() const
|
||||
{
|
||||
return left_ == right_;
|
||||
}
|
||||
|
||||
bool BoundsRange::IsMaxRange(DataType::Type type) const
|
||||
{
|
||||
return left_ <= GetMin(type) && right_ >= GetMax(type);
|
||||
}
|
||||
|
||||
bool BoundsRange::IsEqual(const BoundsRange &range) const
|
||||
{
|
||||
return left_ == range.GetLeft() && right_ == range.GetRight();
|
||||
}
|
||||
|
||||
bool BoundsRange::IsLess(const BoundsRange &range) const
|
||||
{
|
||||
return right_ < range.GetLeft();
|
||||
}
|
||||
|
||||
bool BoundsRange::IsLess(const Inst *inst) const
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool BoundsRange::IsMore(const BoundsRange &range) const
|
||||
{
|
||||
return left_ > range.GetRight();
|
||||
}
|
||||
|
||||
bool BoundsRange::IsMoreOrEqual(const BoundsRange &range) const
|
||||
{
|
||||
return left_ >= range.GetRight();
|
||||
}
|
||||
|
||||
bool BoundsRange::IsNotNegative() const
|
||||
{
|
||||
return left_ >= 0;
|
||||
}
|
||||
|
||||
bool BoundsRange::IsNegative() const
|
||||
{
|
||||
return right_ < 0;
|
||||
}
|
||||
/**
|
||||
* Return the minimal value for a type.
|
||||
*
|
||||
* We consider that REFERENCE type has only non-negative address values
|
||||
*/
|
||||
int64_t BoundsRange::GetMin(DataType::Type type)
|
||||
{
|
||||
ASSERT(!IsFloatType(type));
|
||||
switch (type) {
|
||||
case DataType::BOOL:
|
||||
case DataType::UINT8:
|
||||
case DataType::UINT16:
|
||||
case DataType::UINT32:
|
||||
case DataType::UINT64:
|
||||
case DataType::REFERENCE:
|
||||
return 0;
|
||||
case DataType::INT8:
|
||||
return INT8_MIN;
|
||||
case DataType::INT16:
|
||||
return INT16_MIN;
|
||||
case DataType::INT32:
|
||||
return INT32_MIN;
|
||||
case DataType::INT64:
|
||||
return INT64_MIN;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the maximal value for a type.
|
||||
*
|
||||
* For REFERENCE we are interested in whether it is NULL or not. Set the
|
||||
* maximum to INT64_MAX regardless the real architecture bitness.
|
||||
*/
|
||||
int64_t BoundsRange::GetMax(DataType::Type type)
|
||||
{
|
||||
ASSERT(!IsFloatType(type));
|
||||
ASSERT(type != DataType::UINT64);
|
||||
switch (type) {
|
||||
case DataType::BOOL:
|
||||
return 1;
|
||||
case DataType::UINT8:
|
||||
return UINT8_MAX;
|
||||
case DataType::UINT16:
|
||||
return UINT16_MAX;
|
||||
case DataType::UINT32:
|
||||
return UINT32_MAX;
|
||||
case DataType::INT8:
|
||||
return INT8_MAX;
|
||||
case DataType::INT16:
|
||||
return INT16_MAX;
|
||||
        case DataType::INT32:
            return INT32_MAX;
        // NOLINTNEXTLINE(bugprone-branch-clone)
        case DataType::INT64:
            return INT64_MAX;
        case DataType::REFERENCE:
            return INT64_MAX;
        default:
            UNREACHABLE();
    }
}

BoundsRange BoundsRange::FitInType(DataType::Type type) const
{
    auto type_min = BoundsRange::GetMin(type);
    auto type_max = BoundsRange::GetMax(type);
    if (left_ < type_min || left_ > type_max || right_ < type_min || right_ > type_max) {
        return BoundsRange(type_min, type_max);
    }
    return *this;
}

BoundsRange BoundsRange::Union(const ArenaVector<BoundsRange> &ranges)
{
    int64_t min = MAX_RANGE_VALUE;
    int64_t max = MIN_RANGE_VALUE;
    for (const auto &range : ranges) {
        if (range.GetLeft() < min) {
            min = range.GetLeft();
        }
        if (range.GetRight() > max) {
            max = range.GetRight();
        }
    }
    return BoundsRange(min, max);
}

BoundsRange::RangePair BoundsRange::NarrowBoundsByNE(BoundsRange::RangePair const &ranges)
{
    auto &[left_range, right_range] = ranges;
    int64_t ll = left_range.GetLeft();
    int64_t lr = left_range.GetRight();
    int64_t rl = right_range.GetLeft();
    int64_t rr = right_range.GetRight();
    // We can narrow the bounds of a range if the other one is a constant matching one of its bounds.
    // Mostly needed for a reference comparison with null.
    if (left_range.IsConst() && !right_range.IsConst()) {
        if (ll == rl) {
            return {left_range, BoundsRange(rl + 1, rr)};
        }
        if (ll == rr) {
            return {left_range, BoundsRange(rl, rr - 1)};
        }
    }
    if (!left_range.IsConst() && right_range.IsConst()) {
        if (rl == ll) {
            return {BoundsRange(ll + 1, lr), right_range};
        }
        if (rl == lr) {
            return {BoundsRange(ll, lr - 1), right_range};
        }
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase1(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    auto &[left_range, right_range] = ranges;
    int64_t lr = left_range.GetRight();
    int64_t rl = right_range.GetLeft();
    if (cc == ConditionCode::CC_GT || cc == ConditionCode::CC_A) {
        // With equal rl and lr, left_range cannot be greater than right_range
        if (rl == lr) {
            return {BoundsRange(), BoundsRange()};
        }
        return {BoundsRange(rl + 1, lr), BoundsRange(rl, lr - 1)};
    }
    if (cc == ConditionCode::CC_GE || cc == ConditionCode::CC_AE || cc == ConditionCode::CC_EQ) {
        return {BoundsRange(rl, lr), BoundsRange(rl, lr)};
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase2(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    if (cc == ConditionCode::CC_GT || cc == ConditionCode::CC_GE || cc == ConditionCode::CC_EQ ||
        cc == ConditionCode::CC_A || cc == ConditionCode::CC_AE) {
        return {BoundsRange(), BoundsRange()};
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase3(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    auto &[left_range, right_range] = ranges;
    int64_t ll = left_range.GetLeft();
    int64_t lr = left_range.GetRight();
    int64_t rl = right_range.GetLeft();
    int64_t rr = right_range.GetRight();
    if (cc == ConditionCode::CC_GT || cc == ConditionCode::CC_A) {
        // rl == lr handled in case 1
        return {BoundsRange(rl + 1, lr), right_range};
    }
    if (cc == ConditionCode::CC_GE || cc == ConditionCode::CC_AE) {
        return {BoundsRange(rl, lr), right_range};
    }
    if (cc == ConditionCode::CC_LT || cc == ConditionCode::CC_B) {
        // With equal ll and rr, left_range cannot be less than right_range
        if (ll == rr) {
            return {BoundsRange(), BoundsRange()};
        }
        return {BoundsRange(ll, rr - 1), right_range};
    }
    if (cc == ConditionCode::CC_LE || cc == ConditionCode::CC_BE) {
        return {BoundsRange(ll, rr), right_range};
    }
    if (cc == ConditionCode::CC_EQ) {
        return {BoundsRange(rl, rr), right_range};
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase4(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    auto &[left_range, right_range] = ranges;
    int64_t ll = left_range.GetLeft();
    int64_t rr = right_range.GetRight();
    if (cc == ConditionCode::CC_LT || cc == ConditionCode::CC_B) {
        // With equal ll and rr, left_range cannot be less than right_range
        if (ll == rr) {
            return {BoundsRange(), BoundsRange()};
        }
        return {BoundsRange(ll, rr - 1), BoundsRange(ll + 1, rr)};
    }
    if (cc == ConditionCode::CC_LE || cc == ConditionCode::CC_BE || cc == ConditionCode::CC_EQ) {
        return {BoundsRange(ll, rr), BoundsRange(ll, rr)};
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase5(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    if (cc == ConditionCode::CC_LT || cc == ConditionCode::CC_LE || cc == ConditionCode::CC_EQ ||
        cc == ConditionCode::CC_B || cc == ConditionCode::CC_BE) {
        return {BoundsRange(), BoundsRange()};
    }
    return ranges;
}

BoundsRange::RangePair BoundsRange::NarrowBoundsCase6(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    auto &[left_range, right_range] = ranges;
    int64_t ll = left_range.GetLeft();
    int64_t lr = left_range.GetRight();
    int64_t rl = right_range.GetLeft();
    int64_t rr = right_range.GetRight();
    if (cc == ConditionCode::CC_GT || cc == ConditionCode::CC_A) {
        // rl == lr handled in case 1
        return {left_range, BoundsRange(rl, lr - 1)};
    }
    if (cc == ConditionCode::CC_GE || cc == ConditionCode::CC_AE) {
        return {left_range, BoundsRange(rl, lr)};
    }
    if (cc == ConditionCode::CC_LT || cc == ConditionCode::CC_B) {
        // ll == rr handled in case 4
        return {left_range, BoundsRange(ll + 1, rr)};
    }
    if (cc == ConditionCode::CC_LE || cc == ConditionCode::CC_BE) {
        return {left_range, BoundsRange(ll, rr)};
    }
    if (cc == ConditionCode::CC_EQ) {
        return {left_range, BoundsRange(ll, lr)};
    }
    return ranges;
}

/**
 * Try to narrow the bounds ranges for an <if (left CC right)> situation.
 * Returns a pair of narrowed left and right intervals.
 */
BoundsRange::RangePair BoundsRange::TryNarrowBoundsByCC(ConditionCode cc, BoundsRange::RangePair const &ranges)
{
    if (cc == ConditionCode::CC_NE) {
        return NarrowBoundsByNE(ranges);
    }
    auto &[left_range, right_range] = ranges;
    int64_t ll = left_range.GetLeft();
    int64_t lr = left_range.GetRight();
    int64_t rl = right_range.GetLeft();
    int64_t rr = right_range.GetRight();
    // In the descriptions below, () marks left_range bounds and [] marks right_range bounds
    // case 1: ( [ ) ]
    if (ll <= rl && rl <= lr && lr <= rr) {
        return NarrowBoundsCase1(cc, ranges);
    }
    // case 2: ( ) [ ]
    if (ll <= lr && lr < rl && rl <= rr) {
        return NarrowBoundsCase2(cc, ranges);
    }
    // case 3: ( [ ] )
    if (ll <= rl && rl <= rr && rr <= lr) {
        return NarrowBoundsCase3(cc, ranges);
    }
    // case 4: [ ( ] )
    if (rl <= ll && ll <= rr && rr <= lr) {
        return NarrowBoundsCase4(cc, ranges);
    }
    // case 5: [ ] ( )
    if (rl <= rr && rr < ll && ll <= lr) {
        return NarrowBoundsCase5(cc, ranges);
    }
    // case 6: [ ( ) ]
    if (rl <= ll && ll <= lr && lr <= rr) {
        return NarrowBoundsCase6(cc, ranges);
    }
    return ranges;
}
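
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the pass): the case split above is an unrolled,
// equal-bound-aware range intersection. The self-contained analog below shows
// the core idea for a taken `a < b` edge using plain int64_t ranges; all names
// and types here are hypothetical and only std headers are used. The real code
// dispatches to cases 1-6 instead, so it never forms an empty interval and it
// guards the equal-bound situations explicitly.
#include <algorithm>
#include <cstdint>
#include <utility>

struct SimpleRange {
    int64_t left;
    int64_t right;
};

// Narrow (a, b) assuming the comparison `a < b` held (the true successor edge).
// A result with left > right means the edge is unreachable; this sketch also
// ignores saturation at the type bounds, which the real cases handle.
inline std::pair<SimpleRange, SimpleRange> NarrowOnLess(SimpleRange a, SimpleRange b)
{
    a.right = std::min(a.right, b.right - 1);  // `a` is strictly below b's maximum
    b.left = std::max(b.left, a.left + 1);     // `b` is strictly above a's minimum
    return {a, b};
}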

BoundsRange BoundsRangeInfo::FindBoundsRange(const BasicBlock *block, Inst *inst) const
{
    ASSERT(block != nullptr && inst != nullptr);
    ASSERT(!IsFloatType(inst->GetType()));
    ASSERT(inst->GetType() == DataType::REFERENCE || DataType::GetCommonType(inst->GetType()) == DataType::INT64);
    if (inst->GetOpcode() == Opcode::NullPtr) {
        ASSERT(inst->GetType() == DataType::REFERENCE);
        return BoundsRange(0);
    }
    if (IsInstNotNull(inst)) {
        ASSERT(inst->GetType() == DataType::REFERENCE);
        return BoundsRange(1, BoundsRange::GetMax(DataType::REFERENCE));
    }
    while (block != nullptr) {
        if (bounds_range_info_.find(block) != bounds_range_info_.end() &&
            bounds_range_info_.at(block).find(inst) != bounds_range_info_.at(block).end()) {
            return bounds_range_info_.at(block).at(inst);
        }
        block = block->GetDominator();
    }
    if (inst->IsConst()) {
        ASSERT(inst->GetType() == DataType::INT64);
        auto val = static_cast<int64_t>(inst->CastToConstant()->GetIntValue());
        return BoundsRange(val);
    }
    // If we know nothing about the inst, return the complete range of its type
    return BoundsRange(inst->GetType());
}
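
// Editor's sketch (hypothetical types, not the pass's API): the dominator walk
// in FindBoundsRange in miniature. Ranges are recorded per basic block, and a
// query climbs the immediate-dominator chain to the nearest block that has a
// record for the instruction, so a fact proven in a dominating block applies
// to every block it dominates.
#include <cstdint>
#include <map>
#include <utility>

struct MiniBlock {
    const MiniBlock *dominator = nullptr;  // immediate dominator; nullptr at the root
};

using MiniRange = std::pair<int64_t, int64_t>;
using MiniRangeTable = std::map<const MiniBlock *, std::map<int, MiniRange>>;

inline bool FindRangeUpDominators(const MiniRangeTable &table, const MiniBlock *block, int inst_id, MiniRange *out)
{
    for (; block != nullptr; block = block->dominator) {
        auto bit = table.find(block);
        if (bit != table.end()) {
            auto iit = bit->second.find(inst_id);
            if (iit != bit->second.end()) {
                *out = iit->second;  // nearest dominating record wins
                return true;
            }
        }
    }
    return false;  // nothing known anywhere on the dominator path
}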

void BoundsRangeInfo::SetBoundsRange(const BasicBlock *block, const Inst *inst, BoundsRange range)
{
    if (inst->IsConst() && range.GetLenArray() == nullptr) {
        return;
    }
    if (inst->IsConst()) {
        auto val = static_cast<int64_t>(static_cast<const ConstantInst *>(inst)->GetIntValue());
        range = BoundsRange(val, val, range.GetLenArray());
    }
    ASSERT(inst->GetType() == DataType::REFERENCE || DataType::GetCommonType(inst->GetType()) == DataType::INT64);
    ASSERT(range.GetLeft() >= BoundsRange::GetMin(inst->GetType()));
    ASSERT(range.GetRight() <= BoundsRange::GetMax(inst->GetType()));
    if (!range.IsMaxRange() || range.GetLenArray() != nullptr) {
        if (bounds_range_info_.find(block) == bounds_range_info_.end()) {
            auto it1 = bounds_range_info_.emplace(block, aa_.Adapter());
            ASSERT(it1.second);
            it1.first->second.emplace(inst, range);
        } else if (bounds_range_info_.at(block).find(inst) == bounds_range_info_.at(block).end()) {
            bounds_range_info_.at(block).emplace(inst, range);
        } else {
            bounds_range_info_.at(block).at(inst) = range;
        }
    }
}

BoundsAnalysis::BoundsAnalysis(Graph *graph) : Analysis(graph), bounds_range_info_(graph->GetAllocator()) {}

bool BoundsAnalysis::RunImpl()
{
    GetGraph()->RunPass<DominatorsTree>();
    GetGraph()->RunPass<LoopAnalyzer>();

    VisitGraph();

    return true;
}

const ArenaVector<BasicBlock *> &BoundsAnalysis::GetBlocksToVisit() const
{
    return GetGraph()->GetBlocksRPO();
}

void BoundsAnalysis::VisitIf([[maybe_unused]] GraphVisitor *v, [[maybe_unused]] Inst *inst)
{
    UNREACHABLE();
}

void BoundsAnalysis::VisitIfImm(GraphVisitor *v, Inst *inst)
{
    auto if_inst = inst->CastToIfImm();
    ASSERT(if_inst->GetOperandsType() == DataType::BOOL);
    ASSERT(if_inst->GetCc() == ConditionCode::CC_NE || if_inst->GetCc() == ConditionCode::CC_EQ);
    ASSERT(if_inst->GetImm() == 0);

    auto input = inst->GetInput(0).GetInst();
    if (input->GetOpcode() != Opcode::Compare) {
        return;
    }
    auto compare = input->CastToCompare();
    if (compare->GetOperandsType() == DataType::UINT64) {
        return;
    }
    auto op0 = compare->GetInput(0).GetInst();
    auto op1 = compare->GetInput(1).GetInst();

    if ((DataType::GetCommonType(op0->GetType()) != DataType::INT64 && op0->GetType() != DataType::REFERENCE) ||
        (DataType::GetCommonType(op1->GetType()) != DataType::INT64 && op1->GetType() != DataType::REFERENCE)) {
        return;
    }

    auto cc = compare->GetCc();
    auto block = inst->GetBasicBlock();
    BasicBlock *true_block;
    BasicBlock *false_block;
    if (if_inst->GetCc() == ConditionCode::CC_NE) {
        // Corresponds to Compare result
        true_block = block->GetTrueSuccessor();
        false_block = block->GetFalseSuccessor();
    } else if (if_inst->GetCc() == ConditionCode::CC_EQ) {
        // Corresponds to inversion of Compare result
        true_block = block->GetFalseSuccessor();
        false_block = block->GetTrueSuccessor();
    } else {
        UNREACHABLE();
    }
    CalcNewBoundsRangeForCompare(v, block, cc, op0, op1, true_block);
    CalcNewBoundsRangeForCompare(v, block, GetInverseConditionCode(cc), op0, op1, false_block);
}

void BoundsAnalysis::VisitPhi(GraphVisitor *v, Inst *inst)
{
    if (IsFloatType(inst->GetType()) || inst->GetType() == DataType::REFERENCE || inst->GetType() == DataType::UINT64) {
        return;
    }
    auto bri = static_cast<BoundsAnalysis *>(v)->GetBoundsRangeInfo();
    auto phi = inst->CastToPhi();
    auto phi_block = inst->GetBasicBlock();
    auto phi_type = phi->GetType();
    ArenaVector<BoundsRange> ranges(phi_block->GetGraph()->GetLocalAllocator()->Adapter());
    for (auto &block : phi_block->GetPredsBlocks()) {
        ranges.emplace_back(bri->FindBoundsRange(block, phi->GetPhiInput(block)));
    }
    bri->SetBoundsRange(phi_block, phi, BoundsRange::Union(ranges).FitInType(phi_type));
}

bool BoundsAnalysis::CheckTriangleCase(const BasicBlock *block, const BasicBlock *tgt_block)
{
    auto &preds_blocks = tgt_block->GetPredsBlocks();
    auto loop = tgt_block->GetLoop();
    auto &back_edges = loop->GetBackEdges();
    if (preds_blocks.size() == 1) {
        return true;
    }
    if (!loop->IsRoot() && back_edges.size() == 1 && preds_blocks.size() == 2U) {
        if (preds_blocks[0] == block && preds_blocks[1] == back_edges[0]) {
            return true;
        }
        if (preds_blocks[1] == block && preds_blocks[0] == back_edges[0]) {
            return true;
        }
        return false;
    }
    return false;
}

void BoundsAnalysis::CalcNewBoundsRangeForCompare(GraphVisitor *v, BasicBlock *block, ConditionCode cc, Inst *left,
                                                  Inst *right, BasicBlock *tgt_block)
{
    auto bri = static_cast<BoundsAnalysis *>(v)->GetBoundsRangeInfo();
    auto left_range = bri->FindBoundsRange(block, left);
    auto right_range = bri->FindBoundsRange(block, right);
    // try to skip triangle:
    /* [block]
     *    |  \
     *    |   \
     *    |  [BB]
     *    |   /
     *    |  /
     * [tgt_block]
     */
    if (CheckTriangleCase(block, tgt_block)) {
        auto ranges = BoundsRange::TryNarrowBoundsByCC(cc, {left_range, right_range});
        ASSERT(left_range.GetLenArray() == nullptr);
        ASSERT(right_range.GetLenArray() == nullptr);
        bri->SetBoundsRange(tgt_block, left, ranges.first.FitInType(left->GetType()));
        bri->SetBoundsRange(tgt_block, right, ranges.second.FitInType(right->GetType()));
    }
}
} // namespace panda::compiler
@ -1,160 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_ANALYSIS_BOUNDSRANGE_ANALYSIS_H
#define COMPILER_OPTIMIZER_ANALYSIS_BOUNDSRANGE_ANALYSIS_H

#include "optimizer/ir/graph_visitor.h"
#include "optimizer/ir/datatype.h"
#include "optimizer/ir/inst.h"
#include "optimizer/pass.h"
#include "utils/arena_containers.h"

namespace panda::compiler {
/**
 * Represents a range of values that a variable might have.
 *
 * It is used to represent variables of integral types according to their size
 * and sign.
 * It is used for the REFERENCE type as well, but only for reasoning about
 * whether a variable is NULL or not.
 */
class BoundsRange {
public:
    using RangePair = std::pair<BoundsRange, BoundsRange>;

    explicit BoundsRange(DataType::Type type = DataType::INT64) : left_(GetMin(type)), right_(GetMax(type)) {};

    explicit BoundsRange(int64_t left, int64_t right, const Inst *inst = nullptr,
                         DataType::Type type = DataType::INT64);

    explicit BoundsRange(int64_t val, DataType::Type type = DataType::INT64);

    DEFAULT_COPY_SEMANTIC(BoundsRange);
    DEFAULT_MOVE_SEMANTIC(BoundsRange);
    ~BoundsRange() = default;

    const Inst *GetLenArray()
    {
        return len_array_;
    }
    int64_t GetLeft() const;

    int64_t GetRight() const;

    BoundsRange FitInType(DataType::Type type) const;

    bool IsConst() const;

    bool IsMaxRange(DataType::Type type = DataType::INT64) const;

    bool IsEqual(const BoundsRange &range) const;

    bool IsLess(const BoundsRange &range) const;

    bool IsLess(const Inst *inst) const;

    bool IsMore(const BoundsRange &range) const;

    bool IsMoreOrEqual(const BoundsRange &range) const;

    bool IsNotNegative() const;

    bool IsNegative() const;

    static int64_t GetMin(DataType::Type type);

    static int64_t GetMax(DataType::Type type);

    static BoundsRange Union(const ArenaVector<BoundsRange> &ranges);

    static RangePair NarrowBoundsByNE(RangePair const &ranges);
    static RangePair NarrowBoundsCase1(ConditionCode cc, RangePair const &ranges);
    static RangePair NarrowBoundsCase2(ConditionCode cc, RangePair const &ranges);
    static RangePair NarrowBoundsCase3(ConditionCode cc, RangePair const &ranges);
    static RangePair NarrowBoundsCase4(ConditionCode cc, RangePair const &ranges);
    static RangePair NarrowBoundsCase5(ConditionCode cc, RangePair const &ranges);
    static RangePair NarrowBoundsCase6(ConditionCode cc, RangePair const &ranges);

    static RangePair TryNarrowBoundsByCC(ConditionCode cc, RangePair const &ranges);

    static constexpr int64_t MAX_RANGE_VALUE = INT64_MAX;
    static constexpr int64_t MIN_RANGE_VALUE = INT64_MIN;

private:
    int64_t left_ = MIN_RANGE_VALUE;
    int64_t right_ = MAX_RANGE_VALUE;
    const Inst *len_array_ {nullptr};
};

class BoundsRangeInfo {
public:
    explicit BoundsRangeInfo(ArenaAllocator *aa) : aa_(*aa), bounds_range_info_(aa->Adapter()) {}
    NO_COPY_SEMANTIC(BoundsRangeInfo);
    NO_MOVE_SEMANTIC(BoundsRangeInfo);
    ~BoundsRangeInfo() = default;

    BoundsRange FindBoundsRange(const BasicBlock *block, Inst *inst) const;

    void SetBoundsRange(const BasicBlock *block, const Inst *inst, BoundsRange range);

private:
    ArenaAllocator &aa_;
    ArenaDoubleUnorderedMap<const BasicBlock *, const Inst *, BoundsRange> bounds_range_info_;
};

// NOLINTNEXTLINE(fuchsia-multiple-inheritance)
class BoundsAnalysis : public Analysis, public GraphVisitor {
public:
    explicit BoundsAnalysis(Graph *graph);
    NO_MOVE_SEMANTIC(BoundsAnalysis);
    NO_COPY_SEMANTIC(BoundsAnalysis);
    ~BoundsAnalysis() override = default;

    const ArenaVector<BasicBlock *> &GetBlocksToVisit() const override;

    bool RunImpl() override;

    const char *GetPassName() const override
    {
        return "BoundsAnalysis";
    }

    BoundsRangeInfo *GetBoundsRangeInfo()
    {
        return &bounds_range_info_;
    }

    const BoundsRangeInfo *GetBoundsRangeInfo() const
    {
        return &bounds_range_info_;
    }

    static void VisitIf([[maybe_unused]] GraphVisitor *v, [[maybe_unused]] Inst *inst);
    static void VisitIfImm(GraphVisitor *v, Inst *inst);
    static void VisitPhi(GraphVisitor *v, Inst *inst);

#include "optimizer/ir/visitor.inc"
private:
    static bool CheckTriangleCase(const BasicBlock *block, const BasicBlock *tgt_block);

    static void CalcNewBoundsRangeForCompare(GraphVisitor *v, BasicBlock *block, ConditionCode cc, Inst *left,
                                             Inst *right, BasicBlock *tgt_block);
private:
    BoundsRangeInfo bounds_range_info_;
};
} // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_ANALYSIS_BOUNDSRANGE_ANALYSIS_H
@ -1,38 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/analysis/countable_loop_parser.h"
#include "optimizer/analysis/loop_analyzer.h"
#include "optimizer/ir/basicblock.h"
#include "optimizer/ir/graph.h"

namespace panda::compiler {
/**
 * Check if the loop is countable
 *
 * [Loop]
 * Phi(init, update)
 * ...
 * update(phi, 1)
 * Compare(Add/Sub, test)
 *
 * where `update` is an Add or Sub instruction
 */
std::optional<CountableLoopInfo> CountableLoopParser::Parse()
{
    return std::nullopt;
}

} // namespace panda::compiler
@ -1,61 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef COMPILER_OPTIMIZER_ANALYSIS_COUNTABLE_LOOP_PARSER_H_
#define COMPILER_OPTIMIZER_ANALYSIS_COUNTABLE_LOOP_PARSER_H_

#include "optimizer/ir/inst.h"

namespace panda::compiler {
class Loop;

/**
 * Example of code
 * ---------------
 * for (init(a); if_imm(compare(a,test)); update(a)) {...}
 */
struct CountableLoopInfo {
    Inst *if_imm;
    Inst *init;
    Inst *test;
    Inst *update;
    Inst *index;
    uint64_t const_step;
    ConditionCode normalized_cc;  // cc between `update` and `test`
};
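
// Editor's note: an illustrative mapping (assumed, not taken from the source)
// of a concrete countable loop onto the fields above. For a loop shaped like
//
//     for (int i = 0; i < n; i += 2) { ... }
//
// one would expect roughly:
//   init          -> the constant 0 feeding the loop phi
//   index         -> the phi for `i`
//   update        -> the Add producing `i + 2`
//   test          -> `n`
//   if_imm        -> the branch on the Compare(update, test) result
//   const_step    -> 2
//   normalized_cc -> CC_LT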

/**
 * Helper class to check if a loop is countable and to get its parameters
 */
class CountableLoopParser {
public:
    explicit CountableLoopParser(const Loop &loop) : loop_(loop) {}

    NO_MOVE_SEMANTIC(CountableLoopParser);
    NO_COPY_SEMANTIC(CountableLoopParser);
    ~CountableLoopParser() = default;

    std::optional<CountableLoopInfo> Parse();

private:
    Inst *SetIndexAndRetrunConstInst();

private:
    const Loop &loop_;
    CountableLoopInfo loop_info_ {};
    bool is_head_loop_exit_ = false;
};
} // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_ANALYSIS_COUNTABLE_LOOP_PARSER_H_
@ -37,10 +37,6 @@ void LinearOrder::HandleIfBlock(BasicBlock *if_true_block, BasicBlock *next_bloc
        if_inst->CastToIfImm()->InverseConditionCode();
    } else if (if_inst->GetOpcode() == Opcode::If) {
        if_inst->CastToIf()->InverseConditionCode();
    } else if (if_inst->GetOpcode() == Opcode::AddOverflow) {
        if_inst->CastToAddOverflow()->InverseConditionCode();
    } else if (if_inst->GetOpcode() == Opcode::SubOverflow) {
        if_inst->CastToSubOverflow()->InverseConditionCode();
    } else {
        LOG(FATAL, COMPILER) << "Unexpected `If` instruction: " << *if_inst;
    }
@ -58,15 +54,10 @@ void LinearOrder::HandlePrevInstruction(BasicBlock *block, BasicBlock *prev_bloc
    switch (prev_inst->GetOpcode()) {
        case Opcode::IfImm:
        case Opcode::If:
        case Opcode::AddOverflow:
        case Opcode::SubOverflow:
            ASSERT(prev_block->GetSuccsBlocks().size() == MAX_SUCCS_NUM);
            HandleIfBlock(prev_block, block);
            break;

        case Opcode::Throw:
            break;

        default:
            ASSERT(prev_block->GetSuccsBlocks().size() == 1 || prev_block->IsTryBegin() || prev_block->IsTryEnd());
            if (block != prev_block->GetSuccessor(0) && !prev_block->GetLastInst()->IsControlFlow()) {
@ -1,152 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <algorithm>
#include "liveness_analyzer.h"
#include "live_registers.h"

namespace panda::compiler {

namespace {
struct Split {
    Split(LifeIntervalsIt p_begin, LifeIntervalsIt p_end, LifeNumber p_min, LifeNumber p_max,
          LifeIntervalsTreeNode *p_parent)
        : begin(p_begin), end(p_end), min(p_min), max(p_max), parent(p_parent)
    {
        ASSERT(p_begin < p_end);
        ASSERT(p_min <= p_max);
    }
    LifeIntervalsIt begin;          // NOLINT(misc-non-private-member-variables-in-classes)
    LifeIntervalsIt end;            // NOLINT(misc-non-private-member-variables-in-classes)
    LifeNumber min;                 // NOLINT(misc-non-private-member-variables-in-classes)
    LifeNumber max;                 // NOLINT(misc-non-private-member-variables-in-classes)
    LifeIntervalsTreeNode *parent;  // NOLINT(misc-non-private-member-variables-in-classes)
};

// copy intervals with assigned registers and compute min and max life numbers covered by all these intervals
std::pair<LifeNumber, LifeNumber> CopyIntervals(const ArenaVector<LifeIntervals *> &source,
                                                ArenaVector<LifeIntervals *> *destination)
{
    LifeNumber min_ln = std::numeric_limits<LifeNumber>::max();
    LifeNumber max_ln = 0;
    for (auto &interval : source) {
        for (auto split = interval; !interval->IsPhysical() && split != nullptr; split = split->GetSibling()) {
            if (split->HasReg()) {
                min_ln = std::min(min_ln, split->GetBegin());
                max_ln = std::max(max_ln, split->GetEnd());
                destination->push_back(split);
            }
        }
    }
    return std::make_pair(min_ln, max_ln);
}

LifeIntervalsIt PartitionLeftSplit(const LifeIntervalsIt &left, const LifeIntervalsIt &right, LifeNumber midpoint,
                                   LifeNumber *min_ln, LifeNumber *max_ln)
{
    LifeNumber left_min_ln = std::numeric_limits<LifeNumber>::max();
    LifeNumber left_max_ln = 0;
    auto result = std::partition(left, right, [&midpoint, &left_min_ln, &left_max_ln](const auto &em) {
        if (em->GetEnd() < midpoint) {
            left_min_ln = std::min(left_min_ln, em->GetBegin());
            left_max_ln = std::max(left_max_ln, em->GetEnd());
            return true;
        }
        return false;
    });
    *min_ln = left_min_ln;
    *max_ln = left_max_ln;
    return result;
}

LifeIntervalsIt PartitionRightSplit(const LifeIntervalsIt &left, const LifeIntervalsIt &right, LifeNumber midpoint,
                                    LifeNumber *min_ln, LifeNumber *max_ln)
{
    LifeNumber right_min_ln = std::numeric_limits<LifeNumber>::max();
    LifeNumber right_max_ln = 0;
    auto result = std::partition(left, right, [&midpoint, &right_min_ln, &right_max_ln](const auto &em) {
        if (em->GetBegin() > midpoint) {
            right_min_ln = std::min(right_min_ln, em->GetBegin());
            right_max_ln = std::max(right_max_ln, em->GetEnd());
            return false;
        }
        return true;
    });
    *min_ln = right_min_ln;
    *max_ln = right_max_ln;
    return result;
}
} // namespace

LifeIntervalsTree *LifeIntervalsTree::BuildIntervalsTree(const ArenaVector<LifeIntervals *> &life_intervals,
                                                         const Graph *graph)
{
    auto alloc = graph->GetAllocator();
    auto lalloc = graph->GetLocalAllocator();
    auto intervals = alloc->New<ArenaVector<LifeIntervals *>>(alloc->Adapter());
    ArenaQueue<const Split *> queue(lalloc->Adapter());

    auto ln_range = CopyIntervals(life_intervals, intervals);
    if (intervals->empty()) {
        return nullptr;
    }
    queue.push(lalloc->New<Split>(intervals->begin(), intervals->end(), ln_range.first, ln_range.second, nullptr));

    LifeIntervalsTreeNode *root {nullptr};

    // Split each interval into three parts:
    // 1) intervals covering mid point;
    // 2) intervals ended before mid point;
    // 3) intervals started after mid point.
    // Allocate tree node for (1), recursively process (2) and (3).
    while (!queue.empty()) {
        auto split = queue.front();
        queue.pop();
        if (split->end - split->begin <= 0) {
            continue;
        }

        auto midpoint = split->min + (split->max - split->min) / 2U;

        LifeNumber left_min_ln;
        LifeNumber left_max_ln;
        auto left_midpoint = PartitionLeftSplit(split->begin, split->end, midpoint, &left_min_ln, &left_max_ln);

        LifeNumber right_min_ln;
        LifeNumber right_max_ln;
        auto right_midpoint = PartitionRightSplit(left_midpoint, split->end, midpoint, &right_min_ln, &right_max_ln);

        std::sort(left_midpoint, right_midpoint,
                  [](LifeIntervals *l, LifeIntervals *r) { return l->GetEnd() > r->GetEnd(); });

        auto node = alloc->New<LifeIntervalsTreeNode>(split->min, split->max, left_midpoint, right_midpoint);
        if (split->parent == nullptr) {
            root = node;
        } else if (split->parent->GetMidpoint() > midpoint) {
            split->parent->SetLeft(node);
        } else {
            split->parent->SetRight(node);
        }
        if (split->begin < left_midpoint) {
            queue.push(lalloc->New<Split>(split->begin, left_midpoint, left_min_ln, left_max_ln, node));
        }
        if (right_midpoint < split->end) {
            queue.push(lalloc->New<Split>(right_midpoint, split->end, right_min_ln, right_max_ln, node));
        }
    }
    return alloc->New<LifeIntervalsTree>(root);
}
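
// Editor's sketch (hypothetical names, std-only): the three-way split that
// BuildIntervalsTree performs per node, reduced to its essence. Two
// std::partition calls leave [mid_first, mid_last) holding exactly the
// intervals that cover the midpoint; the outer runs become the child subtrees.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct MiniInterval {
    uint32_t begin;
    uint32_t end;
};

inline std::pair<std::vector<MiniInterval>::iterator, std::vector<MiniInterval>::iterator>
PartitionAroundMidpoint(std::vector<MiniInterval> &ivs, uint32_t mid)
{
    // Front run: intervals that end strictly before the midpoint (left child).
    auto mid_first = std::partition(ivs.begin(), ivs.end(), [mid](const MiniInterval &iv) { return iv.end < mid; });
    // Back run: intervals that begin strictly after the midpoint (right child).
    auto mid_last = std::partition(mid_first, ivs.end(), [mid](const MiniInterval &iv) { return !(iv.begin > mid); });
    return {mid_first, mid_last};  // the middle run covers `mid` and stays in the node
}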

} // namespace panda::compiler
@ -1,188 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_ANALYSIS_LIVE_REGISTERS_H
#define COMPILER_OPTIMIZER_ANALYSIS_LIVE_REGISTERS_H

#include <optimizer/ir/inst.h>
#include <optimizer/ir/graph.h>
#include "utils/arena_containers.h"
#include "optimizer/pass.h"
#include "optimizer/analysis/liveness_analyzer.h"

namespace panda::compiler {

using LifeIntervalsIt = ArenaVector<LifeIntervals *>::iterator;
class LifeIntervalsTreeNode {
public:
    explicit LifeIntervalsTreeNode(LifeNumber min_value, LifeNumber max_value, LifeIntervalsIt begin,
                                   LifeIntervalsIt end)
        : min_value_(min_value), max_value_(max_value), begin_(begin), end_(end)
    {
    }

    DEFAULT_MOVE_SEMANTIC(LifeIntervalsTreeNode);
    DEFAULT_COPY_SEMANTIC(LifeIntervalsTreeNode);
    ~LifeIntervalsTreeNode() = default;

    LifeNumber GetMidpoint() const
    {
        return (min_value_ + max_value_) / 2;
    }

    LifeNumber GetMinValue() const
    {
        return min_value_;
    }

    LifeNumber GetMaxValue() const
    {
        return max_value_;
    }

    LifeIntervalsIt GetBegin() const
    {
        return begin_;
    }

    LifeIntervalsIt GetEnd() const
    {
        return end_;
    }

    LifeIntervalsTreeNode *GetLeft() const
    {
        return left_;
    }

    void SetLeft(LifeIntervalsTreeNode *left)
    {
        ASSERT(left_ == nullptr);
        left_ = left;
    }

    LifeIntervalsTreeNode *GetRight() const
    {
        return right_;
    }

    void SetRight(LifeIntervalsTreeNode *right)
    {
        ASSERT(right_ == nullptr);
        right_ = right;
    }

private:
    LifeNumber min_value_;
    LifeNumber max_value_;
    LifeIntervalsIt begin_;
    LifeIntervalsIt end_;
    LifeIntervalsTreeNode *left_ {nullptr};
    LifeIntervalsTreeNode *right_ {nullptr};
};

// Simplified intervals tree implementation.
// Each LifeIntervalsTreeNode stores the intervals covering the mid point associated with the node; these intervals
// are sorted by life range end in descending order. Every left child stores intervals ended before the current node's
// mid point, and every right child stores intervals started after the current node's mid point.
class LifeIntervalsTree {
public:
    static LifeIntervalsTree *BuildIntervalsTree(Graph *graph)
    {
        ASSERT(graph->IsAnalysisValid<LivenessAnalyzer>());
        return LifeIntervalsTree::BuildIntervalsTree(graph->GetAnalysis<LivenessAnalyzer>().GetLifeIntervals(), graph);
    }

    static LifeIntervalsTree *BuildIntervalsTree(const ArenaVector<LifeIntervals *> &life_intervals,
                                                 const Graph *graph);

    explicit LifeIntervalsTree(LifeIntervalsTreeNode *root) : root_(root) {};

    DEFAULT_MOVE_SEMANTIC(LifeIntervalsTree);
    DEFAULT_COPY_SEMANTIC(LifeIntervalsTree);
    ~LifeIntervalsTree() = default;

    template <bool live_inputs = true, typename Func>
    void VisitIntervals(LifeNumber ln, Func func, const Inst *skip_inst = nullptr) const
    {
        for (auto node = root_; node != nullptr; node = ln < node->GetMidpoint() ? node->GetLeft() : node->GetRight()) {
            if (ln < node->GetMinValue() || ln > node->GetMaxValue()) {
                // current node does not contain intervals covering the target life number
                continue;
            }
            for (auto i = node->GetBegin(); i < node->GetEnd(); i++) {
                auto interval = *i;
                if (interval->GetInst() == skip_inst) {
                    continue;
                }
                if (ln > interval->GetEnd()) {
                    // intervals are ordered by their end in descending order, so we can stop at the first interval
                    // that ends before the target life number
                    break;
                }
                if (interval->SplitCover<live_inputs>(ln)) {
                    func(interval);
                }
            }
        }
    }

private:
    LifeIntervalsTreeNode *root_;
};

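// Editor's sketch (assumed types, not this header's API): why the node payload
// is kept sorted by interval end in descending order. A point query can then
// stop at the first interval that ends before the probe, exactly as the early
// `break` in VisitIntervals above does.
#include <cstdint>
#include <vector>

struct MiniSpan {
    uint32_t begin;
    uint32_t end;
};

// `spans` must be sorted by `end` in descending order.
inline int CountCovering(const std::vector<MiniSpan> &spans, uint32_t ln)
{
    int count = 0;
    for (const auto &s : spans) {
        if (ln > s.end) {
            break;  // every remaining span ends even earlier
        }
        if (ln >= s.begin) {
            ++count;  // this span covers the probed life number
        }
    }
    return count;
}
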
// Analysis collecting live intervals with assigned registers and
// allowing to visit those of them which intersect with a specific instruction.
class LiveRegisters : public Analysis {
public:
    explicit LiveRegisters(Graph *graph) : Analysis(graph) {};

    NO_MOVE_SEMANTIC(LiveRegisters);
    NO_COPY_SEMANTIC(LiveRegisters);
    ~LiveRegisters() override = default;

    bool RunImpl() override
    {
        inst_life_intervals_tree_ = LifeIntervalsTree::BuildIntervalsTree(GetGraph());
        return true;
    }

    // Visit all live intervals with assigned registers intersecting with the specified instruction
    // (excluding the interval for that instruction).
    template <bool live_inputs = true, typename Func>
    void VisitIntervalsWithLiveRegisters(Inst *inst, Func func)
    {
        ASSERT(GetGraph()->IsAnalysisValid<LivenessAnalyzer>());

        if (inst_life_intervals_tree_ == nullptr) {
            return;
        }

        auto &la = GetGraph()->GetAnalysis<LivenessAnalyzer>();
        auto li = la.GetInstLifeIntervals(inst);
        inst_life_intervals_tree_->VisitIntervals<live_inputs, Func>(li->GetBegin(), func, inst);
    }

    const char *GetPassName() const override
    {
        return "Live Registers";
    }

private:
    LifeIntervalsTree *inst_life_intervals_tree_ {nullptr};
};

} // namespace panda::compiler
#endif  // COMPILER_OPTIMIZER_ANALYSIS_LIVE_REGISTERS_H
@ -32,9 +32,7 @@ LivenessAnalyzer::LivenessAnalyzer(Graph *graph)
      block_live_sets_(graph->GetLocalAllocator()->Adapter()),
      pending_catch_phi_inputs_(graph->GetAllocator()->Adapter()),
      physical_general_intervals_(graph->GetAllocator()->Adapter()),
      physical_vector_intervals_(graph->GetAllocator()->Adapter()),
      use_table_(graph->GetAllocator()),
      has_safepoint_during_call_(graph->GetRuntime()->HasSafepointDuringCall())
      physical_vector_intervals_(graph->GetAllocator()->Adapter())
{
}

@ -224,12 +222,6 @@ void LivenessAnalyzer::BuildInstLifeNumbers()
    for (auto inst : block->Insts()) {
        inst->SetLinearNumber(linear_number++);
        CreateLifeIntervals(inst);
        if (IsPseudoUserOfMultiOutput(inst)) {
            // Should be the same life number as the pseudo-user, since they actually have the same definition
            SetInstLifeNumber(inst, life_number);
            GetInstLifeIntervals(inst)->AddUsePosition(life_number);
            continue;
        }
        life_number += LIFE_NUMBER_GAP;
        SetInstLifeNumber(inst, life_number);
        insts_by_life_number_.push_back(inst);
@ -324,9 +316,6 @@ void LivenessAnalyzer::ProcessBlockLiveInstructions(BasicBlock *block, InstLiveS
        interval->StartFrom(inst_life_number);
        AdjustCatchPhiInputsLifetime(inst);
    } else {
        if (inst->GetOpcode() == Opcode::LiveOut) {
            interval->AppendRange({inst_life_number, GetBlockLiveRange(GetGraph()->GetEndBlock()).GetBegin()});
        }
        auto current_live_range = LiveRange {GetBlockLiveRange(block).GetBegin(), inst_life_number};
        AdjustInputsLifetime(inst, current_live_range, live_set);
    }
@ -354,38 +343,12 @@ void LivenessAnalyzer::ProcessBlockLiveInstructions(BasicBlock *block, InstLiveS
/* static */
LiveRange LivenessAnalyzer::GetPropagatedLiveRange(Inst *inst, LiveRange live_range)
{
    /*
     * An implicit null check is encoded as a no-op, and if the reference to check is null,
     * SIGSEGV will be raised at the first (closest) user. The regmap generated for the
     * NullCheck's SaveState should be valid at that user, so we need to extend the
     * life intervals of the SaveState's inputs until the NullCheck user.
     */
    if (inst->IsNullCheck() && !inst->GetUsers().Empty() && inst->CastToNullCheck()->IsImplicit()) {
        auto extend_until = std::numeric_limits<LifeNumber>::max();
        for (auto &user : inst->GetUsers()) {
            auto li = GetInstLifeIntervals(user.GetInst());
            ASSERT(li != nullptr);
            extend_until = std::min<LifeNumber>(extend_until, li->GetBegin() + 1);
        }
        live_range.SetEnd(extend_until);
        return live_range;
    }
    /*
     * We need to propagate liveness for instructions with CallRuntime to save registers before the call;
     * otherwise, we will not be able to restore the values of the virtual registers
     */
    if (inst->IsPropagateLiveness()) {
        live_range.SetEnd(live_range.GetEnd() + 1);
    } else if (inst->GetOpcode() == Opcode::ReturnInlined && inst->CastToReturnInlined()->IsExtendedLiveness()) {
        /*
         * [ReturnInlined]
         * [ReturnInlined]
         * ...
         * [Deoptimize/Throw]
         *
         * In this case we propagate the ReturnInlined inputs' liveness up to the end of the basic block
         */
        live_range.SetEnd(GetBlockLiveRange(inst->GetBasicBlock()).GetEnd());
    }
    return live_range;
}
@ -428,13 +391,9 @@ void LivenessAnalyzer::AdjustInputsLifetime(Inst *inst, LiveRange live_range, In
 * Increase ref-input liveness in the 'no-async-jit' mode, since GC can be triggered and delete the ref during
 * callee-method compilation
 */
void LivenessAnalyzer::SetInputRange(const Inst *inst, const Inst *input, LiveRange live_range) const
void LivenessAnalyzer::SetInputRange([[maybe_unused]] const Inst *inst, const Inst *input, LiveRange live_range) const
{
    if (has_safepoint_during_call_ && inst->IsCall() && DataType::IsReference(input->GetType())) {
        GetInstLifeIntervals(input)->AppendRange(live_range.GetBegin(), live_range.GetEnd() + 1U);
    } else {
        GetInstLifeIntervals(input)->AppendRange(live_range);
    }
    GetInstLifeIntervals(input)->AppendRange(live_range);
}

/*
@ -615,7 +574,7 @@ void LivenessAnalyzer::BlockReg(Register reg, LifeNumber block_from)

bool LivenessAnalyzer::IsCallBlockingRegisters(Inst *inst) const
{
    if (inst->IsCall() && !static_cast<CallInst *>(inst)->IsInlined()) {
    if (inst->IsCall()) {
        return true;
    }
    if (inst->IsIntrinsic() && inst->CastToIntrinsic()->IsNativeCall()) {
@ -17,7 +17,6 @@
#define COMPILER_OPTIMIZER_ANALYSIS_LIVENESS_ANALIZER_H

#include "utils/arena_containers.h"
#include "optimizer/analysis/liveness_use_table.h"
#include "optimizer/ir/constants.h"
#include "optimizer/ir/inst.h"
#include "optimizer/ir/marker.h"
@ -331,9 +330,6 @@

    bool NoDest() const
    {
        if (IsPseudoUserOfMultiOutput(inst_)) {
            return false;
        }
        return inst_->NoDest();
    }

@ -543,11 +539,6 @@
    void DumpLifeIntervals(std::ostream &out = std::cout) const;
    void DumpLocationsUsage(std::ostream &out = std::cout) const;

    const UseTable &GetUseTable() const
    {
        return use_table_;
    }

private:
    ArenaAllocator *GetAllocator()
    {
@ -601,9 +592,6 @@
    ArenaMultiMap<Inst *, Inst *> pending_catch_phi_inputs_;
    ArenaVector<LifeIntervals *> physical_general_intervals_;
    ArenaVector<LifeIntervals *> physical_vector_intervals_;
    UseTable use_table_;
    bool has_safepoint_during_call_;

    Marker marker_ {UNDEF_MARKER};
};
} // namespace panda::compiler

@ -1,64 +0,0 @@
/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/analysis/liveness_use_table.h"

namespace panda::compiler {
UseTable::UseTable(ArenaAllocator *allocator) : table_(allocator->Adapter()), allocator_(allocator) {}

void UseTable::AddUseOnFixedLocation(const Inst *inst, Location location, LifeNumber ln)
{
    auto res = table_.try_emplace(inst, allocator_->Adapter());
    auto &uses = res.first->second;
    ASSERT(location.IsRegisterValid());
    uses[ln] = location.GetValue();
}

bool UseTable::HasUseOnFixedLocation(const Inst *inst, LifeNumber ln) const
{
    auto it = table_.find(inst);
    if (it == table_.end()) {
        return false;
    }
    const auto &uses = it->second;
    return uses.count(ln) > 0;
}

Register UseTable::GetNextUseOnFixedLocation(const Inst *inst, LifeNumber ln) const
{
    auto it = table_.find(inst);
    if (it == table_.end()) {
        return INVALID_REG;
    }
    const auto &uses = it->second;
    auto uses_it = uses.lower_bound(ln);
    return uses_it == uses.end() ? INVALID_REG : uses_it->second;
}
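
// Editor's sketch (assumed aliases, std-only): GetNextUseOnFixedLocation in
// miniature. Keeping the per-instruction uses in an ordered map keyed by life
// number turns "first fixed use at or after ln" into a single lower_bound probe.
#include <cstdint>
#include <map>

using MiniLifeNumber = uint32_t;
using MiniRegister = uint8_t;
constexpr MiniRegister MINI_INVALID_REG = 0xFFU;

inline MiniRegister NextFixedUse(const std::map<MiniLifeNumber, MiniRegister> &uses, MiniLifeNumber ln)
{
    auto it = uses.lower_bound(ln);  // first entry with key >= ln
    return it == uses.end() ? MINI_INVALID_REG : it->second;
}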

void UseTable::Dump(std::ostream &out, Arch arch) const
{
    out << "UseTable" << std::endl;
    for (auto [inst, uses] : table_) {
        out << "Inst v" << inst->GetId() << ": ";
        auto sep = "";
        for (auto [ln, r] : uses) {
            out << sep << "{" << std::to_string(ln) << ", " << Location::MakeRegister(r, inst->GetType()).ToString(arch)
                << "}";
            sep = ", ";
        }
        out << std::endl;
    }
}
} // namespace panda::compiler
@ -1,44 +0,0 @@
/*
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_ANALYSIS_USE_TABLE_H
#define COMPILER_OPTIMIZER_ANALYSIS_USE_TABLE_H

#include "utils/arena_containers.h"
#include "optimizer/ir/inst.h"

namespace panda::compiler {
using FixedUses = ArenaMap<LifeNumber, Register>;

/**
 * For each added instruction, holds its uses on fixed locations
 */
class UseTable {
public:
    explicit UseTable(ArenaAllocator *allocator);

    void AddUseOnFixedLocation(const Inst *inst, Location location, LifeNumber ln);
    bool HasUseOnFixedLocation(const Inst *inst, LifeNumber ln) const;
    Register GetNextUseOnFixedLocation(const Inst *inst, LifeNumber ln) const;

    void Dump(std::ostream &out, Arch arch) const;

private:
    ArenaUnorderedMap<const Inst *, FixedUses> table_;
    ArenaAllocator *allocator_;
};
} // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_ANALYSIS_USE_TABLE_H
@ -17,7 +17,6 @@

#include "optimizer/ir/inst.h"
#include "optimizer/pass.h"
#include "optimizer/analysis/countable_loop_parser.h"

namespace panda::compiler {
class BasicBlock;

@ -1,63 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "object_type_propagation.h"
#include "optimizer/ir/basicblock.h"
#include "optimizer/ir/inst.h"

namespace panda::compiler {
bool ObjectTypePropagation::RunImpl()
{
    VisitGraph();
    return true;
}

void ObjectTypePropagation::VisitNewObject(GraphVisitor *v, Inst *i)
{
    auto self = static_cast<ObjectTypePropagation *>(v);
    auto inst = i->CastToNewObject();
    auto klass = self->GetGraph()->GetRuntime()->GetClass(inst->GetMethod(), inst->GetTypeId());
    if (klass != nullptr) {
        inst->SetObjectTypeInfo(ObjectTypeInfo(static_cast<ObjectTypeInfo::ClassType>(klass)));
    }
}

void ObjectTypePropagation::VisitNewArray(GraphVisitor *v, Inst *i)
{
    auto self = static_cast<ObjectTypePropagation *>(v);
    auto inst = i->CastToNewArray();
    auto klass = self->GetGraph()->GetRuntime()->GetClass(inst->GetMethod(), inst->GetTypeId());
    if (klass != nullptr) {
        inst->SetObjectTypeInfo(ObjectTypeInfo(static_cast<ObjectTypeInfo::ClassType>(klass)));
    }
}

void ObjectTypePropagation::VisitLoadString(GraphVisitor *v, Inst *i)
{
    auto self = static_cast<ObjectTypePropagation *>(v);
    auto inst = i->CastToLoadString();
    auto klass = self->GetGraph()->GetRuntime()->GetStringClass(inst->GetMethod());
    if (klass != nullptr) {
        inst->SetObjectTypeInfo(ObjectTypeInfo(static_cast<ObjectTypeInfo::ClassType>(klass)));
    }
}

void ObjectTypePropagation::VisitLoadArray([[maybe_unused]] GraphVisitor *v, [[maybe_unused]] Inst *i)
{
    // LoadArray should be processed more carefully, because it may contain an object of a derived class with its own
    // method implementation. We need to check all array stores and method calls between NewArray and LoadArray.
    // TODO(mshertennikov): Support it.
}
} // namespace panda::compiler
@ -1,54 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_OBJECT_TYPE_PROPAGATION_H
#define PANDA_OBJECT_TYPE_PROPAGATION_H

#include "optimizer/pass.h"
#include "optimizer/ir/graph.h"
#include "optimizer/ir/graph_visitor.h"

namespace panda::compiler {
// NOLINTNEXTLINE(fuchsia-multiple-inheritance)
class ObjectTypePropagation final : public Analysis, public GraphVisitor {
public:
    explicit ObjectTypePropagation(Graph *graph) : Analysis(graph) {}
    NO_MOVE_SEMANTIC(ObjectTypePropagation);
    NO_COPY_SEMANTIC(ObjectTypePropagation);
    ~ObjectTypePropagation() override = default;

    const ArenaVector<BasicBlock *> &GetBlocksToVisit() const override
    {
        return GetGraph()->GetBlocksRPO();
    }

    const char *GetPassName() const override
    {
        return "ObjectTypePropagation";
    }

    bool RunImpl() override;

protected:
    static void VisitNewObject(GraphVisitor *v, Inst *inst);
    static void VisitNewArray(GraphVisitor *v, Inst *inst);
    static void VisitLoadArray(GraphVisitor *v, Inst *inst);
    static void VisitLoadString(GraphVisitor *v, Inst *inst);

#include "optimizer/ir/visitor.inc"
};
} // namespace panda::compiler

#endif  // PANDA_OBJECT_TYPE_PROPAGATION_H
@ -1,73 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "types_analysis.h"
#include "optimizer/ir/inst.h"

namespace panda::compiler {
bool TypesAnalysis::RunImpl()
{
    marker_ = GetGraph()->NewMarker();
    VisitGraph();
    GetGraph()->EraseMarker(marker_);
    return true;
}

void TypesAnalysis::MarkedPhiRec(PhiInst *phi, AnyBaseType type)
{
    if (phi->SetMarker(marker_)) {
        auto phi_type = phi->GetAnyType();
        // Phi has 2 inputs or users with different types
        if (phi_type != type) {
            phi->SetAssumedAnyType(AnyBaseType::UNDEFINED_TYPE);
            return;
        }
        return;
    }
    phi->SetAssumedAnyType(type);
    for (auto &user : phi->GetUsers()) {
        auto user_inst = user.GetInst();
        if (user_inst->GetOpcode() == Opcode::Phi) {
            MarkedPhiRec(user_inst->CastToPhi(), type);
        }
    }
}

void TypesAnalysis::VisitCastValueToAnyType(GraphVisitor *v, Inst *inst)
{
    auto self = static_cast<TypesAnalysis *>(v);
    auto type = inst->CastToCastValueToAnyType()->GetAnyType();
    ASSERT(type != AnyBaseType::UNDEFINED_TYPE);
    for (auto &user : inst->GetUsers()) {
        auto user_inst = user.GetInst();
        if (user_inst->GetOpcode() == Opcode::Phi) {
            self->MarkedPhiRec(user_inst->CastToPhi(), type);
        }
    }
}

void TypesAnalysis::VisitAnyTypeCheck(GraphVisitor *v, Inst *inst)
{
    auto self = static_cast<TypesAnalysis *>(v);
    auto type = inst->CastToAnyTypeCheck()->GetAnyType();
    if (type == AnyBaseType::UNDEFINED_TYPE) {
        return;
    }
    auto input = inst->GetInput(0).GetInst();
    if (input->GetOpcode() == Opcode::Phi) {
        self->MarkedPhiRec(input->CastToPhi(), type);
    }
}
} // namespace panda::compiler
@ -1,62 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_ANALYSIS_TYPES_ANALISYS_H
#define COMPILER_OPTIMIZER_ANALYSIS_TYPES_ANALISYS_H

#include "optimizer/pass.h"
#include "optimizer/ir/graph.h"
#include "optimizer/ir/graph_visitor.h"

namespace panda::compiler {
/**
 * TypesAnalysis sets assumed (dynamic) types on phi instructions.
 * If a PHI is a user of a CastValueToAnyType, the type of the CastValueToAnyType is set as the assumed type of the PHI.
 * If a PHI is an input of an AnyTypeCheck, the type of the AnyTypeCheck is set as the assumed type of the PHI.
 * If a PHI already has an assumed type and we try to set another one, the assumed type becomes undefined.
 * If an assumed type is set for a PHI, we then try to assign the type to all PHIs that are users of the current PHI.
 */
// NOLINTNEXTLINE(fuchsia-multiple-inheritance)
class TypesAnalysis final : public Analysis, public GraphVisitor {
public:
    explicit TypesAnalysis(Graph *graph) : Analysis(graph) {}
    NO_MOVE_SEMANTIC(TypesAnalysis);
    NO_COPY_SEMANTIC(TypesAnalysis);
    ~TypesAnalysis() override = default;

    const ArenaVector<BasicBlock *> &GetBlocksToVisit() const override
    {
        return GetGraph()->GetBlocksRPO();
    }

    const char *GetPassName() const override
    {
        return "TypesAnalysis";
    }

    bool RunImpl() override;

protected:
    static void VisitCastValueToAnyType(GraphVisitor *v, Inst *inst);
    static void VisitAnyTypeCheck(GraphVisitor *v, Inst *inst);

#include "optimizer/ir/visitor.inc"
private:
    void MarkedPhiRec(PhiInst *phi, AnyBaseType type);
    Marker marker_ {UNDEF_MARKER};
};
} // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_ANALYSIS_TYPES_ANALISYS_H
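
// Editor's sketch (hypothetical enum and names): the merge rule the comment
// above describes, reduced to a few lines. The first observed type becomes the
// assumption; any conflicting later observation collapses it to UNDEFINED,
// mirroring what MarkedPhiRec does with SetAssumedAnyType.
#include <optional>

enum class MiniAnyType { UNDEFINED, BOOLEAN, INT, DOUBLE, STRING };

struct MiniPhiState {
    std::optional<MiniAnyType> assumed;

    void Observe(MiniAnyType t)
    {
        if (!assumed.has_value()) {
            assumed = t;  // first observation wins provisionally
        } else if (*assumed != t) {
            assumed = MiniAnyType::UNDEFINED;  // conflicting types: give up
        }
    }
};
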
@ -1,213 +0,0 @@
|
||||
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import("//arkcompiler/runtime_core/ark_config.gni")

config("arkencoder_config") {
  include_dirs = [ "$ark_root/compiler/optimizer/code_generator" ]
  cflags_cc = [ "-DPANDA_BUILD" ]
  if (ark_standalone_build) {
    ldflags = [ "-lrt" ]
  }
}

ohos_shared_library("libarkencoder") {
  sources = [
    "target/asm_printer.cpp",
    "target/target.cpp",
  ]
  deps = []

  if (current_cpu == "x86") {
    sources += [
      "target/x86/callconv.cpp",
      "target/x86/encode.cpp",
      "target/x86/regfile.cpp",
    ]
    include_dirs = [ "$ark_root/compiler/optimizer/code_generator/target/x86" ]
  } else if (current_cpu == "amd64" || current_cpu == "x64" ||
             current_cpu == "x86_64") {
    if (!is_mingw) {
      sources += [
        "target/aarch64/callconv.cpp",
        "target/aarch64/encode.cpp",
        "target/aarch64/regfile.cpp",
      ]
      cflags_cc = [
        "-DUSE_VIXL_ARM64",
        "-DVIXL_CODE_BUFFER_MMAP",
      ]

      include_dirs =
          [ "$ark_root/compiler/optimizer/code_generator/target/aarch64" ]

      if (ark_enable_compiler_x64) {
        sources += [
          "target/amd64/callconv.cpp",
          "target/amd64/encode.cpp",
          "target/amd64/regfile.cpp",
        ]
        include_dirs += [
          "$ark_third_party_root/asmjit/src",
          "$ark_third_party_root/zydis/include",
          "$ark_third_party_root/zydis/zycore/include",
        ]
        deps += [
          "$ark_third_party_root/asmjit:libasmjit_frontend_static",
          "$ark_third_party_root/zydis:libZydis",
        ]
      }
    }
  } else if (current_cpu == "arm") {
    sources += [
      "target/aarch32/callconv.cpp",
      "target/aarch32/encode.cpp",
      "target/aarch32/regfile.cpp",
    ]
    cflags_cc = [
      "-DUSE_VIXL_ARM32",
      "-DVIXL_CODE_BUFFER_MMAP",
    ]
    include_dirs =
        [ "$ark_root/compiler/optimizer/code_generator/target/aarch32" ]
  } else if (current_cpu == "arm64") {
    sources += [
      "target/aarch64/callconv.cpp",
      "target/aarch64/encode.cpp",
      "target/aarch64/regfile.cpp",
    ]
    cflags_cc = [
      "-DUSE_VIXL_ARM64",
      "-DVIXL_CODE_BUFFER_MMAP",
    ]
    include_dirs =
        [ "$ark_root/compiler/optimizer/code_generator/target/aarch64" ]
  }

  configs = [
    "$ark_root:ark_config",
    "$ark_root/libpandabase:arkbase_public_config",
    "$ark_root/compiler:arkcompiler_public_config",
    ":arkencoder_config",
  ]

  deps += [
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_can_encode_builtin_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_graph_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_inst_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_names_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsic_codegen_test_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_enum_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_ir_build_inl",
    "$ark_root/libpandabase:libarkbase",
  ]

  output_extension = "so"
  relative_install_dir = "ark"
  part_name = "runtime_core"
  subsystem_name = "arkcompiler"
}

ohos_static_library("libarkencoder_frontend_static") {
  sources = [
    "target/asm_printer.cpp",
    "target/target.cpp",
  ]
  deps = []

  if (current_cpu == "x86") {
    sources += [
      "target/x86/callconv.cpp",
      "target/x86/encode.cpp",
      "target/x86/regfile.cpp",
    ]
    include_dirs = [ "$ark_root/compiler/optimizer/code_generator/target/x86" ]
  } else if (current_cpu == "amd64" || current_cpu == "x64" ||
             current_cpu == "x86_64") {
    if (!is_mingw) {
      sources += [
        "target/aarch64/callconv.cpp",
        "target/aarch64/encode.cpp",
        "target/aarch64/regfile.cpp",
      ]
      cflags_cc = [
        "-DUSE_VIXL_ARM64",
        "-DVIXL_CODE_BUFFER_MMAP",
      ]
      include_dirs =
          [ "$ark_root/compiler/optimizer/code_generator/target/aarch64" ]

      if (ark_enable_compiler_x64) {
        sources += [
          "target/amd64/callconv.cpp",
          "target/amd64/encode.cpp",
          "target/amd64/regfile.cpp",
        ]
        include_dirs += [
          "$ark_third_party_root/asmjit/src",
          "$ark_third_party_root/zydis/include",
          "$ark_third_party_root/zydis/zycore/include",
        ]
        deps += [
          "$ark_third_party_root/asmjit:libasmjit_frontend_static",
          "$ark_third_party_root/zydis:libZydis",
        ]
      }
    }
  } else if (current_cpu == "arm") {
    sources += [
      "target/aarch32/callconv.cpp",
      "target/aarch32/encode.cpp",
      "target/aarch32/regfile.cpp",
    ]
    cflags_cc = [
      "-DUSE_VIXL_ARM32",
      "-DVIXL_CODE_BUFFER_MMAP",
    ]
    include_dirs =
        [ "$ark_root/compiler/optimizer/code_generator/target/aarch32" ]
  } else if (current_cpu == "arm64") {
    sources += [
      "target/aarch64/callconv.cpp",
      "target/aarch64/encode.cpp",
      "target/aarch64/regfile.cpp",
    ]
    cflags_cc = [
      "-DUSE_VIXL_ARM64",
      "-DVIXL_CODE_BUFFER_MMAP",
    ]
    include_dirs =
        [ "$ark_root/compiler/optimizer/code_generator/target/aarch64" ]
  }

  configs = [
    "$ark_root:ark_config",
    "$ark_root/libpandabase:arkbase_public_config",
    "$ark_root/compiler:arkcompiler_public_config",
    ":arkencoder_config",
  ]

  deps += [
    "$ark_root/compiler:intrinsics_stub_inl_h",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_can_encode_builtin_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_graph_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_generate_operations_intrinsic_inst_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_get_intrinsics_names_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_enum_inl",
    "$ark_root/compiler:libarkcompiler_intrinsics_gen_inl_intrinsics_ir_build_inl",
    "$ark_root/compiler:libarkcompiler_opcodes_h_arch_info_gen_h",
    "$ark_root/libpandabase:libarkbase_frontend_static",
  ]
}
@@ -1,96 +0,0 @@
# Copyright (c) 2021-2022 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

project(encoder)

include(${PANDA_ROOT}/compiler/cmake/target.cmake)

include_directories(
    .
    ${PANDA_ROOT}
)

set(GENERATED_DIR ${PANDA_BINARY_ROOT}/compiler/generated)

function(append_sources dir)
    list(APPEND ENCODE_SOURCES target/${dir}/encode.cpp)
    list(APPEND ENCODE_SOURCES target/${dir}/callconv.cpp)
    list(APPEND ENCODE_SOURCES target/${dir}/regfile.cpp)
    set(ENCODE_SOURCES ${ENCODE_SOURCES} PARENT_SCOPE)
endfunction()

set(ENCODE_SOURCES
    target/target.cpp
    target/asm_printer.cpp
)

if (PANDA_COMPILER_TARGET_X86_64)
    message(STATUS "Encoder: Build amd64 target")
    append_sources("amd64")
endif()
if (PANDA_COMPILER_TARGET_X86)
    message(STATUS "Encoder: Build x86 target")
    append_sources("x86")
endif()
if (PANDA_COMPILER_TARGET_AARCH64)
    message(STATUS "Encoder: Build aarch64 target")
    append_sources("aarch64")
endif()
if (PANDA_COMPILER_TARGET_AARCH32)
    if (NOT (PANDA_TARGET_ARM32_ABI_SOFT OR PANDA_TARGET_ARM32_ABI_SOFTFP OR PANDA_TARGET_ARM32_ABI_HARD))
        message(FATAL_ERROR "Undefined ABI for aarch32 architecture - please set PANDA_TARGET_ARM32_ABI_SOFT or PANDA_TARGET_ARM32_ABI_SOFTFP or PANDA_TARGET_ARM32_ABI_HARD")
    endif()
    message(STATUS "Encoder: Build aarch32 target")
    append_sources("aarch32")
endif()

add_library(arkencoder STATIC ${ENCODE_SOURCES})
set_property(TARGET arkencoder PROPERTY POSITION_INDEPENDENT_CODE ON)

target_compile_options(arkencoder PRIVATE -Wno-shadow)

target_include_directories(arkencoder INTERFACE .)
target_include_directories(arkencoder
    PRIVATE ${PANDA_ROOT}/compiler
)

target_link_libraries(arkencoder arkbase)

if (PANDA_COMPILER_TARGET_AARCH64)
    target_link_libraries(arkencoder vixl)
    add_dependencies(arkencoder vixl)
    target_compile_options(arkencoder PUBLIC "-DUSE_VIXL_ARM64" "-DVIXL_CODE_BUFFER_MALLOC")
endif()
if (PANDA_COMPILER_TARGET_AARCH32)
    target_link_libraries(arkencoder vixl)
    add_dependencies(arkencoder vixl)
    target_compile_options(arkencoder PUBLIC "-DUSE_VIXL_ARM32" "-DVIXL_CODE_BUFFER_MALLOC")
endif()
if (PANDA_COMPILER_TARGET_X86)
    target_link_libraries(arkencoder asmjit Zydis)
    add_dependencies(arkencoder asmjit Zydis)
endif()
if (PANDA_COMPILER_TARGET_X86_64)
    target_link_libraries(arkencoder asmjit Zydis)
    add_dependencies(arkencoder asmjit Zydis)
endif()

panda_add_to_clang_tidy(TARGET arkencoder)
panda_add_sanitizers(TARGET arkencoder SANITIZERS ${PANDA_SANITIZERS_LIST})

add_dependencies(arkencoder intrinsics_gen_compiler)
add_dependencies(arkencoder isa_gen_compiler)
add_dependencies(arkencoder instructions_gen_compiler)
target_include_directories(arkencoder PUBLIC ${GENERATED_DIR})

add_check_style(.)
@@ -1,265 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_CALLCONV_H_
#define COMPILER_OPTIMIZER_CODEGEN_CALLCONV_H_
/*
High-level calling-convention interface for codegen.
Also contains branch targets (labels).

Responsible for:
    branch and jump encoding,
    labels (jump targets),
    conditional instructions.
*/

#include <functional>
#include "encode.h"
#include "compiler/optimizer/ir/datatype.h"
#include "compiler/optimizer/ir/locations.h"
#include "compiler/optimizer/code_generator/frame_info.h"

namespace panda::compiler {
class ParameterInfo {
public:
    using SlotID = uint8_t;
    ParameterInfo() = default;
    virtual ~ParameterInfo() = default;
    // Gets the next native parameter, given the previously assigned ones.
    // Pushes the data in a Reg.
    // Returns a register or a stack slot.
    virtual std::variant<Reg, SlotID> GetNativeParam(const TypeInfo &) = 0;

    virtual Location GetNextLocation([[maybe_unused]] DataType::Type type) = 0;

    void Reset()
    {
        current_scalar_number_ = 0;
        current_vector_number_ = 0;
        current_stack_offset_ = 0;
    }

    NO_COPY_SEMANTIC(ParameterInfo);
    NO_MOVE_SEMANTIC(ParameterInfo);

protected:
    uint32_t current_scalar_number_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    uint32_t current_vector_number_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
    uint8_t current_stack_offset_ {0};  // NOLINT(misc-non-private-member-variables-in-classes)
};

#ifdef PANDA_COMPILER_CFI
struct CfiOffsets {
    size_t push_fplr {0};
    size_t set_fp {0};
    size_t push_callees {0};
    size_t pop_callees {0};
    size_t pop_fplr {0};
};

struct CfiInfo {
    CfiOffsets offsets;
    RegMask callee_regs;
    VRegMask callee_vregs;
};

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_OFFSET(field, value) GetCfiInfo().offsets.field = value
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_CALLEE_REGS(value) GetCfiInfo().callee_regs = value
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_CALLEE_VREGS(value) GetCfiInfo().callee_vregs = value
#else
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_OFFSET(field, value)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_CALLEE_REGS(value)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SET_CFI_CALLEE_VREGS(value)
#endif

/**
 * Specifies the CallingConvention mode.
 */
class CallConvMode final {
public:
    explicit CallConvMode(uint32_t value) : value_(value) {}

    DEFAULT_COPY_SEMANTIC(CallConvMode);
    DEFAULT_MOVE_SEMANTIC(CallConvMode);

    ~CallConvMode() = default;

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define DECLARE_CALLCONV_MODE(name)                    \
    static CallConvMode name(bool set = true)          \
    {                                                  \
        return CallConvMode(Flag##name ::Encode(set)); \
    }                                                  \
    void Set##name(bool v)                             \
    {                                                  \
        Flag##name ::Set(v, &value_);                  \
    }                                                  \
    bool Is##name() const                              \
    {                                                  \
        return Flag##name ::Get(value_);               \
    }

    // Panda ABI convention (otherwise native)
    DECLARE_CALLCONV_MODE(Panda);
    // Compile for OSR (otherwise JIT)
    DECLARE_CALLCONV_MODE(Osr);
    // The method comes from a dynamic language
    DECLARE_CALLCONV_MODE(Dyn);

#undef DECLARE_CALLCONV_MODE

private:
    using FlagPanda = BitField<bool, 0, 1>;
    using FlagOsr = FlagPanda::NextFlag;
    using FlagDyn = FlagOsr::NextFlag;

    uint32_t value_ {0};

    friend CallConvMode operator|(CallConvMode a, CallConvMode b);
};

inline CallConvMode operator|(CallConvMode a, CallConvMode b)
{
    return CallConvMode(a.value_ | b.value_);
}

/**
 * CallingConvention holds information about the calling convention of the current architecture.
 */
class CallingConvention {
public:
    virtual ~CallingConvention() = default;

    // All possible reasons for calls and returns
    enum Reason {
        // Reasons for saving/restoring registers
        FUNCTION,  // Function inside the program
        NATIVE,    // Native function
        PROGRAMM   // Enter/exit from the program (UNSUPPORTED)
    };

    // Implemented in target.cpp
    static CallingConvention *Create(ArenaAllocator *arena_allocator, Encoder *enc, RegistersDescription *descr,
                                     Arch arch, bool is_panda_abi = false, bool is_osr = false, bool is_dyn = false,
                                     bool print_asm = false);

public:
    CallingConvention(ArenaAllocator *allocator, Encoder *enc, RegistersDescription *descr, CallConvMode mode)
        : allocator_(allocator), encoder_(enc), regfile_(descr), mode_(mode)
    {
    }

    ArenaAllocator *GetAllocator() const
    {
        return allocator_;
    }

    Encoder *GetEncoder() const
    {
        return encoder_;
    }

    void SetEncoder(Encoder *enc)
    {
        encoder_ = enc;
    }

    RegistersDescription *GetRegfile() const
    {
        return regfile_;
    }

    virtual bool IsValid() const
    {
        return false;
    }

    CallConvMode GetMode() const
    {
        return mode_;
    }

    bool IsPandaMode() const
    {
        return mode_.IsPanda();
    }

    bool IsOsrMode() const
    {
        return mode_.IsOsr();
    }

    bool IsDynMode() const
    {
        return mode_.IsDyn();
    }

#ifdef PANDA_COMPILER_CFI
    CfiInfo &GetCfiInfo()
    {
        return cfi_info_;
    }

    const CfiInfo &GetCfiInfo() const
    {
        return cfi_info_;
    }
    static constexpr bool ProvideCFI()
    {
        return true;
    }
#else
    static constexpr bool ProvideCFI()
    {
        return false;
    }
#endif

    // Prologue/epilogue interfaces
    virtual void GeneratePrologue(const FrameInfo &frame_info) = 0;
    virtual void GenerateEpilogue(const FrameInfo &frame_info, std::function<void()> post_job) = 0;

    virtual void GenerateNativePrologue(const FrameInfo &frame_info) = 0;
    virtual void GenerateNativeEpilogue(const FrameInfo &frame_info, std::function<void()> post_job) = 0;

    // Code generation completion interfaces
    virtual void *GetCodeEntry() = 0;
    virtual uint32_t GetCodeSize() = 0;

    // Calculates information about parameters; reserves regs_offset registers for special needs
    virtual ParameterInfo *GetParameterInfo(uint8_t regs_offset) = 0;

    NO_COPY_SEMANTIC(CallingConvention);
    NO_MOVE_SEMANTIC(CallingConvention);

private:
    // Must not use ExecModel!
    ArenaAllocator *allocator_ {nullptr};
    Encoder *encoder_ {nullptr};
    RegistersDescription *regfile_ {nullptr};
#ifdef PANDA_COMPILER_CFI
    CfiInfo cfi_info_;
#endif
    CallConvMode mode_ {0};
};
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_CALLCONV_H_
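
DECLARE_CALLCONV_MODE above packs three independent mode flags into one uint32_t through consecutive one-bit BitFields, which is why modes compose with operator|. A minimal sketch of that encoding, assuming a hand-rolled one-bit field helper in place of the real BitField template:

#include <cstdint>
#include <cstdio>

// Hypothetical one-bit field at bit POS, modeled on the BitField pattern used above.
template <unsigned POS>
struct BoolFlag {
    static constexpr uint32_t MASK = 1U << POS;
    static constexpr uint32_t Encode(bool v)
    {
        return v ? MASK : 0U;
    }
    static constexpr bool Get(uint32_t value)
    {
        return (value & MASK) != 0;
    }
    static void Set(bool v, uint32_t *value)
    {
        *value = v ? (*value | MASK) : (*value & ~MASK);
    }
};

using FlagPanda = BoolFlag<0>;
using FlagOsr = BoolFlag<1>;
using FlagDyn = BoolFlag<2>;

int main()
{
    // Composes the same way CallConvMode::Panda() | CallConvMode::Osr() would.
    uint32_t mode = FlagPanda::Encode(true) | FlagOsr::Encode(true);
    FlagOsr::Set(false, &mode);  // analogous to SetOsr(false)
    std::printf("panda=%d osr=%d dyn=%d\n", FlagPanda::Get(mode), FlagOsr::Get(mode), FlagDyn::Get(mode));
    return 0;
}

The design choice is the usual flags-in-a-word one: a mode is a plain integer, cheap to copy and combine, while the named accessors keep call sites readable.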
File diff suppressed because it is too large
@@ -1,879 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_CODEGEN_H_
#define COMPILER_OPTIMIZER_CODEGEN_CODEGEN_H_

/*
Codegen interface for the compiler.
! Do not use this file in the runtime.
*/

#include "code_info/code_info_builder.h"
#include "compiler_logger.h"
#include "disassembly.h"
#include "frame_info.h"
#include "optimizer/analysis/live_registers.h"
#include "optimizer/code_generator/callconv.h"
#include "optimizer/code_generator/encode.h"
#include "optimizer/code_generator/registers_description.h"
#include "optimizer/code_generator/slow_path.h"
#include "optimizer/code_generator/spill_fill_encoder.h"
#include "optimizer/code_generator/target_info.h"
#include "optimizer/ir/analysis.h"
#include "optimizer/ir/graph.h"
#include "optimizer/ir/graph_visitor.h"
#include "optimizer/optimizations/regalloc/spill_fills_resolver.h"
#include "optimizer/pass_manager.h"
#include "utils/cframe_layout.h"

namespace panda::compiler {
// Maximum size in bytes
constexpr size_t INST_IN_SLOW_PATH = 64;

class Encoder;
class CodeBuilder;
class OsrEntryStub;

class Codegen : public Optimization {
    using EntrypointId = RuntimeInterface::EntrypointId;

public:
    explicit Codegen(Graph *graph);
    NO_MOVE_SEMANTIC(Codegen);
    NO_COPY_SEMANTIC(Codegen);

    ~Codegen() override = default;

    bool RunImpl() override;
    const char *GetPassName() const override;
    bool AbortIfFailed() const override;

    static bool Run(Graph *graph);

    ArenaAllocator *GetAllocator() const
    {
        return allocator_;
    }
    ArenaAllocator *GetLocalAllocator() const
    {
        return local_allocator_;
    }
    FrameInfo *GetFrameInfo() const
    {
        return frame_info_;
    }
    void SetFrameInfo(FrameInfo *frame_info)
    {
        frame_info_ = frame_info;
    }
    virtual void CreateFrameInfo();

    RuntimeInterface *GetRuntime() const
    {
        return runtime_;
    }
    RegistersDescription *GetRegfile() const
    {
        return regfile_;
    }
    Encoder *GetEncoder() const
    {
        return enc_;
    }
    CallingConvention *GetCallingConvention() const
    {
        return callconv_;
    }

    GraphVisitor *GetGraphVisitor() const
    {
        return visitor_;
    }

    LabelHolder::LabelId GetLabelEntry() const
    {
        return label_entry_;
    }

    LabelHolder::LabelId GetLabelExit() const
    {
        return label_exit_;
    }

    RuntimeInterface::MethodId GetMethodId()
    {
        return method_id_;
    }

    void SetStartCodeOffset(size_t offset)
    {
        start_code_offset_ = offset;
    }

    size_t GetStartCodeOffset() const
    {
        return start_code_offset_;
    }

    size_t GetLanguageExtensionOffsetFromSpInBytes();

    void Convert(ArenaVector<Reg> *regs_usage, const ArenaVector<bool> *mask, TypeInfo type_info);

    Reg ConvertRegister(Register ref, DataType::Type type = DataType::Type::INT64);

    Imm ConvertImm(uint64_t imm, DataType::Type type);

    Imm ConvertImmWithExtend(uint64_t imm, DataType::Type type);

    Imm ConvertImm(ConstantInst *const_inst, DataType::Type type);

    Condition ConvertCc(ConditionCode cc);
    Condition ConvertCcOverflow(ConditionCode cc);

    static inline TypeInfo ConvertDataType(DataType::Type type, Arch arch)
    {
        return TypeInfo::FromDataType(type, arch);
    }

    Arch GetArch() const
    {
        return GetTarget().GetArch();
    }

    Target GetTarget() const
    {
        return target_;
    }

    TypeInfo GetPtrRegType() const
    {
        return target_.GetPtrRegType();
    }

    CodeInfoBuilder *GetCodeBuilder() const
    {
        return code_builder_;
    }

    void CreateStackMap(Inst *inst, Inst *user = nullptr);

    void CreateStackMapRec(SaveStateInst *save_state, bool require_vreg_map, Inst *target_site);
    void CreateVRegMap(SaveStateInst *save_state, size_t vregs_count, Inst *target_site);
    void CreateVreg(const Location &location, Inst *inst, const VirtualRegister &vreg);
    void FillVregIndices(SaveStateInst *save_state);

    void CreateOsrEntry(SaveStateInst *save_state);

    void CreateVRegForRegister(const Location &location, Inst *inst, const VirtualRegister &vreg);

    /**
     * 'live_inputs' shows that the inst's source registers should be added to the mask
     */
    template <bool live_inputs = false>
    std::pair<RegMask, VRegMask> GetLiveRegisters(Inst *inst)
    {
        RegMask live_regs;
        VRegMask live_fp_regs;
        if (!options.IsCompilerSaveOnlyLiveRegisters() || inst == nullptr) {
            live_regs.set();
            live_fp_regs.set();
            return {live_regs, live_fp_regs};
        }
        // Run the LiveRegisters pass only if it is actually required
        if (!GetGraph()->IsAnalysisValid<LiveRegisters>()) {
            GetGraph()->RunPass<LiveRegisters>();
        }

        // Add registers from intervals that are live at the inst's definition
        auto &lr = GetGraph()->GetAnalysis<LiveRegisters>();
        lr.VisitIntervalsWithLiveRegisters<live_inputs>(inst, [&live_regs, &live_fp_regs, this](const auto &li) {
            auto reg = ConvertRegister(li->GetReg(), li->GetType());
            GetEncoder()->SetRegister(&live_regs, &live_fp_regs, reg);
        });

        // Add live temp registers
        live_regs |= GetEncoder()->GetLiveTmpRegMask();
        live_fp_regs |= GetEncoder()->GetLiveTmpFpRegMask();

        return {live_regs, live_fp_regs};
    }

    // Limits the live register set to the number of registers used to pass parameters to the runtime call:
    // 1) these ones are saved/restored by the caller
    // 2) the remaining ones are saved/restored by the bridge function (aarch only)
    void FillOnlyParameters(RegMask *live_regs, uint32_t num_params) const;

    template <typename T, typename... Args>
    T *CreateSlowPath(Inst *inst, Args &&... args)
    {
        static_assert(std::is_base_of_v<SlowPathBase, T>);
        auto label = GetEncoder()->CreateLabel();
        auto slow_path = GetLocalAllocator()->New<T>(label, inst, std::forward<Args>(args)...);
        slow_paths_.push_back(slow_path);
        return slow_path;
    }

    void EmitSlowPaths();

    void InsertTrace(std::initializer_list<std::variant<Reg, Imm>> params);

    void CallIntrinsic(Inst *inst, RuntimeInterface::IntrinsicId id);

    // The function is used for calling runtime functions through special bridges.
    // !NOTE Don't use the function for calling the runtime without bridges (it saves only the parameters on the stack)
    void CallRuntime(Inst *inst, EntrypointId id, Reg dst_reg, std::initializer_list<std::variant<Reg, Imm>> params,
                     RegMask preserved_regs = {});

    template <typename... Args>
    void CallRuntimeWithMethod(Inst *inst, void *method, EntrypointId eid, Reg dst_reg, Args &&... params)
    {
        if (GetGraph()->IsAotMode()) {
            ScopedTmpReg method_reg(GetEncoder());
            LoadMethod(method_reg);
            CallRuntime(inst, eid, dst_reg, {method_reg, params...});
        } else {
            if (Is64BitsArch(GetArch())) {
                CallRuntime(inst, eid, dst_reg, {Imm(reinterpret_cast<uint64_t>(method)), params...});
            } else {
                // uintptr_t causes problems on host cross-jit compilation
                CallRuntime(inst, eid, dst_reg, {Imm(down_cast<uint32_t>(method)), params...});
            }
        }
    }

    void SaveRegistersForImplicitRuntime(Inst *inst, RegMask *params_mask, RegMask *mask);

    void VisitNewArray(Inst *inst);

    void LoadClassFromObject(Reg class_reg, Reg obj_reg);
    void CreateCall(CallInst *call_inst);
    void VisitCallIndirect(CallIndirectInst *inst);
    void VisitCall(CallInst *inst);
    void CreateUnresolvedVirtualMethodLoad(CallInst *vcall, Reg method);
    void CreateVirtualCall(CallInst *call_inst);
    void CreateDynamicCall(CallInst *call_inst);
    void CreateCallIntrinsic(IntrinsicInst *inst);
    void CreateMultiArrayCall(CallInst *call_inst);
    void CreateNewObjCall(NewObjectInst *new_obj);
    void CreateNewObjCallOld(NewObjectInst *new_obj);
    void CreateMonitorCall(MonitorInst *inst);
    void CreateMonitorCallOld(MonitorInst *inst);
    void CreateCheckCastInterfaceCall(Inst *inst);
    void CreateNonDefaultInitClass(ClassInst *init_inst);
    void CreatePreWRB(Inst *inst, MemRef mem, bool store_pair = false);
    void CreatePostWRB(Inst *inst, MemRef mem, Reg reg1, Reg reg2 = INVALID_REGISTER);
    void EncodePostWRB(Inst *inst, MemRef mem, Reg reg1, Reg reg2, bool check_nullptr = true);
    void CreatePostInterRegionBarrier(Inst *inst, MemRef mem, Reg reg1, Reg reg2, bool check_nullptr);
    void CreatePostInterGenerationalBarrier(MemRef mem);
    void CallBarrier(RegMask live_regs, VRegMask live_vregs, EntrypointId id,
                     const std::initializer_list<std::variant<Reg, Imm>> &params);
    void CreateLoadClassFromPLT(Inst *inst, Reg tmp_reg, Reg dst, size_t class_id);
    void CreateJumpToClassResolverPltShared(Inst *inst, Reg tmp_reg, RuntimeInterface::EntrypointId id);
    void CreateLoadTLABInformation(Reg reg_tlab_start, Reg reg_tlab_size);
    void CreateCheckForTLABWithConstSize(Inst *inst, Reg reg_tlab_start, Reg reg_tlab_size, size_t size,
                                         LabelHolder::LabelId label);
    void CreateDebugRuntimeCallsForNewObject(Inst *inst, Reg reg_tlab_start, size_t alloc_size, RegMask preserved);
    void CreateDebugRuntimeCallsForCreateString(Inst *inst, Reg dst);
    void CreateReturn(const Inst *inst);

    // The function aligns up the value from alignment_reg using tmp_reg.
    void CreateAlignmentValue(Reg alignment_reg, Reg tmp_reg, size_t alignment);
    void TryInsertImplicitNullCheck(Inst *inst, size_t prevOffset);

    const CFrameLayout &GetFrameLayout() const
    {
        return frame_layout_;
    }

    bool RegisterKeepCallArgument(CallInst *call_inst, Reg reg);

    void LoadMethod(Reg dst);
    void LoadFreeSlot(Reg dst);
    void StoreFreeSlot(Reg src);

    ssize_t GetStackOffset(Location location)
    {
        if (location.GetKind() == LocationType::STACK_ARGUMENT) {
            return location.GetValue() * GetFrameLayout().GetSlotSize();
        }

        if (location.GetKind() == LocationType::STACK_PARAMETER) {
            return GetFrameLayout().GetFrameSize<CFrameLayout::BYTES>() +
                   (location.GetValue() * GetFrameLayout().GetSlotSize());
        }

        ASSERT(location.GetKind() == LocationType::STACK);
        return GetFrameLayout().GetSpillOffsetFromSpInBytes(location.GetValue());
    }

    MemRef GetMemRefForSlot(Location location)
    {
        ASSERT(location.IsAnyStack());
        return MemRef(SpReg(), GetStackOffset(location));
    }

    Reg SpReg() const
    {
        return GetTarget().GetStackReg();
    }

    Reg FpReg() const
    {
        return GetTarget().GetFrameReg();
    }

    bool HasLiveCallerSavedRegs(Inst *inst);
    void SaveCallerRegisters(RegMask live_regs, VRegMask live_vregs, bool adjust_regs);
    void LoadCallerRegisters(RegMask live_regs, VRegMask live_vregs, bool adjust_regs);

    // Initializes internal variables
    void Initialize();

    const Disassembly *GetDisasm() const
    {
        return &disasm_;
    }

    Disassembly *GetDisasm()
    {
        return &disasm_;
    }

    void AddLiveOut(const BasicBlock *bb, const Register reg)
    {
        live_outs_[bb].Set(reg);
    }

    RegMask GetLiveOut(const BasicBlock *bb) const
    {
        auto it = live_outs_.find(bb);
        return it != live_outs_.end() ? it->second : RegMask();
    }

    Reg ThreadReg() const
    {
        return Reg(GetThreadReg(GetArch()), GetTarget().GetPtrRegType());
    }

    static bool InstEncodedWithLibCall(const Inst *inst, Arch arch);

protected:
    virtual void GeneratePrologue();
    virtual void GenerateEpilogue();

    // Main logic steps
    bool BeginMethod();
    bool VisitGraph();
    void EndMethod();
    bool CopyToCodeCache();
    void DumpCode();

    RegMask GetUsedRegs() const
    {
        return used_regs_;
    }
    RegMask GetUsedVRegs() const
    {
        return used_vregs_;
    }

    void FillCallParams(const std::initializer_list<std::variant<Reg, Imm>> &params);

    void EmitJump(const BasicBlock *bb);

    bool EmitCallRuntimeCode(Inst *inst, EntrypointId id);

    void PrepareAndEmitCallVirtual(CallInst *call_inst);

    void IntfInlineCachePass(CallInst *call_inst, Reg method_reg, Reg tmp_reg, Reg obj_reg);

    void EmitCallVirtual(Reg method_reg);

    void PrepareCallVirtualAot(CallInst *call_inst, Reg method_reg);
    void PrepareCallVirtual(CallInst *call_inst, Reg method_reg);

    uint32_t GetVtableShift();

    void CalculateCardIndex(MemRef mem, ScopedTmpReg *tmp, ScopedTmpReg *tmp1);

    void EmitGetUnresolvedCalleeMethod(CallInst *call_inst);

    void EmitCreateCallCode(CallInst *call_inst);

    void EmitEpilogueForCreateCall(CallInst *call_inst);

    void CreateBuiltinIntrinsic(IntrinsicInst *inst);
    static constexpr int32_t NUM_OF_SRC_BUILTIN = 6;
    static constexpr uint8_t FIRST_OPERAND = 0;
    static constexpr uint8_t SECOND_OPERAND = 1;
    static constexpr uint8_t THIRD_OPERAND = 2;
    static constexpr uint8_t FOURTH_OPERAND = 3;
    static constexpr uint8_t FIFTH_OPERAND = 4;
    using SRCREGS = std::array<Reg, NUM_OF_SRC_BUILTIN>;
    // The implementation is generated with compiler/optimizer/templates/intrinsics/intrinsics_codegen.inl.erb
    void FillBuiltin(IntrinsicInst *inst, SRCREGS src, Reg dst, RegMask *mask);
    static Reg AcquireNonLiveReg(RegMask *mask);

    void AddParamRegsInLiveMasks(RegMask *live_regs, VRegMask *live_vregs,
                                 const std::initializer_list<std::variant<Reg, Imm>> &params);

    void CreateStubCall(Inst *inst, RuntimeInterface::IntrinsicId intrinsicId, Reg dst,
                        const std::initializer_list<std::variant<Reg, Imm>> &params);

    ScopedTmpReg CalculatePreviousTLABAllocSize(Reg reg, LabelHolder::LabelId label);
    friend class IntrinsicCodegenTest;

    virtual void IntrinsicSlowPathEntry([[maybe_unused]] IntrinsicInst *inst)
    {
        GetEncoder()->SetFalseResult();
    }
    virtual void IntrinsicCallRuntimeSaveAll([[maybe_unused]] IntrinsicInst *inst)
    {
        GetEncoder()->SetFalseResult();
    }
    virtual void IntrinsicSaveRegisters([[maybe_unused]] IntrinsicInst *inst)
    {
        GetEncoder()->SetFalseResult();
    }
    virtual void IntrinsicRestoreRegisters([[maybe_unused]] IntrinsicInst *inst)
    {
        GetEncoder()->SetFalseResult();
    }
    virtual void IntrinsicTailCall([[maybe_unused]] IntrinsicInst *inst)
    {
        GetEncoder()->SetFalseResult();
    }

#include "codegen_language_extensions.h"
#include "intrinsics_codegen.inl.h"

private:
    template <typename T>
    void EncodeImms(const T &imms)
    {
        auto param_info = GetCallingConvention()->GetParameterInfo(0);
        auto imm_type = DataType::INT32;
        for (auto imm : imms) {
            auto location = param_info->GetNextLocation(imm_type);
            ASSERT(location.IsFixedRegister());
            auto dst_reg = ConvertRegister(location.GetValue(), imm_type);
            GetEncoder()->EncodeMov(dst_reg, Imm(imm));
        }
    }

private:
    ArenaAllocator *allocator_;
    ArenaAllocator *local_allocator_;
    // Register description
    RegistersDescription *regfile_;
    // Encoder implementation
    Encoder *enc_;
    // Target architecture calling convention model
    CallingConvention *callconv_;
    // Current execution model implementation
    // Visitor for instructions
    GraphVisitor *visitor_ {};

    CodeInfoBuilder *code_builder_ {nullptr};

    ArenaVector<SlowPathBase *> slow_paths_;
    ArenaUnorderedMap<RuntimeInterface::EntrypointId, SlowPathShared *> slow_paths_map_;

    const CFrameLayout frame_layout_;  // NOLINT(readability-identifier-naming)

    ArenaVector<OsrEntryStub *> osr_entries_;

    RuntimeInterface::MethodId method_id_ {INVALID_ID};

    size_t start_code_offset_ {0};

    ArenaVector<std::pair<int16_t, int16_t>> vreg_indices_;

    RuntimeInterface *runtime_ {nullptr};

    LabelHolder::LabelId label_entry_ {};
    LabelHolder::LabelId label_exit_ {};

    FrameInfo *frame_info_ {nullptr};

    const Target target_;

    /* Registers that have been allocated by regalloc */
    RegMask used_regs_ {0};
    RegMask used_vregs_ {0};

    /* Map from BasicBlock to live-out registers mask. It is needed in epilogue encoding to avoid overwriting the
     * live-out registers */
    ArenaUnorderedMap<const BasicBlock *, RegMask> live_outs_;

    Disassembly disasm_;

    SpillFillsResolver spill_fills_resolver_;

    friend class EncodeVisitor;
    friend class BaselineCodegen;

    void CreateStubCall(RuntimeInterface::IntrinsicId intrinsicId, Reg dst,
                        const std::initializer_list<std::variant<Reg, Imm>> &params);
};  // Codegen

class EncodeVisitor : public GraphVisitor {
    using EntrypointId = RuntimeInterface::EntrypointId;

public:
    explicit EncodeVisitor(Codegen *cg) : cg_(cg), arch_(cg->GetArch()) {}

    EncodeVisitor() = delete;

    const ArenaVector<BasicBlock *> &GetBlocksToVisit() const override
    {
        return cg_->GetGraph()->GetBlocksRPO();
    }
    Codegen *GetCodegen() const
    {
        return cg_;
    }
    Encoder *GetEncoder()
    {
        return cg_->GetEncoder();
    }
    Arch GetArch() const
    {
        return arch_;
    }
    CallingConvention *GetCallingConvention()
    {
        return cg_->GetCallingConvention();
    }

    RegistersDescription *GetRegfile()
    {
        return cg_->GetRegfile();
    }

    bool GetResult()
    {
        return success_ && cg_->GetEncoder()->GetResult();
    }

    // For each group of SpillFillData representing spill or fill operations and
    // sharing the same source and destination types, order by stack slot number in descending order.
    static void SortSpillFillData(ArenaVector<SpillFillData> *spill_fills);
    // Checks whether two spill-fill operations can be coalesced into a single operation over a pair of arguments.
    static bool CanCombineSpillFills(SpillFillData pred, SpillFillData succ, const CFrameLayout &fl,
                                     const Graph *graph);

protected:
    // UnaryOperation
    static void VisitMov(GraphVisitor *visitor, Inst *inst);
    static void VisitNeg(GraphVisitor *visitor, Inst *inst);
    static void VisitAbs(GraphVisitor *visitor, Inst *inst);
    static void VisitNot(GraphVisitor *visitor, Inst *inst);
    static void VisitSqrt(GraphVisitor *visitor, Inst *inst);

    // BinaryOperation
    static void VisitAdd(GraphVisitor *visitor, Inst *inst);
    static void VisitSub(GraphVisitor *visitor, Inst *inst);
    static void VisitMul(GraphVisitor *visitor, Inst *inst);
    static void VisitShl(GraphVisitor *visitor, Inst *inst);
    static void VisitAShr(GraphVisitor *visitor, Inst *inst);
    static void VisitAnd(GraphVisitor *visitor, Inst *inst);
    static void VisitOr(GraphVisitor *visitor, Inst *inst);
    static void VisitXor(GraphVisitor *visitor, Inst *inst);

    // Binary Overflow Operation
    static void VisitAddOverflow(GraphVisitor *v, Inst *inst);
    static void VisitAddOverflowCheck(GraphVisitor *v, Inst *inst);
    static void VisitSubOverflow(GraphVisitor *v, Inst *inst);
    static void VisitSubOverflowCheck(GraphVisitor *v, Inst *inst);

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinaryImmOperation(opc) static void Visit##opc##I(GraphVisitor *visitor, Inst *inst);

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BINARRY_IMM_OPS(DEF) DEF(Add) DEF(Sub) DEF(Shl) DEF(AShr) DEF(And) DEF(Or) DEF(Xor)

    BINARRY_IMM_OPS(BinaryImmOperation)

#undef BINARRY_IMM_OPS
#undef BinaryImmOperation

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinarySignUnsignOperation(opc) static void Visit##opc(GraphVisitor *visitor, Inst *inst);

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SIGN_UNSIGN_OPS(DEF) DEF(Div) DEF(Mod) DEF(Min) DEF(Max) DEF(Shr)

    SIGN_UNSIGN_OPS(BinarySignUnsignOperation)

#undef SIGN_UNSIGN_OPS
#undef BinarySignUnsignOperation

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define BinaryShiftedRegisterOperationDef(opc, ignored) static void Visit##opc##SR(GraphVisitor *visitor, Inst *inst);

    ENCODE_INST_WITH_SHIFTED_OPERAND(BinaryShiftedRegisterOperationDef)

#undef BinaryShiftedRegisterOperationDef

    static void VisitShrI(GraphVisitor *visitor, Inst *inst);

    static void VisitCast(GraphVisitor *visitor, Inst *inst);

    static void VisitPhi([[maybe_unused]] GraphVisitor *visitor, [[maybe_unused]] Inst *inst);

    static void VisitConstant(GraphVisitor *visitor, Inst *inst);

    static void VisitNullPtr(GraphVisitor *visitor, Inst *inst);

    // The following visitors use the calling convention
    static void VisitIndirectJump(GraphVisitor *visitor, Inst *inst);

    static void VisitIf(GraphVisitor *visitor, Inst *inst);

    static void VisitIfImm(GraphVisitor *visitor, Inst *inst);

    static void VisitCompare(GraphVisitor *visitor, Inst *inst);

    static void VisitCmp(GraphVisitor *visitor, Inst *inst);

    // All the following visitors use the execution model for implementation
    static void VisitReturnVoid(GraphVisitor *visitor, Inst * /* unused */);

    static void VisitReturn(GraphVisitor *visitor, Inst *inst);

    static void VisitReturnI(GraphVisitor *visitor, Inst *inst);

    static void VisitReturnInlined(GraphVisitor *visitor, Inst * /* unused */);

    static void VisitNewArray(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadConstArray(GraphVisitor *visitor, Inst *inst);

    static void VisitFillConstArray(GraphVisitor *visitor, Inst *inst);

    static void VisitParameter(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreArray(GraphVisitor *visitor, Inst *inst);

    static void VisitSpillFill(GraphVisitor *visitor, Inst *inst);

    static void VisitSaveState(GraphVisitor *visitor, Inst *inst);

    static void VisitSaveStateDeoptimize(GraphVisitor *visitor, Inst *inst);

    static void VisitSaveStateOsr(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadArray(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadCompressedStringChar(GraphVisitor *visitor, Inst *inst);

    static void VisitLenArray(GraphVisitor *visitor, Inst *inst);

    static void VisitNullCheck(GraphVisitor *visitor, Inst *inst);

    static void VisitBoundsCheck(GraphVisitor *visitor, Inst *inst);

    static void VisitZeroCheck(GraphVisitor *visitor, Inst *inst);

    static void VisitRefTypeCheck(GraphVisitor *visitor, Inst *inst);

    static void VisitNegativeCheck(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadString(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadObject(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedLoadObject(GraphVisitor *visitor, Inst *inst);

    static void VisitLoad(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreObject(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedStoreObject(GraphVisitor *visitor, Inst *inst);

    static void VisitStore(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedLoadStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedStoreStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitNewObject(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadClass(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadAndInitClass(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedLoadAndInitClass(GraphVisitor *visitor, Inst *inst);

    static void VisitInitClass(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedInitClass(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadType(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedLoadType(GraphVisitor *visitor, Inst *inst);

    static void VisitCheckCast(GraphVisitor *visitor, Inst *inst);

    static void VisitIsInstance(GraphVisitor *visitor, Inst *inst);

    static void VisitMonitor(GraphVisitor *visitor, Inst *inst);

    static void VisitIntrinsic(GraphVisitor *visitor, Inst *inst);

    static void VisitBuiltin(GraphVisitor *visitor, Inst *inst);

    static void VisitBoundsCheckI(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreArrayI(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadArrayI(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadCompressedStringCharI(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadI(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreI(GraphVisitor *visitor, Inst *inst);

    static void VisitMultiArray(GraphVisitor *visitor, Inst *inst);

    static void VisitCallStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedCallStatic(GraphVisitor *visitor, Inst *inst);

    static void VisitCallVirtual(GraphVisitor *visitor, Inst *inst);

    static void VisitUnresolvedCallVirtual(GraphVisitor *visitor, Inst *inst);

    static void VisitCallDynamic(GraphVisitor *visitor, Inst *inst);

    static void VisitSafePoint(GraphVisitor *visitor, Inst *inst);

    static void VisitSelect(GraphVisitor *visitor, Inst *inst);

    static void VisitSelectImm(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadArrayPair(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadArrayPairI(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadPairPart(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreArrayPair(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreArrayPairI(GraphVisitor *visitor, Inst *inst);

    static void VisitLoadExclusive(GraphVisitor *visitor, Inst *inst);

    static void VisitStoreExclusive(GraphVisitor *visitor, Inst *inst);

    static void VisitNOP(GraphVisitor *visitor, Inst *inst);

    static void VisitThrow(GraphVisitor *visitor, Inst *inst);

    static void VisitDeoptimizeIf(GraphVisitor *visitor, Inst *inst);

    static void VisitDeoptimizeCompare(GraphVisitor *visitor, Inst *inst);

    static void VisitDeoptimizeCompareImm(GraphVisitor *visitor, Inst *inst);

    static void VisitDeoptimize(GraphVisitor *visitor, Inst *inst);

    static void VisitIsMustDeoptimize(GraphVisitor *visitor, Inst *inst);

    static void VisitMAdd(GraphVisitor *visitor, Inst *inst);
    static void VisitMSub(GraphVisitor *visitor, Inst *inst);
    static void VisitMNeg(GraphVisitor *visitor, Inst *inst);
    static void VisitOrNot(GraphVisitor *visitor, Inst *inst);
    static void VisitAndNot(GraphVisitor *visitor, Inst *inst);
    static void VisitXorNot(GraphVisitor *visitor, Inst *inst);
    static void VisitNegSR(GraphVisitor *visitor, Inst *inst);

    static void VisitGetInstanceClass(GraphVisitor *visitor, Inst *inst);
    static void VisitGetManagedClassObject(GraphVisitor *visitor, Inst *inst);
    static void VisitClassImmediate(GraphVisitor *visitor, Inst *inst);
    static void VisitRegDef(GraphVisitor *visitor, Inst *inst);
    static void VisitLiveIn(GraphVisitor *visitor, Inst *inst);
    static void VisitLiveOut(GraphVisitor *visitor, Inst *inst);
    static void VisitCallIndirect(GraphVisitor *visitor, Inst *inst);
    static void VisitCall(GraphVisitor *visitor, Inst *inst);

    // Dynamic instructions
    static void VisitCompareAnyType(GraphVisitor *visitor, Inst *inst);
    static void VisitCastAnyTypeValue(GraphVisitor *visitor, Inst *inst);
    static void VisitCastValueToAnyType(GraphVisitor *visitor, Inst *inst);
    static void VisitAnyTypeCheck(GraphVisitor *visitor, Inst *inst);

    void VisitDefault([[maybe_unused]] Inst *inst) override
    {
#ifndef NDEBUG
        COMPILER_LOG(DEBUG, CODEGEN) << "Can't encode instruction " << GetOpcodeString(inst->GetOpcode())
                                     << " with type " << DataType::ToString(inst->GetType());
#endif
        success_ = false;
    }

    // Helper functions
    static void FillUnresolvedClass(GraphVisitor *visitor, Inst *inst);
    static void FillObjectClass(GraphVisitor *visitor, Reg tmp_reg, LabelHolder::LabelId throw_label);
    static void FillOtherClass(GraphVisitor *visitor, Inst *inst, Reg tmp_reg, LabelHolder::LabelId throw_label);
    static void FillArrayObjectClass(GraphVisitor *visitor, Reg tmp_reg, LabelHolder::LabelId throw_label);
    static void FillArrayClass(GraphVisitor *visitor, Inst *inst, Reg tmp_reg, LabelHolder::LabelId throw_label);
    static void FillInterfaceClass(GraphVisitor *visitor, Inst *inst);

    static void FillLoadClassUnresolved(GraphVisitor *visitor, Inst *inst);

    static void FillCheckCast(GraphVisitor *visitor, Inst *inst, Reg src, LabelHolder::LabelId end_label,
                              compiler::ClassType klass_type);

    static void FillIsInstanceUnresolved(GraphVisitor *visitor, Inst *inst);

    static void FillIsInstanceCaseObject(GraphVisitor *visitor, Inst *inst, Reg tmp_reg);

    static void FillIsInstanceCaseOther(GraphVisitor *visitor, Inst *inst, Reg tmp_reg, LabelHolder::LabelId end_label);

    static void FillIsInstanceCaseArrayObject(GraphVisitor *visitor, Inst *inst, Reg tmp_reg,
                                              LabelHolder::LabelId end_label);

    static void FillIsInstanceCaseArrayClass(GraphVisitor *visitor, Inst *inst, Reg tmp_reg,
                                             LabelHolder::LabelId end_label);

    static void FillIsInstanceCaseInterface(GraphVisitor *visitor, Inst *inst);

    static void FillIsInstance(GraphVisitor *visitor, Inst *inst, Reg tmp_reg, LabelHolder::LabelId end_label);

#include "optimizer/ir/visitor.inc"

private:
    static void VisitDynamicMethodParameter(GraphVisitor *visitor, Inst *inst);
    static void HandleDynParamPassed(const SpillFillData &sf, EncodeVisitor *enc);
    static void HandleDynParamNotPassed(const SpillFillData &sf, EncodeVisitor *enc);
    static void CastToAny(GraphVisitor *visitor, Inst *inst);

private:
    Codegen *cg_;
    Arch arch_;
    bool success_ {true};
};  // EncodeVisitor

}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_CODEGEN_H_
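
GetStackOffset above distinguishes three stack location kinds: outgoing call arguments addressed upward from SP, incoming parameters addressed past the whole frame, and spill slots resolved through CFrameLayout. A standalone restatement under assumed numbers (8-byte slots, a 96-byte frame, a made-up spill base; the real values all come from CFrameLayout):

#include <cstdio>

// Hypothetical constants; the real code queries CFrameLayout.
enum class LocationKind { STACK_ARGUMENT, STACK_PARAMETER, STACK };
constexpr long SLOT_SIZE = 8;    // assumed slot size in bytes
constexpr long FRAME_SIZE = 96;  // assumed total frame size in bytes
constexpr long SPILL_BASE = 16;  // assumed SP offset of the spill area

// Mirrors the shape of Codegen::GetStackOffset: SP-relative byte offset of a slot.
long StackOffset(LocationKind kind, long slot)
{
    switch (kind) {
        case LocationKind::STACK_ARGUMENT:
            return slot * SLOT_SIZE;               // outgoing arguments sit just above SP
        case LocationKind::STACK_PARAMETER:
            return FRAME_SIZE + slot * SLOT_SIZE;  // caller-pushed parameters live beyond the frame
        case LocationKind::STACK:
        default:
            return SPILL_BASE + slot * SLOT_SIZE;  // spills; really resolved via CFrameLayout
    }
}

int main()
{
    std::printf("arg1=%ld param0=%ld spill2=%ld\n",
                StackOffset(LocationKind::STACK_ARGUMENT, 1),
                StackOffset(LocationKind::STACK_PARAMETER, 0),
                StackOffset(LocationKind::STACK, 2));  // arg1=8 param0=96 spill2=32
    return 0;
}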
@@ -1,105 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "codegen_native.h"
#include "optimizer/code_generator/method_properties.h"

namespace panda::compiler {

void CodegenNative::CreateFrameInfo()
{
    auto &fl = GetFrameLayout();
    auto frame = GetGraph()->GetLocalAllocator()->New<FrameInfo>(
        FrameInfo::PositionedCallers::Encode(true) | FrameInfo::PositionedCallees::Encode(true) |
        FrameInfo::CallersRelativeFp::Encode(false) | FrameInfo::CalleesRelativeFp::Encode(true));
    frame->SetFrameSize(fl.GetFrameSize<CFrameLayout::BYTES>());
    frame->SetSpillsCount(fl.GetSpillsCount());

    frame->SetCallersOffset(
        fl.GetOffset<CFrameLayout::SP, CFrameLayout::SLOTS>(fl.GetStackStartSlot() + fl.GetCallerLastSlot(false)));
    frame->SetFpCallersOffset(
        fl.GetOffset<CFrameLayout::SP, CFrameLayout::SLOTS>(fl.GetStackStartSlot() + fl.GetCallerLastSlot(true)));
    frame->SetCalleesOffset(
        -fl.GetOffset<CFrameLayout::FP, CFrameLayout::SLOTS>(fl.GetStackStartSlot() + fl.GetCalleeLastSlot(false)));
    frame->SetFpCalleesOffset(
        -fl.GetOffset<CFrameLayout::FP, CFrameLayout::SLOTS>(fl.GetStackStartSlot() + fl.GetCalleeLastSlot(true)));

    ASSERT(!GetGraph()->GetMethodProperties().GetRequireFrameSetup());
    // we don't need to set up the frame in native mode
    frame->SetSetupFrame(false);
    // we need to save the FP and LR registers only for non-leaf methods
    frame->SetSaveFrameAndLinkRegs(!GetGraph()->GetMethodProperties().IsLeaf());
    // we never need to save unused registers in native mode
    frame->SetSaveUnusedCalleeRegs(false);
    // we have to sub/add SP in the prologue/epilogue in the following cases:
    // - non-leaf method
    // - leaf method with spills or parameters on the stack
    frame->SetAdjustSpReg(!GetGraph()->GetMethodProperties().IsLeaf() || GetGraph()->GetStackSlotsCount() != 0 ||
                          GetGraph()->GetMethodProperties().GetHasParamsOnStack());
    SetFrameInfo(frame);
}

void CodegenNative::GeneratePrologue()
{
    SCOPED_DISASM_STR(this, "Method Prologue");

    GetCallingConvention()->GenerateNativePrologue(*GetFrameInfo());

    if (GetGraph()->IsDynamicMethod()) {
        GenerateExtensionsForPrologue();
    }

#if defined(EVENT_METHOD_ENTER_ENABLED) && EVENT_METHOD_ENTER_ENABLED != 0
    if (GetGraph()->IsAotMode()) {
        SCOPED_DISASM_STR(this, "LoadMethod for trace");
        ScopedTmpReg method_reg(GetEncoder());
        LoadMethod(method_reg);
        InsertTrace({Imm(static_cast<size_t>(TraceId::METHOD_ENTER)), method_reg,
                     Imm(static_cast<size_t>(events::MethodEnterKind::COMPILED))});
    } else {
        InsertTrace({Imm(static_cast<size_t>(TraceId::METHOD_ENTER)),
                     Imm(reinterpret_cast<size_t>(GetGraph()->GetMethod())),
                     Imm(static_cast<size_t>(events::MethodEnterKind::COMPILED))});
    }
#endif
}

void CodegenNative::GenerateEpilogue()
{
    ASSERT(GetGraph()->GetMethodProperties().IsLeaf());
    SCOPED_DISASM_STR(this, "Method Epilogue");

    if (GetGraph()->IsDynamicMethod()) {
        GenerateExtensionsForEpilogue();
    }

#if defined(EVENT_METHOD_EXIT_ENABLED) && EVENT_METHOD_EXIT_ENABLED != 0
    GetCallingConvention()->GenerateNativeEpilogue(*GetFrameInfo(), [this]() {
        if (GetGraph()->IsAotMode()) {
            ScopedTmpReg method_reg(GetEncoder());
            LoadMethod(method_reg);
            InsertTrace({Imm(static_cast<size_t>(TraceId::METHOD_EXIT)), method_reg,
                         Imm(static_cast<size_t>(events::MethodExitKind::COMPILED))});
        } else {
            InsertTrace({Imm(static_cast<size_t>(TraceId::METHOD_EXIT)),
                         Imm(reinterpret_cast<size_t>(GetGraph()->GetMethod())),
                         Imm(static_cast<size_t>(events::MethodExitKind::COMPILED))});
        }
    });
#else
    GetCallingConvention()->GenerateNativeEpilogue(*GetFrameInfo(), []() {});
#endif
}
}  // namespace panda::compiler
|
@ -1,45 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_CODEGEN_NATIVE_H
#define COMPILER_OPTIMIZER_CODEGEN_CODEGEN_NATIVE_H

#include "optimizer/code_generator/codegen.h"

namespace panda::compiler {
/**
 * CodegenNative provides support for 'Native' calling convention.
 */
class CodegenNative : public Codegen {
public:
    explicit CodegenNative(Graph *graph) : Codegen(graph) {}
    NO_MOVE_SEMANTIC(CodegenNative);
    NO_COPY_SEMANTIC(CodegenNative);

    ~CodegenNative() override = default;

    const char *GetPassName() const override
    {
        return "CodegenNative";
    }
    void CreateFrameInfo() override;

protected:
    void GeneratePrologue() override;
    void GenerateEpilogue() override;
};  // CodegenNative
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_CODEGEN_NATIVE_H
@ -1,213 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <mutex>
#include "codegen.h"
#include "disassembly.h"

namespace panda::compiler {

// clang-format off
static constexpr std::array INDENT_STRINGS = {
    "  ",
    "    ",
    "      ",
    "        ",
    "          ",
    "            ",
    "              ",
    "                ",
    "                  ",
};
// clang-format on

static const char *GetIndent(uint32_t depth)
{
    return INDENT_STRINGS[depth];
}

static constexpr auto StreamDeleter = [](std::ostream *stream) {
    if (!options.IsCompilerDisasmDumpStdout()) {
        delete stream;
    }
};

Disassembly::Disassembly(const Codegen *codegen)
    : codegen_(codegen),
      encoder_(codegen->GetEncoder()),
      stream_(nullptr, StreamDeleter),
      is_enabled_(options.IsCompilerDisasmDump()),
      is_code_enabled_(is_enabled_ && options.IsCompilerDisasmDumpCode())
{
}

void Disassembly::Init()
{
    if (!IsEnabled()) {
        return;
    }
    if (options.IsCompilerDisasmDumpStdout()) {
        stream_.reset(&std::cout);
    } else if (options.IsCompilerDisasmDumpSingleFile()) {
        auto stm = new std::ofstream;
        if (stm == nullptr) {
            UNREACHABLE();
        }
        static std::once_flag flag;
        auto file_name = options.GetCompilerDisasmDumpFileName();
        std::call_once(flag, [&file_name]() { std::remove(file_name.c_str()); });
        stm->open(file_name, std::ios_base::app);
        if (!stm->is_open()) {
            LOG(FATAL, COMPILER) << "Cannot open '" << file_name << "'";
        }
        stream_.reset(stm);
    } else {
        auto stm = new std::ofstream;
        if (stm == nullptr) {
            UNREACHABLE();
        }
        std::stringstream ss;
        auto graph = codegen_->GetGraph();
        auto exec_num = graph->GetPassManager()->GetExecutionCounter();
        ss << "disasm_" << exec_num << '_' << codegen_->GetRuntime()->GetClassNameFromMethod(graph->GetMethod()) << '_'
           << codegen_->GetRuntime()->GetMethodName(graph->GetMethod()) << (graph->IsOsrMode() ? "_osr" : "") << ".txt";
        stm->open(ss.str());
        if (!stm->is_open()) {
            LOG(FATAL, COMPILER) << "Cannot open '" << ss.str() << "'";
        }
        stream_.reset(stm);
    }
}

static void PrintChapter(std::ostream &stream, const char *name)
{
    stream << name << ":\n";
}

void Disassembly::IncreaseDepth()
{
    if ((depth_ + 1) < INDENT_STRINGS.size()) {
        depth_++;
    }
}

void Disassembly::PrintMethodEntry(const Codegen *codegen)
{
    static constexpr const char *indent = "  ";
    auto &stream = GetStream();
    auto graph = codegen->GetGraph();
    stream << "======================================================================\n";
    PrintChapter(stream, "METHOD_INFO");
    stream << indent << "name: " << codegen->GetRuntime()->GetMethodFullName(graph->GetMethod(), true) << std::endl;
    stream << indent << "mode: ";
    graph->GetMode().Dump(stream);
    stream << std::endl;
    stream << indent << "id: " << codegen->GetRuntime()->GetMethodId(graph->GetMethod()) << std::endl;
    if (graph->IsAotMode()) {
        stream << indent << "code_offset: " << reinterpret_cast<void *>(graph->GetAotData()->GetCodeOffset())
               << std::endl;
    }

    auto arch = codegen->GetArch();
    auto frame = codegen->GetFrameInfo();
    stream << indent << "frame_size: " << frame->GetFrameSize() << std::endl;
    stream << indent << "spills_count: " << frame->GetSpillsCount() << std::endl;
    stream << indent << "Callees: " << (frame->GetCalleesRelativeFp() ? "fp" : "sp") << std::showpos
           << frame->GetCalleesOffset() << std::noshowpos << " (" << GetCalleeRegsCount(arch, false) << ")"
           << std::endl;
    stream << indent << "FpCallees: " << (frame->GetCalleesRelativeFp() ? "fp" : "sp") << std::showpos
           << frame->GetFpCalleesOffset() << std::noshowpos << " (" << GetCalleeRegsCount(arch, true) << ")"
           << std::endl;
    stream << indent << "Callers: " << (frame->GetCallersRelativeFp() ? "fp" : "sp") << std::showpos
           << frame->GetCallersOffset() << std::noshowpos << " (" << GetCallerRegsCount(arch, false) << ")"
           << std::endl;
    stream << indent << "FpCallers: " << (frame->GetCallersRelativeFp() ? "fp" : "sp") << std::showpos
           << frame->GetFpCallersOffset() << std::noshowpos << " (" << GetCallerRegsCount(arch, true) << ")"
           << std::endl;
    if (IsCodeEnabled()) {
        PrintChapter(stream, "DISASSEMBLY");
    }
}

void Disassembly::PrintCodeInfo(const Codegen *codegen)
{
    auto &stream = GetStream();
    auto graph = codegen->GetGraph();

    CodeInfo code_info;
    ASSERT(!graph->GetCodeInfoData().empty());
    code_info.Decode(graph->GetCodeInfoData());
    PrintChapter(stream, "CODE_INFO");
    code_info.Dump(stream);
}

void Disassembly::PrintCodeStatistics(const Codegen *codegen)
{
    auto &stream = GetStream();
    auto graph = codegen->GetGraph();

    PrintChapter(stream, "CODE_STATS");
    stream << "  code_size: " << std::dec << graph->GetData().Size() << std::endl;
}

void Disassembly::FlushDisasm([[maybe_unused]] const Codegen *codegen)
{
    auto encoder = GetEncoder();
    auto &stream = GetStream();
    for (size_t pc = GetPosition(); pc < (encoder->GetCursorOffset());) {
        stream << GetIndent(GetDepth());
        auto new_pc = encoder->DisasmInstr(stream, pc, 0);
        stream << std::endl;
        pc = new_pc;
    }
    SetPosition(encoder->GetCursorOffset());
}

void Disassembly::PrintStackMap(const Codegen *codegen)
{
    FlushDisasm(codegen);
    auto &stream = GetStream();
    stream << GetIndent(GetDepth());
    codegen->GetCodeBuilder()->DumpCurrentStackMap(stream);
    stream << std::endl;
}

ScopedDisasmPrinter::ScopedDisasmPrinter(Codegen *codegen, const Inst *inst) : disasm_(codegen->GetDisasm())
{
    if (disasm_->IsCodeEnabled()) {
        disasm_->FlushDisasm(codegen);
        disasm_->GetStream() << GetIndent(disasm_->GetDepth()) << "# [inst] " << *inst << std::endl;
        disasm_->IncreaseDepth();
    }
}

ScopedDisasmPrinter::ScopedDisasmPrinter(Codegen *codegen, const std::string &msg) : disasm_(codegen->GetDisasm())
{
    if (disasm_->IsCodeEnabled()) {
        disasm_->FlushDisasm(codegen);
        disasm_->GetStream() << GetIndent(disasm_->GetDepth()) << "# " << msg << std::endl;
        disasm_->IncreaseDepth();
    }
}

ScopedDisasmPrinter::~ScopedDisasmPrinter()
{
    if (disasm_->IsCodeEnabled()) {
        disasm_->FlushDisasm(nullptr);
        disasm_->DecreaseDepth();
    }
}

}  // namespace panda::compiler
@ -1,130 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_DISASSEMBLY_H
#define PANDA_DISASSEMBLY_H

#include <fstream>
#include <string>
#include <variant>
#include <memory>
#include "compiler_options.h"
#include "macros.h"

namespace panda::compiler {

class Codegen;
class Encoder;
class Inst;
class StackMap;

class Disassembly {
public:
    explicit Disassembly(const Codegen *codegen);
    ~Disassembly() = default;
    NO_COPY_SEMANTIC(Disassembly);
    NO_MOVE_SEMANTIC(Disassembly);

    void Init();

    std::ostream &GetStream()
    {
        return *stream_;
    }
    uint32_t GetDepth() const
    {
        return depth_;
    }
    uint32_t GetPosition() const
    {
        return position_;
    }
    void SetPosition(uint32_t pos)
    {
        position_ = pos;
    }
    const Encoder *GetEncoder() const
    {
        return encoder_;
    }
    void SetEncoder(const Encoder *encoder)
    {
        encoder_ = encoder;
    }
    void IncreaseDepth();
    void DecreaseDepth()
    {
        depth_--;
    }
    bool IsEnabled() const
    {
        return is_enabled_;
    }
    bool IsCodeEnabled() const
    {
        return is_code_enabled_;
    }

    void PrintMethodEntry(const Codegen *codegen);
    void PrintCodeInfo(const Codegen *codegen);
    void PrintCodeStatistics(const Codegen *codegen);
    void PrintStackMap(const Codegen *codegen);

private:
    void FlushDisasm(const Codegen *codegen);

private:
    using StreamDeleterType = void (*)(std::ostream *stream);
    const Codegen *codegen_ {nullptr};
    const Encoder *encoder_ {nullptr};
    std::unique_ptr<std::ostream, StreamDeleterType> stream_;
    uint32_t depth_ {0};
    uint32_t position_ {0};
    bool is_enabled_ {false};
    bool is_code_enabled_ {false};

    friend class ScopedDisasmPrinter;
};

class ScopedDisasmPrinter {
public:
    ScopedDisasmPrinter(Codegen *codegen, const std::string &msg);
    ScopedDisasmPrinter(Codegen *codegen, const Inst *inst);
    ~ScopedDisasmPrinter();

    NO_COPY_SEMANTIC(ScopedDisasmPrinter);
    NO_MOVE_SEMANTIC(ScopedDisasmPrinter);

private:
    Disassembly *disasm_ {nullptr};
};

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define DISASM_VAR_CONCAT2(a, b) a##b
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define DISASM_VAR_CONCAT(a, b) DISASM_VAR_CONCAT2(a, b)
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SCOPED_DISASM_INST(codegen, inst) ScopedDisasmPrinter DISASM_VAR_CONCAT(disasm_, __LINE__)(codegen, inst)
#ifndef NDEBUG
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SCOPED_DISASM_STR(codegen, str) ScopedDisasmPrinter DISASM_VAR_CONCAT(disasm_, __LINE__)(codegen, str)
#else
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define SCOPED_DISASM_STR(codegen, str) (void)codegen
#endif

}  // namespace panda::compiler

#endif  // PANDA_DISASSEMBLY_H
File diff suppressed because it is too large
@ -1,209 +0,0 @@
# Encoder library

## Building

The current implementation does not build outside the compiler.

## Sub-modules description

There are two major variable parts: the architecture and the execution model. The current design aims to make switching along these two axes easy.

### Register file (concrete implementation is architecture dependent)
RegistersDescription is the class that provides access to all available information about the registers used on the target architecture: the number of available registers, their types and sizes, and the description of the special and temporary registers. It may also hold low-level information about callee- and caller-saved registers. It is also responsible for converting abstract registers to architecture-specific ones (for example, to vixl registers). A query sketch is shown below.

This file must not use other interfaces.
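
For illustration, a minimal sketch of querying such a description; the method names are reused from the WIP code example at the end of this README, and the exact signatures are assumptions:
```
// Sketch only: names follow the WIP example below; exact signatures are assumptions.
auto register_description = enc->GetRegisterDescription();

// Query a special register, e.g. the stack pointer:
auto sp = register_description->GetSpecialRegister(SpecialReg::stack_pointer);
ASSERT(sp.IsValid());

// Query the list of registers available to the register allocator:
auto available_regs = register_description->GetRegAllocatable();
```
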
### Encoder (concrete implementation is architecture dependent)
Encoder is the class that emits the main set of instructions: arithmetic and memory operations.

This class uses RegistersDescription.

### Calling Convention (concrete implementation is architecture dependent)
CallingConvention is the class that controls calls and jumps, spills/fills registers, and provides access to native parameters. For that purpose it keeps a list of labels, so the concrete address of each branch target is known.

This class uses the Encoder and RegistersDescription classes.

The stack layout must look like the following:
```
----------+----------------------+
Caller    | Default calling convention frame:
Frame     | Caller saved + parameters, if they
          | don't fit in registers
          |
----------+-Encoder-Frame-Start--+   For CallingConvention::Begin(FUNCTION):
  --------+----------------------+   (CallConv::PushHeader) {
  Pre     | lr                   |   |
  Header  | fp                   |   |
          | 1-st param           |   | - (method)
          | alignment reg        |   (push one reg for padding)
  --------+----------------------+   } // (CallConv::PushHeader)
  --------+----------------------+   (CallConv::PushRegs(GetCalleeSavedR + V)) {
  Header  | scalar registers     |   |
  callee  | + optional align-reg |   |
          | vector registers     |   |
          | + optional align-reg |   |
  --------+----------------------+   } // (CallConv::PushRegs(GetCalleeSavedR + V))
  --------+----------------------+   CallConv::IncrementStack(SIZE_IN_BYTES)
          |                      |
          |                      |   Memory, available for user
          |                      |
  --------+----------------------+   CallingConvention::Begin(NATIVE):
  --------+----------------------+   (CallConv::PushRegs(GetCallerSavedR + V)) {
  Header  | scalar registers     |   |
  caller  | + optional align-reg |   |
          | vector registers     |   |
          | + optional align-reg |   |
  --------+----------------------+   } // (CallConv::PushRegs(GetCallerSavedR + V))
  Param   | Here will be         |
          | parameters, if they  |   Must be manually filled
          | don't fit in regs    |
          |                      |
----------+-Encoder-Frame-End----+
----------+----------------------+
Native    | Default calling convention frame:
Frame     | Callee saved ... etc.
          |
```

### Execution model (concrete implementation - must be architecture independent)
ExecModel is the class that controls instruction emission. It must be able to create default headers for the emitted code and implement execution-specific code, e.g. access to interpreter ExecState values.

This class uses all of the classes above.

### Codegen (concrete implementation - must be architecture independent)
Codegen must not be part of the Encoder library, to avoid a dependency on the compiler. It also must not depend on a specific architecture.

This class also uses all of the classes above and can switch between architectures or models.

## Operands

### TypeInfo

Class **TypeInfo** contains information about the supported types: BOOL, INT8, INT16, INT32, INT64, FLOAT32, FLOAT64.
You can query the following information about a type: its size, and whether it is scalar or vector.
Example:
```
auto type = TypeInfo(FLOAT32); // TypeInfo for the float type
ASSERT(type.GetSize() == 32 && type.IsFloat());
```
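
The mapping from IR data types is defined by `TypeInfo::FromDataType` in `operands.h` (its listing appears later in this diff); a small sketch, with `Arch::AARCH64` chosen only for illustration:
```
// POINTER maps to a 64-bit scalar type on 64-bit targets (see TypeInfo::FromDataType).
auto ptr_type = TypeInfo::FromDataType(DataType::POINTER, Arch::AARCH64);
ASSERT(ptr_type.GetSize() == 64 && ptr_type.IsScalar());
```
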
### Register

Class **Reg** contains the register number (id) and a **TypeInfo**.
You can query the following information about a register: type, id, size, and whether it is scalar or vector.

Example:
```
auto reg = Reg(0, TypeInfo(INT32)); // scalar word register
ASSERT(reg.GetId() == 0 && reg.GetType() == TypeInfo(INT32) && reg.GetSize() == 32 && reg.IsScalar());
```

### Immediate

Class **Imm** contains a value of one of the following types: int8_t, int16_t, int32_t, int64_t, float, double.
You can query the following information about an immediate: type, value, size.

Example:
```
double value = 123.456;
auto imm = Imm(value); // double immediate
ASSERT(imm.GetValue<double>() == value && imm.GetType() == TypeInfo(FLOAT64) && imm.GetSize() == 64 &&
       !imm.IsScalar());
```
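
One detail worth noting: `Imm` stores only signed integer (and floating-point) alternatives, and the unsigned constructors in `operands.h` (shown later in this diff) cast the value to the signed type of the same width. A small sketch of the consequence:
```
// Unsigned values are stored as the signed type of the same width
// (see the Imm constructors in operands.h).
uint32_t raw = 0xDEADBEEFU;
auto imm = Imm(raw); // held internally as int32_t
ASSERT(imm.GetType() == TypeInfo(INT32) && imm.GetValue<int32_t>() == static_cast<int32_t>(raw));
```
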
### Memory

Class **MemRef** contains a base **Reg**, an index **Reg**, a scale **Imm** and a disp **Imm**.
The memory address is calculated using the following formula: `base + (index << scale) + disp`
If a parameter is not defined, it is considered equal to 0. The base must be defined.

Example:
```
// memory with base register and disp
auto base_reg = Reg(5, TypeInfo(INT64));
auto disp = Imm(static_cast<int64_t>(16));
auto mem_disp = MemRef(base_reg, disp); // base_reg + disp
ASSERT(mem_disp.HasBase() && !mem_disp.HasIndex() && !mem_disp.HasScale() && mem_disp.HasDisp());
ASSERT(mem_disp.GetBase() == base_reg && mem_disp.GetDisp() == disp);
ASSERT(mem_disp.GetIndex() == INVALID_REGISTER && mem_disp.GetScale() == INVALID_IMM);

// memory with base, index registers and scale
auto base_reg = Reg(5, TypeInfo(INT64));
auto index_reg = Reg(6, TypeInfo(INT64));
auto scale = Imm(static_cast<int32_t>(3));
auto mem_scale = MemRef(base_reg, index_reg, scale); // base_reg + (index_reg << scale)
ASSERT(mem_scale.HasBase() && mem_scale.HasIndex() && mem_scale.HasScale() && !mem_scale.HasDisp());
ASSERT(mem_scale.GetBase() == base_reg && mem_scale.GetIndex() == index_reg && mem_scale.GetScale() == scale);
ASSERT(mem_scale.GetDisp() == INVALID_IMM);
```
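
To make the address formula concrete, here is a worked example with illustrative values (not taken from real code):
```
// Worked example of base + (index << scale) + disp, illustrative values only:
uintptr_t base = 0x1000;  // value held in the base register
uintptr_t index = 2;      // value held in the index register
unsigned scale = 3;       // shift applied to the index
intptr_t disp = 16;       // displacement
uintptr_t address = base + (index << scale) + disp;  // 0x1000 + 0x10 + 0x10 = 0x1020
```
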
## Code Example (WIP)

```
// Example of using the encoder library
/* == Sequence to create and configure an encoder == */
// 1. Create an encoder for the specific architecture
Encoder *enc = buildEncoder(Arch);
// 2. Create an execution model - must be target independent
ExecModel *exec = buildExecutionModel(Model::JIT);
// 3. Create a calling convention model - architecture dependent
CallingConvention *callconv = buildCallingConvention(exec, Arch);
// 4. Configure the encoder
enc->Configure(exec, callconv);
/*======= Here the encoder is fully configured =======*/
// Usage:


// Method start header
callconv->Begin(CallingConvention::Reason::StartMethod);
callconv->CallerStart(CallingConvention::Reason::StartMethod); // (?)

// Use encoding:
// Get register information
auto register_description = enc->GetRegisterDescription();
// Get special registers data
auto sp = register_description->GetSpecialRegister(SpecialReg::stack_pointer);
ASSERT(sp.IsValid());
// Get the list of all unreserved registers
auto available_regs = register_description->GetRegAllocatable();

// Build a memory operand
auto memory = enc->BuildMem(sp, callconv->GetParamOffset(2));

auto tmp1 = available_regs[0];
enc->EncodeLoad(tmp1, memory);

auto tmp2 = available_regs[1];
enc->EncodeMov(tmp2, Imm(0x123));

enc->EncodeAdd(tmp1, tmp2, tmp1);

auto label = callconv->CreateLabel();
// or CreateLabel<type>

callconv->EncodeJump(label, CallingConvention::Reason::Jump);

callconv->BindLabel(label);
enc->EncodeAdd(tmp1, tmp2, tmp1);

enc->EncodeStore(tmp1, memory);
callconv->EncodeBranch(label);

callconv->CallerEnd(CallingConvention::Reason::ExitMethod); // (?)
callconv->EncodeReturn(CallingConvention::Reason::ExitMethod);
// End of method
callconv->End(CallingConvention::Reason::EndMethod);

enc->Finalize();
auto code = enc->GetEncodeData();
code_allocator->Allocate(code.start(), code.size());

/// Example from the encoder part

void Aarch64Encoder::EncodeAShr(Reg dst, Reg src0, Reg src1) {
    if (dst.GetSize() < MAX_REG_SIZE) {
        __ And(VixlReg(src1), VixlReg(src1), VixlImm(dst.GetSize() - 1));
    }
    __ Asr(VixlReg(dst), VixlReg(src0), VixlReg(src1));
}
```
@ -1,154 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_FRAME_INFO_H
#define PANDA_FRAME_INFO_H

#include "libpandabase/utils/cframe_layout.h"
#include "libpandabase/utils/bit_field.h"
#include "libpandabase/mem/mem.h"

namespace panda::compiler {

class Encoder;
class Graph;

/**
 * This class describes layout of the frame being compiled.
 */
class FrameInfo {
public:
    explicit FrameInfo(uint32_t fields) : fields_(fields) {}
    ~FrameInfo() = default;
    NO_COPY_SEMANTIC(FrameInfo);
    NO_MOVE_SEMANTIC(FrameInfo);

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_GET_ATTR(name, var) \
    auto Get##name() const             \
    {                                  \
        return var;                    \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_SET_ATTR(name, var)                            \
    void Set##name(ssize_t val)                                   \
    {                                                             \
        ASSERT(val <= std::numeric_limits<decltype(var)>::max()); \
        ASSERT(val >= std::numeric_limits<decltype(var)>::min()); \
        var = val;                                                \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_ATTR(name, var) \
    FRAME_INFO_GET_ATTR(name, var) \
    FRAME_INFO_SET_ATTR(name, var)

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_GET_FIELD(name, type) \
    type Get##name() const               \
    {                                    \
        return name::Get(fields_);       \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_SET_FIELD(name, type) \
    void Set##name(type val)             \
    {                                    \
        name::Set(val, &fields_);        \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define FRAME_INFO_FIELD(name, type) \
    FRAME_INFO_GET_FIELD(name, type) \
    FRAME_INFO_SET_FIELD(name, type)

    FRAME_INFO_ATTR(FrameSize, frame_size_);
    FRAME_INFO_ATTR(SpillsCount, spills_count_);
    FRAME_INFO_ATTR(CallersOffset, callers_offset_);
    FRAME_INFO_ATTR(CalleesOffset, callees_offset_);
    FRAME_INFO_ATTR(FpCallersOffset, fp_callers_offset_);
    FRAME_INFO_ATTR(FpCalleesOffset, fp_callees_offset_);
    FRAME_INFO_FIELD(PositionedCallers, bool);
    FRAME_INFO_FIELD(PositionedCallees, bool);
    FRAME_INFO_FIELD(CallersRelativeFp, bool);
    FRAME_INFO_FIELD(CalleesRelativeFp, bool);
    // SaveFrameAndLinkRegs - save/restore FP and LR registers in prologue/epilogue.
    FRAME_INFO_FIELD(SaveFrameAndLinkRegs, bool);
    // SetupFrame - setup CFrame (aka. 'managed' frame).
    // Namely, set FP reg, method and flags in prologue.
    FRAME_INFO_FIELD(SetupFrame, bool);
    // SaveUnusedCalleeRegs - save/restore used+unused callee-saved registers in prologue/epilogue.
    FRAME_INFO_FIELD(SaveUnusedCalleeRegs, bool);
    // AdjustSpReg - sub SP,#framesize in prologue and add SP,#framesize in epilogue.
    FRAME_INFO_FIELD(AdjustSpReg, bool);
    FRAME_INFO_FIELD(HasFloatRegs, bool);

    using PositionedCallers = BitField<bool, 0, 1>;
    using PositionedCallees = PositionedCallers::NextFlag;
    using CallersRelativeFp = PositionedCallees::NextFlag;
    using CalleesRelativeFp = CallersRelativeFp::NextFlag;
    using SaveFrameAndLinkRegs = CalleesRelativeFp::NextFlag;
    using SetupFrame = SaveFrameAndLinkRegs::NextFlag;
    using SaveUnusedCalleeRegs = SetupFrame::NextFlag;
    using AdjustSpReg = SaveUnusedCalleeRegs::NextFlag;
    using HasFloatRegs = AdjustSpReg::NextFlag;

    // The following static 'constructors' are for situations
    // when we have to generate prologue/epilogue but there is
    // no codegen at hand (some tests etc.)
    // 'Leaf' means a prologue for a function which does not call
    // any other functions (library, runtime etc.)
    static FrameInfo LeafPrologue()
    {
        return FrameInfo(AdjustSpReg::Encode(true));
    }

    // 'Native' means just a regular prologue, that is used for native functions.
    // 'Native' is also used for Irtoc.
    static FrameInfo NativePrologue()
    {
        return FrameInfo(AdjustSpReg::Encode(true) | SaveFrameAndLinkRegs::Encode(true) |
                         SaveUnusedCalleeRegs::Encode(true));
    }

    // 'Full' means NativePrologue + setting up frame (set FP, method and flags),
    // i.e. a prologue for managed code.
    static FrameInfo FullPrologue()
    {
        return FrameInfo(AdjustSpReg::Encode(true) | SaveFrameAndLinkRegs::Encode(true) |
                         SaveUnusedCalleeRegs::Encode(true) | SetupFrame::Encode(true));
    }

#undef FRAME_INFO_GET_ATTR
#undef FRAME_INFO_SET_ATTR
#undef FRAME_INFO_ATTR
#undef FRAME_INFO_GET_FIELD
#undef FRAME_INFO_SET_FIELD
#undef FRAME_INFO_FIELD

private:
    uint32_t fields_ {0};
    int32_t frame_size_ {0};
    int16_t spills_count_ {0};
    // Offset to caller registers storage (in words)
    int16_t callers_offset_ {0};
    int16_t callees_offset_ {0};
    int16_t fp_callers_offset_ {0};
    int16_t fp_callees_offset_ {0};
};
}  // namespace panda::compiler

#endif  // PANDA_FRAME_INFO_H
@ -1,59 +0,0 @@
/**
 * Copyright (c) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_LIB_CALL_INST_H
#define COMPILER_OPTIMIZER_CODEGEN_LIB_CALL_INST_H

#include "compiler/optimizer/ir/graph.h"
#include "compiler/optimizer/ir/inst.h"

namespace panda::compiler {
inline bool HasLibCall(Inst *inst, Arch arch)
{
    auto opcode = inst->GetOpcode();
    auto type = inst->GetType();
    switch (arch) {
        case Arch::X86_64:
        case Arch::AARCH64: {
            if (opcode == Opcode::Mod) {
                return DataType::IsFloatType(type);
            }
            return false;
        }
        case Arch::AARCH32: {
            if (opcode == Opcode::Mod) {
                return true;
            }
            if (opcode == Opcode::Div) {
                return type == DataType::INT64 || type == DataType::UINT64;
            }
            if (opcode == Opcode::Cast) {
                auto src_type = inst->GetInputType(0);
                if (DataType::IsFloatType(type)) {
                    return src_type == DataType::INT64 || src_type == DataType::UINT64;
                }
                if (DataType::IsFloatType(src_type)) {
                    return type == DataType::INT64 || type == DataType::UINT64;
                }
            }
            return false;
        }
        default:
            UNREACHABLE();
    }
}
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_LIB_CALL_INST_H
@ -1,82 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "optimizer/code_generator/codegen.h"
#include "optimizer/ir/graph.h"
#include "optimizer/ir/graph_visitor.h"
#include "optimizer/code_generator/method_properties.h"

namespace panda::compiler {

MethodProperties::MethodProperties(const Graph *graph)
{
    for (auto bb : graph->GetBlocksRPO()) {
        // Calls may be in the middle of a method
        for (auto inst : bb->Insts()) {
            if (inst->IsInitObject()) {
                ASSERT(options.IsCompilerSupportInitObjectInst());
            }
            if (inst->GetFlag(inst_flags::CAN_DEOPTIMIZE)) {
                SetHasDeopt(true);
            }
            if (inst->GetOpcode() == Opcode::Return || inst->GetOpcode() == Opcode::ReturnI ||
                inst->GetOpcode() == Opcode::ReturnVoid) {
                last_return_ = inst;
            }
            if (!GetHasParamsOnStack() && inst->GetOpcode() == Opcode::Parameter) {
                auto sf = static_cast<const ParameterInst *>(inst)->GetLocationData();
                if (sf.DstValue() != INVALID_REG && sf.SrcType() == LocationType::STACK_PARAMETER) {
                    SetHasParamsOnStack(true);
                }
            }
            if (inst->GetOpcode() == Opcode::SafePoint) {
                SetHasSafepoints(true);
            }
            if (inst->IsCall() || inst->IsIntrinsic()) {
                SetHasCalls(true);
            }
            if (Codegen::InstEncodedWithLibCall(inst, graph->GetArch())) {
                SetHasLibCalls(true);
            }
            if (inst->IsRuntimeCall()) {
                SetHasRuntimeCalls(true);
            }
            if (inst->RequireState()) {
                SetHasRequireState(true);
            }
            if (inst->CanThrow()) {
                SetCanThrow(true);
            }
        }
    }

    /**
     * "Compact" prologue/epilogue means that unused callee-saved registers
     * are not saved in the prologue and restored in the epilogue.
     *
     * 1. We support the compact prologue/epilogue only for AARCH64.
     *    The reasons are as follows:
     *    - for X86_64 we're reserving almost all callee-saved registers for temporaries
     *    - for AARCH32 we're treating all callee-saved registers as "used".
     *    Thus there is no sense in supporting the compact prologue for these targets.
     *
     * 2. We don't support the compact prologue/epilogue for OSR, to simplify the OSR entry bridge.
     */
    SetCompactPrologueAllowed(graph->GetArch() == Arch::AARCH64 && !graph->IsOsrMode() &&
                              options.IsCompilerCompactPrologue());

    SetRequireFrameSetup(!IsLeaf() || graph->IsOsrMode());
}
}  // namespace panda::compiler
@ -1,104 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_METHOD_PROPERTIES_H
#define COMPILER_OPTIMIZER_CODEGEN_METHOD_PROPERTIES_H

#include <cstddef>
#include <iostream>
#include "libpandabase/mem/arena_allocator.h"
#include "libpandabase/utils/bit_field.h"

namespace panda::compiler {
class Graph;

class MethodProperties {
public:
    explicit MethodProperties(const Graph *graph);
    MethodProperties(const MethodProperties &) = default;
    MethodProperties &operator=(const MethodProperties &) = default;
    MethodProperties(MethodProperties &&) = default;
    MethodProperties &operator=(MethodProperties &&) = default;
    ~MethodProperties() = default;

    static MethodProperties *Create(ArenaAllocator *arena_allocator, const Graph *graph)
    {
        return arena_allocator->New<MethodProperties>(graph);
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define MPROP_GET_FIELD(name, type) \
    type Get##name() const          \
    {                               \
        return name::Get(fields_);  \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define MPROP_SET_FIELD(name, type) \
    void Set##name(type val)        \
    {                               \
        name::Set(val, &fields_);   \
    }

// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
#define MPROP_FIELD(name, type) \
    MPROP_GET_FIELD(name, type) \
    MPROP_SET_FIELD(name, type)

    using FieldsTy = uint32_t;

    MPROP_FIELD(CanThrow, bool);
    MPROP_FIELD(HasCalls, bool);
    MPROP_FIELD(HasLibCalls, bool);
    MPROP_FIELD(HasRuntimeCalls, bool);
    MPROP_FIELD(HasSafepoints, bool);
    MPROP_FIELD(HasRequireState, bool);
    MPROP_FIELD(HasDeopt, bool);
    MPROP_FIELD(HasParamsOnStack, bool);
    MPROP_FIELD(CompactPrologueAllowed, bool);
    MPROP_FIELD(RequireFrameSetup, bool);

    using CanThrow = BitField<bool, 0, 1>;
    using HasCalls = CanThrow::NextFlag;
    using HasLibCalls = HasCalls::NextFlag;
    using HasRuntimeCalls = HasLibCalls::NextFlag;
    using HasSafepoints = HasRuntimeCalls::NextFlag;
    using HasRequireState = HasSafepoints::NextFlag;
    using HasDeopt = HasRequireState::NextFlag;
    using HasParamsOnStack = HasDeopt::NextFlag;
    using CompactPrologueAllowed = HasParamsOnStack::NextFlag;
    using RequireFrameSetup = CompactPrologueAllowed::NextFlag;

    bool IsLeaf() const
    {
        return !GetHasCalls() && !GetHasRuntimeCalls() && !GetHasLibCalls() && !GetHasRequireState() && !GetCanThrow();
    }

    Inst *GetLastReturn() const
    {
        return last_return_;
    }

#undef MPROP_GET_FIELD
#undef MPROP_SET_FIELD
#undef MPROP_FIELD

private:
    Inst *last_return_ {nullptr};
    FieldsTy fields_ {0};
};
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_METHOD_PROPERTIES_H
@ -1,940 +0,0 @@
|
||||
/**
|
||||
* Copyright (c) 2021-2022 Huawei Device Co., Ltd.
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef COMPILER_OPTIMIZER_CODEGEN_OPERANDS_H_
|
||||
#define COMPILER_OPTIMIZER_CODEGEN_OPERANDS_H_
|
||||
|
||||
/*
|
||||
Arch-feature definitions
|
||||
*/
|
||||
#include <bitset>
|
||||
#include <cstdint>
|
||||
|
||||
#include "utils/arch.h"
|
||||
#include "utils/arena_containers.h"
|
||||
#include "utils/bit_field.h"
|
||||
#include "utils/bit_utils.h"
|
||||
#include "utils/regmask.h"
|
||||
#include "compiler/optimizer/ir/constants.h"
|
||||
#include "compiler/optimizer/ir/datatype.h"
|
||||
#include "utils/type_helpers.h"
|
||||
|
||||
#ifdef __clang_analyzer__
|
||||
#ifdef PANDA_TARGET_ARM32
|
||||
#define __arm__
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace panda::compiler {
|
||||
constexpr uint8_t BYTE_SIZE = 8;
|
||||
constexpr uint8_t HALF_SIZE = 16;
|
||||
constexpr uint8_t WORD_SIZE = 32;
|
||||
constexpr uint8_t DOUBLE_WORD_SIZE = 64;
|
||||
constexpr uint8_t WORD_SIZE_BYTE = 4;
|
||||
constexpr uint8_t DOUBLE_WORD_SIZE_BYTE = 8;
|
||||
constexpr uint8_t QUAD_WORD_SIZE_BYTE = 16;
|
||||
/// Maximum possible registers count (for scalar and for vector):
|
||||
constexpr uint8_t MAX_NUM_REGS = 32;
|
||||
constexpr uint8_t MAX_NUM_VREGS = 32;
|
||||
|
||||
constexpr uint64_t NAN_DOUBLE = uint64_t(0x7ff8000000000000);
|
||||
constexpr uint32_t NAN_FLOAT = uint32_t(0x7fc00000);
|
||||
constexpr uint32_t NAN_FLOAT_BITS = NAN_FLOAT >> 16U;
|
||||
|
||||
// Constants for cast from float to int64:
|
||||
// The number of the bit from which exponential part starts in float
|
||||
constexpr uint8_t START_EXP_FLOAT = 23;
|
||||
// Size exponential part in float
|
||||
constexpr uint8_t SIZE_EXP_FLOAT = 8;
|
||||
// The maximum exponential part of float that can be loaded in int64
|
||||
constexpr uint32_t POSSIBLE_EXP_FLOAT = 0xbe;
|
||||
// Mask say that float number is NaN by IEEE 754
|
||||
constexpr uint32_t UP_BITS_NAN_FLOAT = 0xff;
|
||||
|
||||
// Constants for cast from double to int64:
|
||||
// The number of the bit from which exponential part starts in double
|
||||
constexpr uint8_t START_EXP_DOUBLE = 20;
|
||||
// Size exponential part in double
|
||||
constexpr uint8_t SIZE_EXP_DOUBLE = 11;
|
||||
// The maximum exponential part of double that can be loaded in int64
|
||||
constexpr uint32_t POSSIBLE_EXP_DOUBLE = 0x43e;
|
||||
// Mask say that double number is NaN by IEEE 754
|
||||
constexpr uint32_t UP_BITS_NAN_DOUBLE = 0x7ff;
|
||||
|
||||
constexpr uint32_t SHIFT_BITS_DOUBLE = 12;
|
||||
constexpr uint32_t SHIFT_BITS_FLOAT = 9;
|
||||
|
||||
// Return true, if architecture can be encoded.
|
||||
bool BackendSupport(Arch arch);
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
|
||||
#define ENCODE_MATH_LIST(DEF) \
|
||||
DEF(Mov, UnaryOperation) \
|
||||
DEF(Neg, UnaryOperation) \
|
||||
DEF(Abs, UnaryOperation) \
|
||||
DEF(Not, UnaryOperation) \
|
||||
DEF(Add, BinaryOperation) \
|
||||
DEF(Sub, BinaryOperation) \
|
||||
DEF(Mul, BinaryOperation) \
|
||||
DEF(Shl, BinaryOperation) \
|
||||
DEF(Shr, BinaryOperation) \
|
||||
DEF(AShr, BinaryOperation) \
|
||||
DEF(And, BinaryOperation) \
|
||||
DEF(Or, BinaryOperation) \
|
||||
DEF(Xor, BinaryOperation) \
|
||||
DEF(Sqrt, UnaryOperation)
|
||||
|
||||
// NOLINTNEXTLINE(cppcoreguidelines-macro-usage)
|
||||
#define ENCODE_INST_WITH_SHIFTED_OPERAND(DEF) \
|
||||
DEF(And, BinaryShiftedRegisterOperation) \
|
||||
DEF(Or, BinaryShiftedRegisterOperation) \
|
||||
DEF(Xor, BinaryShiftedRegisterOperation) \
|
||||
DEF(OrNot, BinaryShiftedRegisterOperation) \
|
||||
DEF(AndNot, BinaryShiftedRegisterOperation) \
|
||||
DEF(XorNot, BinaryShiftedRegisterOperation) \
|
||||
DEF(Add, BinaryShiftedRegisterOperation) \
|
||||
DEF(Sub, BinaryShiftedRegisterOperation)
|
||||
|
||||
// Arch-independent access types
|
||||
|
||||
/**
|
||||
* Template class for identify types compile-time (nortti - can't use typeid).
|
||||
* Used in register class. Immediate class support conversion to it.
|
||||
*/
|
||||
class TypeInfo final {
|
||||
public:
|
||||
enum TypeId : uint8_t { INT8 = 0, INT16 = 1, INT32 = 2, INT64 = 3, FLOAT32 = 4, FLOAT64 = 5, INVALID = 6 };
|
||||
|
||||
/**
|
||||
* Template constructor - use template parameter for create object.
|
||||
*/
|
||||
template <class T>
|
||||
constexpr explicit TypeInfo(T /* unused */)
|
||||
{
|
||||
#ifndef __clang_analyzer__
|
||||
if constexpr (std::is_same<T, uint8_t>()) {
|
||||
type_id_ = INT8;
|
||||
} else if constexpr (std::is_same<T, int8_t>()) {
|
||||
type_id_ = INT8;
|
||||
} else if constexpr (std::is_same<T, uint16_t>()) {
|
||||
type_id_ = INT16;
|
||||
} else if constexpr (std::is_same<T, int16_t>()) {
|
||||
type_id_ = INT16;
|
||||
} else if constexpr (std::is_same<T, uint32_t>()) {
|
||||
type_id_ = INT32;
|
||||
} else if constexpr (std::is_same<T, int32_t>()) {
|
||||
type_id_ = INT32;
|
||||
} else if constexpr (std::is_same<T, uint64_t>()) {
|
||||
type_id_ = INT64;
|
||||
} else if constexpr (std::is_same<T, int64_t>()) {
|
||||
type_id_ = INT64;
|
||||
} else if constexpr (std::is_same<T, float>()) {
|
||||
type_id_ = FLOAT32;
|
||||
} else if constexpr (std::is_same<T, double>()) {
|
||||
type_id_ = FLOAT64;
|
||||
} else {
|
||||
type_id_ = INVALID;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
constexpr explicit TypeInfo(TypeId type) : type_id_(type) {}
|
||||
|
||||
DEFAULT_MOVE_SEMANTIC(TypeInfo);
|
||||
DEFAULT_COPY_SEMANTIC(TypeInfo);
|
||||
~TypeInfo() = default;
|
||||
|
||||
/**
|
||||
* Constructor for create invalid TypeInfo
|
||||
*/
|
||||
constexpr TypeInfo() = default;
|
||||
|
||||
/**
|
||||
* Validation check
|
||||
*/
|
||||
constexpr bool IsValid() const
|
||||
{
|
||||
return type_id_ != INVALID;
|
||||
}
|
||||
|
||||
/**
|
||||
* Type expected size
|
||||
*/
|
||||
constexpr size_t GetSize() const
|
||||
{
|
||||
ASSERT(IsValid());
|
||||
switch (type_id_) {
|
||||
case INT8:
|
||||
return BYTE_SIZE;
|
||||
case INT16:
|
||||
return HALF_SIZE;
|
||||
case INT32:
|
||||
case FLOAT32:
|
||||
return WORD_SIZE;
|
||||
case INT64:
|
||||
case FLOAT64:
|
||||
return DOUBLE_WORD_SIZE;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
constexpr bool IsFloat() const
|
||||
{
|
||||
ASSERT(IsValid());
|
||||
return type_id_ == FLOAT32 || type_id_ == FLOAT64;
|
||||
}
|
||||
|
||||
constexpr bool IsScalar() const
|
||||
{
|
||||
// VOID - is scalar type here
|
||||
return !IsFloat();
|
||||
}
|
||||
|
||||
constexpr bool operator==(const TypeInfo &other) const
|
||||
{
|
||||
return (type_id_ == other.type_id_);
|
||||
}
|
||||
|
||||
constexpr bool operator!=(const TypeInfo &other) const
|
||||
{
|
||||
return !operator==(other);
|
||||
}
|
||||
|
||||
static TypeInfo FromDataType(DataType::Type type, Arch arch)
|
||||
{
|
||||
switch (type) {
|
||||
case DataType::BOOL:
|
||||
case DataType::UINT8:
|
||||
case DataType::INT8: {
|
||||
return TypeInfo(INT8);
|
||||
}
|
||||
case DataType::UINT16:
|
||||
case DataType::INT16: {
|
||||
return TypeInfo(INT16);
|
||||
}
|
||||
case DataType::UINT32:
|
||||
case DataType::INT32: {
|
||||
return TypeInfo(INT32);
|
||||
}
|
||||
case DataType::UINT64:
|
||||
case DataType::INT64:
|
||||
case DataType::ANY: {
|
||||
return TypeInfo(INT64);
|
||||
}
|
||||
case DataType::FLOAT32: {
|
||||
return TypeInfo(FLOAT32);
|
||||
}
|
||||
case DataType::FLOAT64: {
|
||||
return TypeInfo(FLOAT64);
|
||||
}
|
||||
case DataType::REFERENCE: {
|
||||
return FromDataType(DataType::GetIntTypeForReference(arch), arch);
|
||||
}
|
||||
case DataType::POINTER: {
|
||||
return Is64BitsArch(arch) ? TypeInfo(INT64) : TypeInfo(INT32);
|
||||
}
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
DataType::Type ToDataType() const
|
||||
{
|
||||
switch (type_id_) {
|
||||
case INT8:
|
||||
return DataType::INT8;
|
||||
case INT16:
|
||||
return DataType::INT16;
|
||||
case INT32:
|
||||
return DataType::INT32;
|
||||
case INT64:
|
||||
return DataType::INT64;
|
||||
case FLOAT32:
|
||||
return DataType::FLOAT32;
|
||||
case FLOAT64:
|
||||
return DataType::FLOAT64;
|
||||
default:
|
||||
UNREACHABLE();
|
||||
}
|
||||
}
|
||||
|
||||
static constexpr TypeInfo GetScalarTypeBySize(size_t size);
|
||||
|
||||
void Dump()
|
||||
{
|
||||
std::cerr << "TypeInfo:";
|
||||
switch (type_id_) {
|
||||
case INT8:
|
||||
std::cerr << "INT8";
|
||||
break;
|
||||
case INT16:
|
||||
std::cerr << "INT16";
|
||||
break;
|
||||
case INT32:
|
||||
std::cerr << "INT32";
|
||||
break;
|
||||
case FLOAT32:
|
||||
std::cerr << "FLOAT32";
|
||||
break;
|
||||
case INT64:
|
||||
std::cerr << "INT64";
|
||||
break;
|
||||
case FLOAT64:
|
||||
std::cerr << "FLOAT64";
|
||||
break;
|
||||
default:
|
||||
std::cerr << "INVALID";
|
||||
break;
|
||||
}
|
||||
std::cerr << ", size = " << GetSize();
|
||||
}
|
||||
|
||||
private:
|
||||
TypeId type_id_ {INVALID};
|
||||
};
|
||||
|
||||
constexpr TypeInfo INT8_TYPE {TypeInfo::INT8};
|
||||
constexpr TypeInfo INT16_TYPE {TypeInfo::INT16};
|
||||
constexpr TypeInfo INT32_TYPE {TypeInfo::INT32};
|
||||
constexpr TypeInfo INT64_TYPE {TypeInfo::INT64};
|
||||
constexpr TypeInfo FLOAT32_TYPE {TypeInfo::FLOAT32};
|
||||
constexpr TypeInfo FLOAT64_TYPE {TypeInfo::FLOAT64};
|
||||
constexpr TypeInfo INVALID_TYPE;
|
||||
|
||||
constexpr TypeInfo TypeInfo::GetScalarTypeBySize(size_t size)
|
||||
{
|
||||
auto type = INT64_TYPE;
|
||||
if (size == BYTE_SIZE) {
|
||||
type = INT8_TYPE;
|
||||
} else if (size == HALF_SIZE) {
|
||||
type = INT16_TYPE;
|
||||
} else if (size == WORD_SIZE) {
|
||||
type = INT32_TYPE;
|
||||
}
|
||||
return type;
|
||||
}
|
||||
|
||||
// Mapping model for registers:
|
||||
// reg-reg - support getters for small parts of registers
|
||||
// reg-other - mapping between types of registers
|
||||
enum RegMapping : uint32_t {
|
||||
SCALAR_SCALAR = 1UL << 0UL,
|
||||
SCALAR_VECTOR = 1UL << 1UL,
|
||||
SCALAR_FLOAT = 1UL << 2UL,
|
||||
VECTOR_VECTOR = 1UL << 3UL,
|
||||
VECTOR_FLOAT = 1UL << 4UL,
|
||||
FLOAT_FLOAT = 1UL << 5UL
|
||||
};
|
||||
|
||||
constexpr uint8_t INVALID_REG_ID = std::numeric_limits<uint8_t>::max();
|
||||
constexpr uint8_t ACC_REG_ID = INVALID_REG_ID - 1U;
|
||||
|
||||
class Reg final {
|
||||
public:
|
||||
using RegIDType = uint8_t;
|
||||
using RegSizeType = size_t;
|
||||
|
||||
constexpr Reg() = default;
|
||||
DEFAULT_MOVE_SEMANTIC(Reg);
|
||||
DEFAULT_COPY_SEMANTIC(Reg);
|
||||
~Reg() = default;
|
||||
|
||||
// Default register constructor
|
||||
constexpr Reg(RegIDType id, TypeInfo type) : id_(id), type_(type) {}
|
||||
|
||||
constexpr RegIDType GetId() const
|
||||
{
|
||||
return id_;
|
||||
}
|
||||
|
||||
constexpr size_t GetMask() const
|
||||
{
|
||||
return (1U << id_);
|
||||
}
|
||||
|
||||
constexpr TypeInfo GetType() const
|
||||
{
|
||||
return type_;
|
||||
}
|
||||
|
||||
RegSizeType GetSize() const
|
||||
{
|
||||
return GetType().GetSize();
|
||||
}
|
||||
|
||||
bool IsScalar() const
|
||||
{
|
||||
return GetType().IsScalar();
|
||||
}
|
||||
|
||||
bool IsFloat() const
|
||||
{
|
||||
return GetType().IsFloat();
|
||||
}
|
||||
|
||||
constexpr bool IsValid() const
|
||||
{
|
||||
return type_ != INVALID_TYPE && id_ != INVALID_REG_ID;
|
||||
}
|
||||
|
||||
Reg As(TypeInfo type) const
|
||||
{
|
||||
return Reg(GetId(), type);
|
||||
}
|
||||
|
||||
constexpr bool operator==(Reg other) const
|
||||
{
|
||||
return (GetId() == other.GetId()) && (GetType() == other.GetType());
|
||||
}
|
||||
|
||||
constexpr bool operator!=(Reg other) const
|
||||
{
|
||||
return !operator==(other);
|
||||
}
|
||||
|
||||
void Dump()
|
||||
{
|
||||
std::cerr << " Reg: id = " << static_cast<int64_t>(id_) << ", ";
|
||||
type_.Dump();
|
||||
std::cerr << "\n";
|
||||
}
|
||||
|
||||
private:
|
||||
RegIDType id_ {INVALID_REG_ID};
|
||||
TypeInfo type_ {INVALID_TYPE};
|
||||
}; // Reg
|
||||
|
||||
constexpr Reg INVALID_REGISTER = Reg();
|
||||
|
||||
static_assert(!INVALID_REGISTER.IsValid());
|
||||
static_assert(sizeof(Reg) <= sizeof(uintptr_t));
|
||||
|
||||
/**
|
||||
* Immediate class may hold only int or float values (maybe vectors in future).
|
||||
* It knows nothing about pointers and bools (bools maybe be in future).
|
||||
*/
|
||||
class Imm final {
|
||||
static inline constexpr uint8_t BITS_PER_BYTE = 8;
|
||||
static constexpr size_t UNDEFINED_SIZE = 0;
|
||||
static constexpr size_t INT8_SIZE = 8;
|
||||
static constexpr size_t INT16_SIZE = 16;
|
||||
static constexpr size_t INT32_SIZE = 32;
|
||||
static constexpr size_t INT64_SIZE = 64;
|
||||
static constexpr size_t FLOAT32_SIZE = 32;
|
||||
static constexpr size_t FLOAT64_SIZE = 64;
|
||||
|
||||
enum VariantID {
|
||||
// Pointer used for invalidate variants
|
||||
V_INT8 = 1,
|
||||
V_INT16 = 2,
|
||||
V_INT32 = 3,
|
||||
V_INT64 = 4,
|
||||
V_FLOAT32 = 5,
|
||||
V_FLOAT64 = 6,
|
||||
};
|
||||
|
||||
template <class T>
|
||||
constexpr bool CheckVariantID() const
|
||||
{
|
||||
#ifndef __clang_analyzer__
|
||||
// Immediate could be only signed (int/float)
|
||||
// look at value_-type.
|
||||
static_assert(std::is_signed<T>::value);
|
||||
if constexpr (std::is_same<T, int8_t>()) {
|
||||
return value_.index() == V_INT8;
|
||||
}
|
||||
if constexpr (std::is_same<T, int16_t>()) {
|
||||
return value_.index() == V_INT16;
|
||||
}
|
||||
if constexpr (std::is_same<T, int32_t>()) {
|
||||
return value_.index() == V_INT32;
|
||||
}
|
||||
if constexpr (std::is_same<T, int64_t>()) {
|
||||
return value_.index() == V_INT64;
|
||||
}
|
||||
if constexpr (std::is_same<T, float>()) {
|
||||
return value_.index() == V_FLOAT32;
|
||||
}
|
||||
if constexpr (std::is_same<T, double>()) {
|
||||
return value_.index() == V_FLOAT64;
|
||||
}
|
||||
return false;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
public:
|
||||
// Invalid constructor
|
||||
constexpr Imm() = default;
|
||||
|
||||
// Special type constructor
|
||||
template <class T>
|
||||
constexpr explicit Imm(T value) : value_(value)
|
||||
{
|
||||
}
|
||||
|
||||
// Partial template specialization
|
||||
constexpr explicit Imm(uint8_t value) : value_(static_cast<int8_t>(value)) {};
|
||||
|
||||
constexpr explicit Imm(uint16_t value) : value_(static_cast<int16_t>(value)) {};
|
||||
|
||||
constexpr explicit Imm(uint32_t value) : value_(static_cast<int32_t>(value)) {};
|
||||
|
||||
    constexpr explicit Imm(uint64_t value) : value_(static_cast<int64_t>(value)) {};

#if (PANDA_TARGET_MACOS)
    constexpr explicit Imm(size_t value) : value_(static_cast<int64_t>(value)) {};

    constexpr explicit Imm(long value) : value_(static_cast<int64_t>(value)) {};
#endif

    DEFAULT_MOVE_SEMANTIC(Imm);
    DEFAULT_COPY_SEMANTIC(Imm);
    ~Imm() = default;

    template <class T>
    T GetValue() const
    {
        ASSERT(CheckVariantID<T>());
        ASSERT(sizeof(T) * BITS_PER_BYTE == GetSize());
        return std::get<T>(value_);
    }

    void Inc(size_t value)
    {
        switch (value_.index()) {
            case V_INT8:
                value_ = static_cast<int8_t>(std::get<int8_t>(value_) + value);
                break;
            case V_INT16:
                value_ = static_cast<int16_t>(std::get<int16_t>(value_) + value);
                break;
            case V_INT32:
                value_ = static_cast<int32_t>(std::get<int32_t>(value_) + value);
                break;
            case V_INT64:
                value_ = static_cast<int64_t>(std::get<int64_t>(value_) + value);
                break;
            case V_FLOAT32:
                value_ = static_cast<float>(std::get<float>(value_) + value);
                break;
            case V_FLOAT64:
                value_ = static_cast<double>(std::get<double>(value_) + value);
                break;
            default:
                // Check before increment
                UNREACHABLE();
                break;
        }
    }

    void Dec(size_t value)
    {
        switch (value_.index()) {
            case V_INT8:
                value_ = static_cast<int8_t>(std::get<int8_t>(value_) - value);
                break;
            case V_INT16:
                value_ = static_cast<int16_t>(std::get<int16_t>(value_) - value);
                break;
            case V_INT32:
                value_ = static_cast<int32_t>(std::get<int32_t>(value_) - value);
                break;
            case V_INT64:
                value_ = static_cast<int64_t>(std::get<int64_t>(value_) - value);
                break;
            case V_FLOAT32:
                value_ = static_cast<float>(std::get<float>(value_) - value);
                break;
            case V_FLOAT64:
                value_ = static_cast<double>(std::get<double>(value_) - value);
                break;
            default:
                // Check before decrement
                UNREACHABLE();
                break;
        }
    }

    TypeInfo GetType() const
    {
        switch (value_.index()) {
            case V_INT8:
                return INT8_TYPE;
            case V_INT16:
                return INT16_TYPE;
            case V_INT32:
                return INT32_TYPE;
            case V_INT64:
                return INT64_TYPE;
            case V_FLOAT32:
                return FLOAT32_TYPE;
            case V_FLOAT64:
                return FLOAT64_TYPE;
            default:
                UNREACHABLE();
                return INVALID_TYPE;
        }
    }

    constexpr size_t GetSize() const
    {
        switch (value_.index()) {
            case V_INT8:
                return INT8_SIZE;
            case V_INT16:
                return INT16_SIZE;
            case V_INT32:
                return INT32_SIZE;
            case V_INT64:
                return INT64_SIZE;
            case V_FLOAT32:
                return FLOAT32_SIZE;
            case V_FLOAT64:
                return FLOAT64_SIZE;
            default:
                return UNDEFINED_SIZE;
        }
    }

    bool IsZero() const
    {
        if (std::holds_alternative<float>(value_)) {
            return std::get<float>(value_) == 0.0;
        }
        if (std::holds_alternative<double>(value_)) {
            return std::get<double>(value_) == 0.0;
        }
        if (std::holds_alternative<int8_t>(value_)) {
            return std::get<int8_t>(value_) == 0;
        }
        if (std::holds_alternative<int16_t>(value_)) {
            return std::get<int16_t>(value_) == 0;
        }
        if (std::holds_alternative<int32_t>(value_)) {
            return std::get<int32_t>(value_) == 0;
        }
        if (std::holds_alternative<int64_t>(value_)) {
            return std::get<int64_t>(value_) == 0;
        }
        return true;
    }

    bool IsFloat() const
    {
        return std::holds_alternative<float>(value_) || std::holds_alternative<double>(value_);
    }

    bool IsScalar() const
    {
        return std::holds_alternative<int8_t>(value_) || std::holds_alternative<int16_t>(value_) ||
               std::holds_alternative<int32_t>(value_) || std::holds_alternative<int64_t>(value_);
    }

    bool IsValid() const
    {
        bool hold_data = std::holds_alternative<int8_t>(value_) || std::holds_alternative<int16_t>(value_) ||
                         std::holds_alternative<int32_t>(value_) || std::holds_alternative<int64_t>(value_) ||
                         std::holds_alternative<float>(value_) || std::holds_alternative<double>(value_);
        return (GetSize() != 0) && hold_data;
    }

    unsigned GetShift()
    {
        if (GetType() == INT64_TYPE) {
            return GetValue<int64_t>();
        }
        if (GetType() == INT32_TYPE) {
            return GetValue<int32_t>();
        }
        if (GetType() == INT16_TYPE) {
            return GetValue<int16_t>();
        }
        if (GetType() == INT8_TYPE) {
            return GetValue<int8_t>();
        }
        UNREACHABLE();
        return 0;
    }

    bool operator==(Imm other) const
    {
        return value_ == other.value_;
    }

    bool operator!=(Imm other) const
    {
        return !(operator==(other));
    }

private:
    std::variant<void *, int8_t, int16_t, int32_t, int64_t, float, double> value_ {nullptr};
};  // Imm

constexpr Imm INVALID_IMM = Imm();
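
// Usage sketch (illustrative addition, not part of the original header): an Imm
// stores exactly one typed immediate, and GetValue<T>() must be called with the
// stored type, e.g.:
//
//     Imm imm(static_cast<int32_t>(42));
//     ASSERT(imm.GetType() == INT32_TYPE);   // variant holds int32_t
//     int32_t v = imm.GetValue<int32_t>();   // OK: matching type and size
//     imm.Inc(1);                            // variant now holds int32_t{43}
//     ASSERT(!INVALID_IMM.IsValid());        // default Imm holds the void* marker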
// Why a memory ref: you can create one reference per encode session, and when
// you see it you immediately understand what kind of memory access it is. If
// you loaded/stored through a raw address instead, it would have to be decoded
// each time the code is read.
// model -> base + index << scale + disp
class MemRef final {
public:
    MemRef() = default;

    explicit MemRef(Reg base) : MemRef(base, 0) {}
    MemRef(Reg base, ssize_t disp) : MemRef(base, INVALID_REGISTER, 0, disp) {}
    MemRef(Reg base, Reg index, uint16_t scale) : MemRef(base, index, scale, 0) {}
    MemRef(Reg base, Reg index, uint16_t scale, ssize_t disp) : disp_(disp), scale_(scale), base_(base), index_(index)
    {
        CHECK_LE(disp, std::numeric_limits<decltype(disp_)>::max());
        CHECK_LE(scale, std::numeric_limits<decltype(scale_)>::max());
    }
    DEFAULT_MOVE_SEMANTIC(MemRef);
    DEFAULT_COPY_SEMANTIC(MemRef);
    ~MemRef() = default;

    Reg GetBase() const
    {
        return base_;
    }
    Reg GetIndex() const
    {
        return index_;
    }
    auto GetScale() const
    {
        return scale_;
    }
    auto GetDisp() const
    {
        return disp_;
    }

    bool HasBase() const
    {
        return base_.IsValid();
    }
    bool HasIndex() const
    {
        return index_.IsValid();
    }
    bool HasScale() const
    {
        return HasIndex() && scale_ != 0;
    }
    bool HasDisp() const
    {
        return disp_ != 0;
    }
    // A valid ref must contain at least one of the fields
    bool IsValid() const
    {
        return HasBase() || HasIndex() || HasScale() || HasDisp();
    }

    // Returns true if the memory reference has neither index nor scale
    bool IsOffsetMem() const
    {
        return !HasIndex() && !HasScale();
    }

    bool operator==(MemRef other) const
    {
        return (base_ == other.base_) && (index_ == other.index_) && (scale_ == other.scale_) && (disp_ == other.disp_);
    }
    bool operator!=(MemRef other) const
    {
        return !(operator==(other));
    }

private:
    ssize_t disp_ {0};
    uint16_t scale_ {0};
    Reg base_ {INVALID_REGISTER};
    Reg index_ {INVALID_REGISTER};
};  // MemRef
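
// Illustrative sketch (not part of the original header): the constructors cover
// the addressing modes of the model above; x0 and x1 stand for valid Reg values.
//
//     MemRef m1(x0);             // [x0]
//     MemRef m2(x0, 16);         // [x0 + 16]
//     MemRef m3(x0, x1, 2);      // [x0 + (x1 << 2)]
//     MemRef m4(x0, x1, 2, 16);  // [x0 + (x1 << 2) + 16]
//     ASSERT(m2.IsOffsetMem());  // no index and no scale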
inline ArenaVector<std::pair<uint8_t, uint8_t>> ResoveParameterSequence(
    ArenaVector<std::pair<uint8_t, uint8_t>> *moved_registers, uint8_t tmp, ArenaAllocator *allocator)
{
    constexpr uint8_t INVALID_FIST = -1;
    constexpr uint8_t INVALID_SECOND = -2;

    moved_registers->emplace_back(std::pair<uint8_t, uint8_t>(INVALID_FIST, INVALID_SECOND));
    /*
    Example:
        1. mov x0 <- x3
        2. mov x1 <- x0
        3. mov x2 <- x3
        4. mov x3 <- x2
    Agreement - dst can't hold the same register more than once (no double move to one register)
              - src of a mov may hold the same register multiple times

    Algorithm:
        1. Find hanging edges (x1 - appears only as dst)
            emit "2. mov x1 <- x0"
            goto 1.
            emit "1. mov x0 <- x3"
        2. Assert each register is used exactly once (loop over the register sequence)
           All multiple definitions must have been resolved in the previous step
            emit ".. mov xtmp <- x2" (store xtmp == x3)
            emit "3. mov x2 <- x3"
            emit "4. mov x3 <- xtmp" (ASSERT(4->GetReg == x3) - there is no other possible situation here)
    */
    // Calculate weight
    ArenaVector<std::pair<uint8_t, uint8_t>> result(allocator->Adapter());
    // --moved_registers->end() - to skip the marker element
    for (auto pair = moved_registers->begin(); pair != --moved_registers->end();) {
        auto conflict = std::find_if(moved_registers->begin(), moved_registers->end(), [pair](auto in_pair) {
            return (in_pair.second == pair->first && (in_pair != *pair));
        });
        if (conflict == moved_registers->end()) {
            // emit immediately - there is no other possible combination
            result.emplace_back(*pair);
            moved_registers->erase(pair);
            pair = moved_registers->begin();
        } else {
            ++pair;
        }
    }
    // From here on, only cycles remain
    for (;;) {
        /* Need to support a single mov x1 <- x1:
           ASSERT(moved_registers->size() != 1);
        */

        auto curr_pair = moved_registers->begin();

        if (curr_pair->first == INVALID_FIST && curr_pair->second == INVALID_SECOND) {
            moved_registers->erase(curr_pair);
            break;
            // Finish the algorithm - only the marker is left in the vector
        }
        auto saved_reg = curr_pair->first;
        result.emplace_back(std::pair<uint8_t, uint8_t>(tmp, curr_pair->first));
        result.emplace_back(*curr_pair);  // we already saved the dst register

        // Remove the current instruction
        auto curr_reg = curr_pair->second;
        moved_registers->erase(curr_pair);

        for (; curr_pair != moved_registers->end();) {
            curr_pair = std::find_if(moved_registers->begin(), moved_registers->end(),
                                     [curr_reg](auto in_pair) { return in_pair.first == curr_reg; });
            if (curr_pair != moved_registers->end()) {
                if (curr_pair->second == saved_reg) {
                    result.emplace_back(std::pair<uint8_t, uint8_t>(curr_pair->first, tmp));
                    moved_registers->erase(curr_pair);
                    break;
                    // exit from the loop
                };
                result.emplace_back(*curr_pair);
                curr_reg = curr_pair->second;
                moved_registers->erase(curr_pair);
            } else {
                ASSERT(curr_pair != moved_registers->end());
            }
        }
    }
    return result;
}
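
// Worked trace (illustrative addition, not in the original source): resolving
// the example from the comment above, with tmp == xtmp and moves as (dst, src):
// input (x0,x3) (x1,x0) (x2,x3) (x3,x2).
//
// Phase 1 peels hanging edges: x1 is never a source of a pending move, so
// "mov x1 <- x0" is emitted, which in turn frees "mov x0 <- x3".
// Phase 2 breaks the remaining cycle (x2,x3)(x3,x2) through the temporary:
//     mov xtmp <- x2
//     mov x2   <- x3
//     mov x3   <- xtmp
// giving the final sequence of five moves.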
// Condition is also used to tell the comparison registers' type
enum Condition {
    EQ,  // equal to 0
    NE,  // not equal to 0
    // signed
    LT,  // less
    LE,  // less than or equal
    GT,  // greater
    GE,  // greater than or equal
    // unsigned - determined from the registers
    LO,  // less
    LS,  // less than or equal
    HI,  // greater
    HS,  // greater than or equal
    // Special arch-dependency TODO (igorban) Fix them
    MI,  // N set            Negative
    PL,  // N clear          Positive or zero
    VS,  // V set            Overflow.
    VC,  // V clear          No overflow.
    AL,  // Always.
    NV,  // Behaves as always/al.

    TST_EQ,
    TST_NE,

    INVALID_COND
};

static inline bool IsTestCc(Condition cond)
{
    return cond == TST_EQ || cond == TST_NE;
}
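
// Illustrative note (not in the original header): the signed/unsigned split
// mirrors AArch64-style condition codes, so for "a < b" a codegen would pick
//
//     Condition cc = is_signed ? Condition::LT : Condition::LO;
//
// while TST_EQ / TST_NE request a flag-setting bitwise test instead of a
// compare (see IsTestCc above).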
class Shift final {
public:
    explicit Shift(Reg base, ShiftType type, uint32_t scale) : scale_(scale), base_(base), type_(type) {}
    explicit Shift(Reg base, uint32_t scale) : Shift(base, ShiftType::LSL, scale) {}

    DEFAULT_MOVE_SEMANTIC(Shift);
    DEFAULT_COPY_SEMANTIC(Shift);
    ~Shift() = default;

    Reg GetBase() const
    {
        return base_;
    }

    ShiftType GetType() const
    {
        return type_;
    }

    uint32_t GetScale() const
    {
        return scale_;
    }

private:
    uint32_t scale_ {0};
    Reg base_;
    ShiftType type_ {INVALID_SHIFT};
};
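
// Illustrative note (not in the original header): Shift(x1, 3) describes the
// shifted-register operand "x1, LSL #3" (the value x1 << 3), because the
// two-argument constructor defaults the shift type to ShiftType::LSL.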
inline int64_t GetIntValue(Imm imm)
{
    int64_t value {0};
    auto type = imm.GetType();
    if (type == INT32_TYPE) {
        value = imm.GetValue<int32_t>();
    } else if (type == INT64_TYPE) {
        value = imm.GetValue<int64_t>();
    } else if (type == INT16_TYPE) {
        value = imm.GetValue<int16_t>();
    } else if (type == INT8_TYPE) {
        value = imm.GetValue<int8_t>();
    } else {
        // Inconsistent int-type
        UNREACHABLE();
    }
    return value;
}
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_REGISTERS_H_
@ -1,152 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef COMPILER_OPTIMIZER_CODEGEN_REGFILE_H_
#define COMPILER_OPTIMIZER_CODEGEN_REGFILE_H_

#include "operands.h"
#include "utils/arch.h"

/*
    Register file wrapper used to query target register data and for Regalloc
*/
namespace panda::compiler {
#ifdef PANDA_COMPILER_TARGET_X86_64
namespace amd64 {
static constexpr size_t RENAMING_MASK_3_5_OR_9_11 {0xE38};
static constexpr size_t RENAMING_CONST {14U};

// There is a problem with callee/caller register numbers on amd64.
// For example, take a look at the
// caller reg mask: 0000111111000111 and
// callee reg mask: 1111000000001000
// The stack walker requires these masks to be dense, so the decision is to
// rename regs number 3, 4, 5 to 11, 10, 9 (and vice versa).
// The resulting
// caller mask is 0000000111111111 and
// callee mask is 1111100000000000.
static inline constexpr size_t ConvertRegNumber(size_t reg_id)
{
    ASSERT(reg_id < MAX_NUM_REGS);
    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
    if ((RENAMING_MASK_3_5_OR_9_11 & (size_t(1) << reg_id)) != 0) {
        return RENAMING_CONST - reg_id;
    }
    return reg_id;
}
}  // namespace amd64
#endif  // PANDA_COMPILER_TARGET_X86_64
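
// Illustrative check (not in the original source): RENAMING_MASK_3_5_OR_9_11 ==
// 0xE38 == 0b1110'0011'1000 selects registers {3, 4, 5, 9, 10, 11}, and the
// renaming is its own inverse:
//
//     static_assert(amd64::ConvertRegNumber(3) == 11);
//     static_assert(amd64::ConvertRegNumber(11) == 3);
//     static_assert(amd64::ConvertRegNumber(4) == 10);
//     static_assert(amd64::ConvertRegNumber(6) == 6);  // bit 6 is not in the mask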
class RegistersDescription {
public:
    explicit RegistersDescription(ArenaAllocator *aa, Arch arch) : arena_allocator_(aa), arch_(arch) {}
    virtual ~RegistersDescription() = default;

    virtual ArenaVector<Reg> GetCalleeSaved() = 0;
    virtual void SetCalleeSaved(const ArenaVector<Reg> &) = 0;
    // Set used regs - changes GetCallee
    virtual void SetUsedRegs(const ArenaVector<Reg> &) = 0;
    // Return the zero register. If the target architecture doesn't support a zero register, return INVALID_REGISTER.
    virtual Reg GetZeroReg() const = 0;
    virtual bool IsZeroReg(Reg reg) const = 0;
    virtual Reg::RegIDType GetTempReg() = 0;
    virtual Reg::RegIDType GetTempVReg() = 0;
    // Check whether the given RegMapping bitset is supported
    virtual bool SupportMapping(uint32_t) = 0;

    virtual bool IsValid() const
    {
        return false;
    }

    virtual bool IsCalleeRegister(Reg reg) = 0;

    ArenaAllocator *GetAllocator() const
    {
        return arena_allocator_;
    }

    // May be re-defined to ignore some cases
    virtual bool IsRegUsed(ArenaVector<Reg> vec_reg, Reg reg)
    {
        // size is ignored on arm64
        auto equality = [reg](Reg in) {
            return ((reg.GetId() == in.GetId()) && (reg.GetType() == in.GetType()) &&
                    (reg.GetSize() == in.GetSize())) ||
                   (!reg.IsValid() && !in.IsValid());
        };
        return (std::find_if(vec_reg.begin(), vec_reg.end(), equality) != vec_reg.end());
    }

    static RegistersDescription *Create(ArenaAllocator *arena_allocator, Arch arch);

    RegMask GetRegMask() const
    {
        return reg_mask_.None() ? GetDefaultRegMask() : reg_mask_;
    }

    void SetRegMask(const RegMask &mask)
    {
        reg_mask_ = mask;
    }

    // Get the register mask used in codegen, runtime, etc.
    // 0 means available, 1 means unavailable for use.
    // Note that it is a default architecture-specific register mask.
    virtual RegMask GetDefaultRegMask() const = 0;

    // Get the vector register mask used in codegen, runtime, etc.
    virtual VRegMask GetVRegMask() = 0;

    virtual RegMask GetCallerSavedRegMask() const = 0;
    virtual RegMask GetCallerSavedVRegMask() const = 0;

    void FillUsedCalleeSavedRegisters(RegMask *callee_regs, VRegMask *callee_vregs, bool set_all_callee_registers)
    {
        if (set_all_callee_registers) {
            *callee_regs = RegMask(panda::GetCalleeRegsMask(arch_, false));
            *callee_vregs = VRegMask(panda::GetCalleeRegsMask(arch_, true));
        } else {
            *callee_regs = GetUsedRegsMask<RegMask, false>(GetCalleeSaved());
            *callee_vregs = GetUsedRegsMask<VRegMask, true>(GetCalleeSaved());
        }
    }

    NO_COPY_SEMANTIC(RegistersDescription);
    NO_MOVE_SEMANTIC(RegistersDescription);

private:
    ArenaAllocator *arena_allocator_ {nullptr};
    Arch arch_;
    RegMask reg_mask_ {0};

    template <typename M, bool is_fp>
    M GetUsedRegsMask(const ArenaVector<Reg> &regs)
    {
        M mask;
        for (auto reg : regs) {
            // NOLINTNEXTLINE(bugprone-branch-clone,-warnings-as-errors)
            if (reg.IsFloat() && is_fp) {
                mask.set(reg.GetId());
            } else if (reg.IsScalar() && !is_fp) {
                mask.set(reg.GetId());
            }
        }
        return mask;
    }
};
}  // namespace panda::compiler

#endif  // COMPILER_OPTIMIZER_CODEGEN_REGFILE_H_
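
// Illustrative note (not in the original source): GetUsedRegsMask splits one
// register list into a scalar or a vector mask. Assuming r0 and r1 are scalar
// Regs and v2 is a float Reg, for regs = {r0, r1, v2}:
//     GetUsedRegsMask<RegMask, false>(regs) sets bits {0, 1};
//     GetUsedRegsMask<VRegMask, true>(regs) sets bit {2}.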
@ -1,36 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef PANDA_RELOCATIONS_H
#define PANDA_RELOCATIONS_H

namespace panda::compiler {

class RelocationInfo {
public:
    uint32_t data {0};
    uint32_t offset {0};
    uint32_t type {0};
    int8_t addend {0};
};

class RelocationHandler {
public:
    virtual void AddRelocation(const RelocationInfo &info) = 0;
};

}  // namespace panda::compiler

#endif  // PANDA_RELOCATIONS_H
@ -1,288 +0,0 @@
/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "slow_path.h"
#include "codegen.h"

namespace panda::compiler {

void SlowPathBase::Generate(Codegen *codegen)
{
    ASSERT(!generated_);

    SCOPED_DISASM_STR(codegen, std::string("SlowPath for inst ") + std::to_string(GetInst()->GetId()) + ". " +
                                   GetInst()->GetOpcodeStr());
    Encoder *encoder = codegen->GetEncoder();
    ASSERT(encoder->IsValid());
    encoder->BindLabel(GetLabel());

    GenerateImpl(codegen);

    if (encoder->IsLabelValid(label_back_)) {
        codegen->GetEncoder()->EncodeJump(GetBackLabel());
    }
#ifndef NDEBUG
    generated_ = true;
#endif
}
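
// Illustrative shape of the code emitted by SlowPathBase::Generate above
// (not part of the original source):
//
//     slow_path_label:        ; BindLabel(GetLabel())
//         ...GenerateImpl...  ; e.g. a runtime entrypoint call
//         b back_label        ; only when label_back_ is valid
//
// i.e. a slow path is an out-of-line block that the fast path branches to and
// that normally jumps straight back behind the fast-path code.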

// ARRAY_INDEX_OUT_OF_BOUNDS_EXCEPTION, STRING_INDEX_OUT_OF_BOUNDS_EXCEPTION
bool SlowPathEntrypoint::GenerateThrowOutOfBoundsException(Codegen *codegen)
{
    auto len_reg = codegen->ConvertRegister(GetInst()->GetSrcReg(0), GetInst()->GetInputType(0));
    if (GetInst()->GetOpcode() == Opcode::BoundsCheckI) {
        ScopedTmpReg index_reg(codegen->GetEncoder());
        codegen->GetEncoder()->EncodeMov(index_reg, Imm(GetInst()->CastToBoundsCheckI()->GetImm()));
        codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {index_reg, len_reg});
    } else {
        ASSERT(GetInst()->GetOpcode() == Opcode::BoundsCheck);
        auto index_reg = codegen->ConvertRegister(GetInst()->GetSrcReg(1), GetInst()->GetInputType(1));
        codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {index_reg, len_reg});
    }
    return true;
}

// INITIALIZE_CLASS
bool SlowPathEntrypoint::GenerateInitializeClass(Codegen *codegen)
{
    auto inst = GetInst();
    if (GetInst()->GetDstReg() != INVALID_REG) {
        ASSERT(inst->GetOpcode() == Opcode::LoadAndInitClass);
        Reg klass_reg {codegen->ConvertRegister(GetInst()->GetDstReg(), DataType::REFERENCE)};
        RegMask preserved_regs;
        codegen->GetEncoder()->SetRegister(&preserved_regs, nullptr, klass_reg);
        codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {klass_reg}, preserved_regs);
    } else {
        ASSERT(inst->GetOpcode() == Opcode::InitClass);
        ASSERT(!codegen->GetGraph()->IsAotMode());
        auto klass = reinterpret_cast<uintptr_t>(inst->CastToInitClass()->GetClass());
        codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {Imm(klass)});
    }
    return true;
}

// IS_INSTANCE
bool SlowPathEntrypoint::GenerateIsInstance(Codegen *codegen)
{
    auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::REFERENCE);  // obj
    auto klass = codegen->ConvertRegister(GetInst()->GetSrcReg(1), DataType::REFERENCE);
    auto dst = codegen->ConvertRegister(GetInst()->GetDstReg(), GetInst()->GetType());
    codegen->CallRuntime(GetInst(), EntrypointId::IS_INSTANCE, dst, {src, klass});
    return true;
}

// CHECK_CAST
bool SlowPathEntrypoint::GenerateCheckCast(Codegen *codegen)
{
    auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::REFERENCE);  // obj
    auto klass = codegen->ConvertRegister(GetInst()->GetSrcReg(1), DataType::REFERENCE);
    codegen->CallRuntime(GetInst(), EntrypointId::CHECK_CAST, INVALID_REGISTER, {src, klass});
    return true;
}

// DEOPTIMIZE
bool SlowPathEntrypoint::GenerateDeoptimize(Codegen *codegen)
{
    DeoptimizeType type = DeoptimizeType::INVALID;
    if (GetInst()->GetOpcode() == Opcode::Deoptimize) {
        type = GetInst()->CastToDeoptimize()->GetDeoptimizeType();
    } else if (GetInst()->GetOpcode() == Opcode::DeoptimizeIf) {
        type = GetInst()->CastToDeoptimizeIf()->GetDeoptimizeType();
    } else if (GetInst()->GetOpcode() == Opcode::DeoptimizeCompare) {
        type = GetInst()->CastToDeoptimizeCompare()->GetDeoptimizeType();
    } else if (GetInst()->GetOpcode() == Opcode::DeoptimizeCompareImm) {
        type = GetInst()->CastToDeoptimizeCompareImm()->GetDeoptimizeType();
    } else if (GetInst()->GetOpcode() == Opcode::AnyTypeCheck) {
        type = DeoptimizeType::ANY_TYPE_CHECK;
    } else if (GetInst()->GetOpcode() == Opcode::AddOverflowCheck) {
        type = DeoptimizeType::DEOPT_OVERFLOW;
    } else if (GetInst()->GetOpcode() == Opcode::SubOverflowCheck) {
        type = DeoptimizeType::DEOPT_OVERFLOW;
    } else {
        UNREACHABLE();
    }
    codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {Imm(static_cast<uint8_t>(type))});
    return true;
}

// CREATE_OBJECT
bool SlowPathEntrypoint::GenerateCreateObject(Codegen *codegen)
{
    auto inst = GetInst();
    auto dst = codegen->ConvertRegister(inst->GetDstReg(), inst->GetType());
    auto src = codegen->ConvertRegister(inst->GetSrcReg(0), inst->GetInputType(0));

    codegen->CallRuntime(inst, EntrypointId::CREATE_OBJECT_BY_CLASS, dst, {src});

    return true;
}

bool SlowPathEntrypoint::GenerateByEntry(Codegen *codegen)
{
    switch (GetEntrypoint()) {
        case EntrypointId::THROW_EXCEPTION: {
            auto src = codegen->ConvertRegister(GetInst()->GetSrcReg(0), DataType::Type::REFERENCE);
            codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {src});
            return true;
        }
        case EntrypointId::NULL_POINTER_EXCEPTION:
        case EntrypointId::ARITHMETIC_EXCEPTION:
            codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {});
            return true;
        case EntrypointId::ARRAY_INDEX_OUT_OF_BOUNDS_EXCEPTION:
        case EntrypointId::STRING_INDEX_OUT_OF_BOUNDS_EXCEPTION:
            return GenerateThrowOutOfBoundsException(codegen);
        case EntrypointId::NEGATIVE_ARRAY_SIZE_EXCEPTION: {
            auto size = codegen->ConvertRegister(GetInst()->GetSrcReg(0), GetInst()->GetInputType(0));
            codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {size});
            return true;
        }
        case EntrypointId::INITIALIZE_CLASS:
            return GenerateInitializeClass(codegen);
        case EntrypointId::IS_INSTANCE:
            return GenerateIsInstance(codegen);
        case EntrypointId::CHECK_CAST:
            return GenerateCheckCast(codegen);
        case EntrypointId::CREATE_OBJECT_BY_CLASS:
            return GenerateCreateObject(codegen);
        case EntrypointId::SAFEPOINT:
            codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {});
            return true;
        case EntrypointId::DEOPTIMIZE: {
            return GenerateDeoptimize(codegen);
        }
        default:
            return false;
    }
}

void SlowPathEntrypoint::GenerateImpl(Codegen *codegen)
{
    if (!GenerateByEntry(codegen)) {
        switch (GetEntrypoint()) {
            case EntrypointId::GET_UNKNOWN_CALLEE_METHOD:
            case EntrypointId::RESOLVE_UNKNOWN_VIRTUAL_CALL:
            case EntrypointId::GET_FIELD_OFFSET:
            case EntrypointId::GET_UNKNOWN_STATIC_FIELD_MEMORY_ADDRESS:
            case EntrypointId::GET_UNKNOWN_STATIC_FIELD_PTR:
            case EntrypointId::RESOLVE_CLASS_OBJECT:
            case EntrypointId::RESOLVE_CLASS:
            case EntrypointId::ABSTRACT_METHOD_ERROR:
            case EntrypointId::INITIALIZE_CLASS_BY_ID:
            case EntrypointId::CHECK_STORE_ARRAY_REFERENCE:
            case EntrypointId::RESOLVE_STRING_AOT:
            case EntrypointId::CLASS_CAST_EXCEPTION:
                break;
            default:
                LOG(FATAL, COMPILER) << "Unsupported entrypoint!";
                UNREACHABLE();
                break;
        }
    }
}

void SlowPathIntrinsic::GenerateImpl(Codegen *codegen)
{
    codegen->CreateCallIntrinsic(GetInst()->CastToIntrinsic());
}

void SlowPathImplicitNullCheck::GenerateImpl(Codegen *codegen)
{
    ASSERT(!GetInst()->CastToNullCheck()->IsImplicit());
    SlowPathEntrypoint::GenerateImpl(codegen);
}

void SlowPathShared::GenerateImpl(Codegen *codegen)
{
    ASSERT(tmp_reg_ != INVALID_REGISTER);
    [[maybe_unused]] ScopedTmpReg tmp_reg(codegen->GetEncoder(), tmp_reg_);
    ASSERT(tmp_reg.GetReg().GetId() == tmp_reg_.GetId());
    auto graph = codegen->GetGraph();
    ASSERT(graph->IsAotMode());
    auto aot_data = graph->GetAotData();
    aot_data->SetSharedSlowPathOffset(GetEntrypoint(), codegen->GetEncoder()->GetCursorOffset());
    MemRef entry(codegen->ThreadReg(), graph->GetRuntime()->GetEntrypointTlsOffset(graph->GetArch(), GetEntrypoint()));
    ScopedTmpReg tmp1_reg(codegen->GetEncoder());
    codegen->GetEncoder()->EncodeLdr(tmp1_reg, false, entry);
    codegen->GetEncoder()->EncodeJump(tmp1_reg);
}

void SlowPathResolveStringAot::GenerateImpl(Codegen *codegen)
{
    ScopedTmpRegU64 tmp_addr_reg(codegen->GetEncoder());
    // The slot address was loaded into a temporary register before we jumped into the slow path, but it has already
    // been released because temporary registers are scoped. Try to allocate a new one and check that it is the same
    // register as was allocated in codegen. If it is a different register, copy the slot address into it.
    if (tmp_addr_reg.GetReg() != addr_reg_) {
        codegen->GetEncoder()->EncodeMov(tmp_addr_reg, addr_reg_);
    }
    codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), dst_reg_, Imm(string_id_), tmp_addr_reg);
}

void SlowPathRefCheck::GenerateImpl(Codegen *codegen)
{
    ASSERT(array_reg_ != INVALID_REGISTER);
    ASSERT(ref_reg_ != INVALID_REGISTER);
    codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {array_reg_, ref_reg_});
}

void SlowPathAbstract::GenerateImpl(Codegen *codegen)
{
    SCOPED_DISASM_STR(codegen, std::string("SlowPath for Abstract method ") + std::to_string(GetInst()->GetId()));
    ASSERT(method_reg_ != INVALID_REGISTER);
    ScopedTmpReg method_reg(codegen->GetEncoder(), method_reg_);
    ASSERT(method_reg.GetReg().GetId() == method_reg_.GetId());
    codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {method_reg.GetReg()});
}

void SlowPathCheckCast::GenerateImpl(Codegen *codegen)
{
    SCOPED_DISASM_STR(codegen, std::string("SlowPath for CheckCast exception ") + std::to_string(GetInst()->GetId()));
    auto inst = GetInst();
    auto src = codegen->ConvertRegister(inst->GetSrcReg(0), inst->GetInputType(0));

    codegen->CallRuntime(GetInst(), GetEntrypoint(), INVALID_REGISTER, {class_reg_, src});
}

void SlowPathUnresolved::GenerateImpl(Codegen *codegen)
{
    SlowPathEntrypoint::GenerateImpl(codegen);

    ASSERT(method_ != nullptr);
    ASSERT(type_id_ != 0);
    ASSERT(slot_addr_ != 0);

    ScopedTmpReg value_reg(codegen->GetEncoder());
    if (GetInst()->GetOpcode() == Opcode::UnresolvedCallVirtual) {
        codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, arg_reg_, Imm(type_id_),
                                       Imm(slot_addr_));
    } else if (GetEntrypoint() == EntrypointId::GET_UNKNOWN_CALLEE_METHOD ||
               GetEntrypoint() == EntrypointId::GET_UNKNOWN_STATIC_FIELD_MEMORY_ADDRESS ||
               GetEntrypoint() == EntrypointId::GET_UNKNOWN_STATIC_FIELD_PTR) {
        codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, Imm(type_id_), Imm(slot_addr_));
    } else {
        codegen->CallRuntimeWithMethod(GetInst(), method_, GetEntrypoint(), value_reg, Imm(type_id_));

        ScopedTmpReg addr_reg(codegen->GetEncoder());
        codegen->GetEncoder()->EncodeMov(addr_reg, Imm(slot_addr_));
        codegen->GetEncoder()->EncodeStr(value_reg, MemRef(addr_reg));
    }

    if (dst_reg_.IsValid()) {
        codegen->GetEncoder()->EncodeMov(dst_reg_, value_reg);
    }
}

}  // namespace panda::compiler