merge LiteCG to ets_runtime

Signed-off-by: zhangyiwei <zhangyiwei11@huawei.com>
Change-Id: Ia3de0eb1fa353945fa784369ffbe603c90229def
This commit is contained in:
zhangyiwei 2023-11-07 16:53:04 +08:00
parent 8a6b2605d3
commit 79b6fe93e4
541 changed files with 232419 additions and 0 deletions

View File

@ -0,0 +1,119 @@
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//arkcompiler/ets_runtime/js_runtime_config.gni")
# Shared compiler configuration applied to every mapleall target.
# Debug builds: no optimization, overflow trapping and stack checking.
# Release builds: -O2 with fortified libc calls.
config("mapleallcompilecfg") {
cflags = []
cflags_cc = [ "-frtti" ]
cflags_c = []
if (is_debug) {
cflags_cc += [
"-O0",
"-g3",
"-ftrapv",
"-fstack-check",
"-DDEBUG",
]
cflags_c += [
"-O0",
"-g3",
"-ftrapv",
"-fstack-check",
"-DDEBUG",
]
} else {
cflags_cc += [
"-O2",
"-fno-strict-aliasing",
"-D_FORTIFY_SOURCE=2",
]
cflags_c += [
"-O2",
"-fno-strict-aliasing",
"-D_FORTIFY_SOURCE=2",
]
}
# Warning and hardening flags common to both build variants.
cflags_c += [
"-Wall",
"-fstack-protector-strong",
"-fvisibility=hidden",
"-pipe",
"-Werror",
"-Wdate-time",
"-Wfloat-equal",
"-Wno-c99-designator",
]
cflags_cc += [
"-Wall",
"-fstack-protector-strong",
"-fvisibility=hidden",
"-pipe",
"-Wno-c99-designator",
"-Wno-range-loop-construct",
"-Werror",
"-Wdate-time",
"-Wfloat-equal",
"-DDYNAMICLANG",
"-DRC_NO_MMAP",
"-DMIR_FEATURE_FULL=1",
"-DMIR_JAVA=1",
"-std=c++17",
"-fno-common",
]
# On mingw, skip PIC/PIE and disable the stack protector; everywhere else
# build position-independent code.
if (current_os != "mingw") {
cflags_c += [
"-fPIC",
"-fPIE",
]
cflags_cc += [
"-fPIC",
"-fPIE",
]
} else {
cflags_c += [ "-fno-stack-protector" ]
cflags_cc += [ "-fno-stack-protector" ]
}
# Select the macro telling mapleall which code-generation target to build.
if (TARGET == "aarch64") {
cflags_cc += [ "-DTARGAARCH64" ]
}
if (TARGET == "x86_64") {
cflags_cc += [ "-DTARGX86_64" ]
}
if (TARGET == "riscv64") {
cflags_cc += [ "-DTARGRISCV64" ]
}
if (TARGET == "ark") {
cflags_cc += [ "-DTARGARK" ]
}
# Link flags for 64-bit hosts: relro/now/noexecstack hardening plus a
# position-independent executable.
if (HOST_ARCH == 64) {
ldflags = []
ldflags += [
"-rdynamic",
"-lpthread",
"-Wl,-z,relro",
"-Wl,-z,now",
"-Wl,-z,noexecstack",
"-pie",
]
}
}

View File

@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Compile the given Java sources against a core jar and package the
# resulting class files into an output jar.
# Usage: <script> <output-jar> <core-all-jar> <java-source>...

# Abort immediately if javac fails instead of packaging a stale/partial jar.
set -e

OUTPUT=$1
CORE_ALL_JAR=$2
shift 2
# Quote expansions so paths containing spaces survive word splitting;
# "$@" keeps each source file as a separate argument.
javac -g -d . -bootclasspath "${CORE_ALL_JAR}" "$@"
jar -cvf "${OUTPUT}" *.class

View File

@ -0,0 +1,365 @@
# Copyright (c) 2023 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import("//arkcompiler/ets_runtime/js_runtime_config.gni")
# Header search paths shared by every target defined in this file.
include_directories = [
"${MAPLEALL_ROOT}/maple_be/include/cg",
"${MAPLEALL_ROOT}/maple_be/include/litecg",
"${MAPLEALL_ROOT}/maple_be/include/ad",
"${MAPLEALL_ROOT}/maple_be/include/ad/target",
"${MAPLEALL_ROOT}/maple_be/include/be",
"${MAPLEALL_ROOT}/maple_driver/include",
"${MAPLEALL_ROOT}/maple_util/include",
"${MAPLEALL_ROOT}/maple_ir/include",
"${MAPLEALL_ROOT}/maple_me/include",
"${MAPLEALL_ROOT}/mpl2mpl/include",
"${MAPLEALL_ROOT}/mempool/include",
"${MAPLEALL_ROOT}/maple_ipa/include",
"${MAPLEALL_ROOT}/maple_ipa/include/old",
"${MAPLEALL_ROOT}/maple_phase/include",
"${MAPLEALL_THIRD_PARTY_ROOT}/bounds_checking_function/include",
]
# Dependencies of the final :libcg static library; extended with
# architecture-specific backends in the TARGET blocks below.
deps_libcg = [
":libmplbe",
":libmplad",
"$js_root/ecmascript/mapleall/mempool:libmempool",
"$js_root/ecmascript/mapleall/maple_phase:libmplphase",
"$js_root/ecmascript/mapleall/mpl2mpl:libmpl2mpl",
"$js_root/ecmascript/mapleall/maple_ir:libmplir",
"$js_root/ecmascript/mapleall/maple_util:libmplutil",
"$js_root/ecmascript/mapleall/maple_me:libmplme",
]
deps_libmplbe = [ ":libcglowerer" ]
# Per-architecture include paths and backend libraries, keyed on TARGET.
if (TARGET == "aarch64") {
include_directories += [
"${MAPLEALL_ROOT}/maple_be/include/cg/aarch64",
"${MAPLEALL_ROOT}/maple_be/include/be/aarch64",
]
deps_libcg += [
":libcgaarch64",
":libcgphases",
"${MAPLEALL_ROOT}/maple_driver:libmaple_driver",
]
}
if (TARGET == "x86_64") {
include_directories += [
"${MAPLEALL_ROOT}/maple_be/include/cg/x86_64",
"${MAPLEALL_ROOT}/maple_be/include/be/x86_64",
]
deps_libcg += [
":libcgx8664",
":libcgx86phases",
]
}
if (TARGET == "riscv64") {
include_directories += [
"${MAPLEALL_ROOT}/maple_be/include/cg/riscv64",
"${MAPLEALL_ROOT}/maple_be/include/be/riscv64",
]
deps_libcg += [ ":libcgriscv64" ]
}
if (TARGET == "ark") {
include_directories += [
"${MAPLEALL_ROOT}/maple_be/include/cg/ark",
"${MAPLEALL_ROOT}/maple_be/include/be/ark",
]
deps_libcg += [ ":libcgark" ]
}
# Architecture-description (instruction scheduling) sources.
src_libmplad = [ "src/ad/mad.cpp" ]
# MIR lowering helpers (target :libcglowerer).
src_libcglowerer = [
"src/be/bbt.cpp",
"src/be/trycatchblockslower.cpp",
"src/be/lower.cpp",
]
# Back-end common sources (target :libmplbe).
src_libmplbe = [
"src/be/becommon.cpp",
"src/be/switch_lowerer.cpp",
"src/be/rt.cpp",
]
# aarch64 backend sources.
src_libcgaarch64 = [
"src/cg/aarch64/aarch64_abi.cpp",
"src/cg/aarch64/aarch64_call_conv.cpp",
"src/cg/aarch64/mpl_atomic.cpp",
"src/cg/aarch64/aarch64_cgfunc.cpp",
"src/cg/aarch64/aarch64_dependence.cpp",
"src/cg/aarch64/aarch64_ebo.cpp",
"src/cg/aarch64/aarch64_emitter.cpp",
"src/cg/aarch64/aarch64_obj_emitter.cpp",
"src/cg/aarch64/aarch64_fixshortbranch.cpp",
"src/cg/aarch64/aarch64_global.cpp",
"src/cg/aarch64/aarch64_proepilog.cpp",
"src/cg/aarch64/aarch64_operand.cpp",
"src/cg/aarch64/aarch64_color_ra.cpp",
"src/cg/aarch64/aarch64_reg_info.cpp",
"src/cg/aarch64/aarch64_ssa.cpp",
"src/cg/aarch64/aarch64_prop.cpp",
"src/cg/aarch64/aarch64_dce.cpp",
"src/cg/aarch64/aarch64_phi_elimination.cpp",
"src/cg/aarch64/aarch64_reg_coalesce.cpp",
"src/cg/aarch64/aarch64_ico.cpp",
"src/cg/aarch64/aarch64_insn.cpp",
"src/cg/aarch64/aarch64_isa.cpp",
"src/cg/aarch64/aarch64_memlayout.cpp",
"src/cg/aarch64/aarch64_args.cpp",
"src/cg/aarch64/aarch64_live.cpp",
"src/cg/aarch64/aarch64_yieldpoint.cpp",
"src/cg/aarch64/aarch64_offset_adjust.cpp",
"src/cg/aarch64/aarch64_optimize_common.cpp",
"src/cg/aarch64/aarch64_peep.cpp",
"src/cg/aarch64/aarch64_reaching.cpp",
"src/cg/aarch64/aarch64_schedule.cpp",
"src/cg/aarch64/aarch64_strldr.cpp",
"src/cg/aarch64/aarch64_ra_opt.cpp",
"src/cg/aarch64/aarch64_alignment.cpp",
"src/cg/aarch64/aarch64_regsaves.cpp",
"src/cg/aarch64/aarch64_utils.cpp",
"src/cg/aarch64/aarch64_cg.cpp",
"src/cg/aarch64/aarch64_validbit_opt.cpp",
"src/cg/aarch64/aarch64_cfgo.cpp",
]
# Generic optimization phases built for the x86_64 target.
src_libcgx86phases = [
"src/cg/peep.cpp",
"src/cg/alignment.cpp",
"src/cg/reaching.cpp",
"src/cg/local_opt.cpp",
"src/cg/cfgo.cpp",
]
# x86_64 backend sources.
src_libcgx8664 = [
"src/cg/x86_64/x64_cg.cpp",
"src/cg/x86_64/x64_MPIsel.cpp",
"src/cg/x86_64/x64_cgfunc.cpp",
"src/cg/x86_64/x64_memlayout.cpp",
"src/cg/x86_64/x64_emitter.cpp",
"src/cg/x86_64/x64_abi.cpp",
"src/cg/x86_64/x64_call_conv.cpp",
"src/cg/x86_64/x64_standardize.cpp",
"src/cg/x86_64/x64_live.cpp",
"src/cg/x86_64/x64_reg_info.cpp",
"src/cg/x86_64/x64_proepilog.cpp",
"src/cg/x86_64/x64_args.cpp",
"src/cg/x86_64/x64_peep.cpp",
"src/cg/x86_64/x64_reaching.cpp",
"src/cg/x86_64/x64_local_opt.cpp",
"src/cg/x86_64/x64_cfgo.cpp",
"src/cg/x86_64/x64_isa.cpp",
"src/cg/x86_64/x64_optimize_common.cpp",
"src/cg/x86_64/elf_assembler.cpp",
"src/cg/x86_64/asm_assembler.cpp",
]
# riscv64 backend sources.
src_libcgriscv64 = [
"src/cg/riscv64/mpl_atomic.cpp",
"src/cg/riscv64/riscv64_abi.cpp",
"src/cg/riscv64/riscv64_args.cpp",
"src/cg/riscv64/riscv64_cg.cpp",
"src/cg/riscv64/riscv64_cgfunc.cpp",
"src/cg/riscv64/riscv64_color_ra.cpp",
"src/cg/riscv64/riscv64_dependence.cpp",
"src/cg/riscv64/riscv64_ebo.cpp",
"src/cg/riscv64/riscv64_emitter.cpp",
"src/cg/riscv64/riscv64_fixshortbranch.cpp",
"src/cg/riscv64/riscv64_global.cpp",
"src/cg/riscv64/riscv64_ico.cpp",
"src/cg/riscv64/riscv64_immediate.cpp",
"src/cg/riscv64/riscv64_insn.cpp",
"src/cg/riscv64/riscv64_isa.cpp",
"src/cg/riscv64/riscv64_live.cpp",
"src/cg/riscv64/riscv64_lsra.cpp",
"src/cg/riscv64/riscv64_memlayout.cpp",
"src/cg/riscv64/riscv64_offset_adjust.cpp",
"src/cg/riscv64/riscv64_operand.cpp",
"src/cg/riscv64/riscv64_optimize_common.cpp",
"src/cg/riscv64/riscv64_peep.cpp",
"src/cg/riscv64/riscv64_proepilog.cpp",
"src/cg/riscv64/riscv64_reaching.cpp",
"src/cg/riscv64/riscv64_reg_alloc.cpp",
"src/cg/riscv64/riscv64_schedule.cpp",
"src/cg/riscv64/riscv64_strldr.cpp",
"src/cg/riscv64/riscv64_yieldpoint.cpp",
"src/cg/riscv64/riscv64_ra_opt.cpp",
]
# ark backend source (single stub translation unit).
src_libcgark = [ "src/cg/ark/foo.cpp" ]
# Generic optimization phases built for the aarch64 target.
src_libcgphases = [
"src/cg/cfgo.cpp",
"src/cg/local_opt.cpp",
"src/cg/ebo.cpp",
"src/cg/ra_opt.cpp",
"src/cg/cg_ssa.cpp",
"src/cg/cg_prop.cpp",
"src/cg/cg_dce.cpp",
"src/cg/cg_phi_elimination.cpp",
"src/cg/reg_coalesce.cpp",
"src/cg/global.cpp",
"src/cg/ico.cpp",
"src/cg/peep.cpp",
"src/cg/pressure.cpp",
"src/cg/reaching.cpp",
"src/cg/schedule.cpp",
"src/cg/strldr.cpp",
"src/cg/cg_dominance.cpp",
"src/cg/cg_pre.cpp",
"src/cg/cg_occur.cpp",
"src/cg/cg_ssu_pre.cpp",
"src/cg/cg_ssa_pre.cpp",
"src/cg/regsaves.cpp",
"src/cg/cg_critical_edge.cpp",
"src/cg/alignment.cpp",
"src/cg/cg_validbit_opt.cpp",
]
# Target-independent code-generator core sources (target :libcg).
src_libcg = [
"src/cg/args.cpp",
"src/cg/cg_irbuilder.cpp",
"src/cg/cfi.cpp",
"src/cg/cgbb.cpp",
"src/cg/operand.cpp",
"src/cg/cgfunc.cpp",
"src/cg/cg_cfg.cpp",
"src/cg/cg_option.cpp",
"src/cg/cg_options.cpp",
"src/cg/dbg.cpp",
"src/cg/optimize_common.cpp",
"src/cg/eh_func.cpp",
"src/cg/emit.cpp",
"src/cg/obj_emit.cpp",
"src/cg/ifile.cpp",
"src/cg/live.cpp",
"src/cg/loop.cpp",
"src/cg/isel.cpp",
"src/cg/standardize.cpp",
"src/cg/memlayout.cpp",
"src/cg/yieldpoint.cpp",
"src/cg/label_creation.cpp",
"src/cg/offset_adjust.cpp",
"src/cg/reg_alloc.cpp",
"src/cg/reg_alloc_basic.cpp",
"src/cg/reg_alloc_lsra.cpp",
"src/cg/proepilog.cpp",
"src/cg/cg.cpp",
"src/cg/isa.cpp",
"src/cg/insn.cpp",
"src/cg/cg_phasemanager.cpp",
"src/litecg/litecg.cpp",
"src/litecg/lmir_builder.cpp",
]
# Architecture description (scheduling model) library.
ohos_static_library("libmplad") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libmplad
include_dirs = include_directories
output_dir = "${root_out_dir}/lib/${HOST_ARCH}"
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# MIR lowering helpers, consumed by :libmplbe.
ohos_source_set("libcglowerer") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcglowerer
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# Back-end common library.
ohos_static_library("libmplbe") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libmplbe
deps = deps_libmplbe
include_dirs = include_directories
output_dir = "${root_out_dir}/lib/${HOST_ARCH}"
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# aarch64 backend.
ohos_source_set("libcgaarch64") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgaarch64
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# x86_64 backend.
ohos_source_set("libcgx8664") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgx8664
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# Generic phases compiled for x86_64.
ohos_source_set("libcgx86phases") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgx86phases
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# riscv64 backend.
ohos_source_set("libcgriscv64") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgriscv64
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# ark backend stub.
ohos_static_library("libcgark") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgark
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# Generic phases compiled for aarch64.
ohos_source_set("libcgphases") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcgphases
include_dirs = include_directories
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}
# Final code-generator static library aggregating the pieces above.
ohos_static_library("libcg") {
stack_protector_ret = false
configs = [ "${MAPLEALL_ROOT}:mapleallcompilecfg" ]
sources = src_libcg
include_dirs = include_directories
deps = deps_libcg
output_dir = "${root_out_dir}/lib/${HOST_ARCH}"
part_name = "ets_runtime"
subsystem_name = "arkcompiler"
}

View File

@ -0,0 +1,172 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// cortex_a55 machine scheduling model: type and class declarations.
DefType UnitType = Primary, And, Or;
DefType BypassType = Accumulator, Store, AluShift;
// Architecture name
Class ArchitectureName <string>;
// Parallelism number
Class Parallelism <int>;
Def ArchitectureName {cortex_a55};
Def Parallelism {2};
// class parameters can be set as default.
// default parameters can only be placed at the end
// Class Unit :Name <UnitType, componentUnits>
Class Unit :string <UnitType, Unit[]>;
// Class Reservation :Name <Latency, dependUnits>
Class Reservation : string <int, Unit[]>;
// AnonClass Bypass : BypassNum, fromTypeReservation, toTypeReservation, BypassType
Class Bypass <int, Reservation[], Reservation[], BypassType>;
// Primary (leaf) functional units of the pipeline.
Def Unit : kUnitIdSlot0 {Primary};
Def Unit : kUnitIdSlot1 {Primary};
Def Unit : kUnitIdAgen {Primary};
Def Unit : kUnitIdHazard {Primary};
Def Unit : kUnitIdCrypto {Primary};
Def Unit : kUnitIdMul {Primary};
Def Unit : kUnitIdDiv {Primary};
Def Unit : kUnitIdBranch {Primary};
Def Unit : kUnitIdStAgu {Primary};
Def Unit : kUnitIdLdAgu {Primary};
Def Unit : kUnitIdFpAluLo {Primary};
Def Unit : kUnitIdFpAluHi {Primary};
Def Unit : kUnitIdFpMulLo {Primary};
Def Unit : kUnitIdFpMulHi {Primary};
Def Unit : kUnitIdFpDivLo {Primary};
Def Unit : kUnitIdFpDivHi {Primary};
// Composite units built from the primaries above (Or / And per UnitType).
Def Unit : kUnitIdSlotS {Or, [kUnitIdSlot0, kUnitIdSlot1]};
Def Unit : kUnitIdFpAluS {Or, [kUnitIdFpAluLo, kUnitIdFpAluHi]};
Def Unit : kUnitIdFpMulS {Or, [kUnitIdFpMulLo, kUnitIdFpMulHi]};
Def Unit : kUnitIdFpDivS {Or, [kUnitIdFpDivLo, kUnitIdFpDivHi]};
Def Unit : kUnitIdSlotD {And, [kUnitIdSlot0, kUnitIdSlot1]};
Def Unit : kUnitIdFpAluD {And, [kUnitIdFpAluLo, kUnitIdFpAluHi]};
Def Unit : kUnitIdFpMulD {And, [kUnitIdFpMulLo, kUnitIdFpMulHi]};
Def Unit : kUnitIdFpDivD {And, [kUnitIdFpDivLo, kUnitIdFpDivHi]};
Def Unit : kUnitIdSlotSHazard {And, [kUnitIdSlotS, kUnitIdHazard]};
Def Unit : kUnitIdSlotSMul {And, [kUnitIdSlotS, kUnitIdMul]};
Def Unit : kUnitIdSlotSBranch {And, [kUnitIdSlotS, kUnitIdBranch]};
Def Unit : kUnitIdSlotSAgen {And, [kUnitIdSlotS, kUnitIdAgen]};
Def Unit : kUnitIdSlotDAgen {And, [kUnitIdSlot0, kUnitIdSlot1, kUnitIdAgen]};
Def Unit : kUnitIdSlot0LdAgu {And, [kUnitIdSlot0, kUnitIdLdAgu]};
Def Unit : kUnitIdSlot0StAgu {And, [kUnitIdSlot0, kUnitIdStAgu]};
// Empty placeholder used where a cycle occupies no unit.
Def Unit : nothing {};
// Reservations: per latency class, {latency, dependent units} as declared
// by "Class Reservation" above.
Def Reservation : kLtUndef {0};
Def Reservation : kLtShift {2, [kUnitIdSlotS]};
Def Reservation : kLtShiftReg {2, [ kUnitIdSlotS, kUnitIdHazard]};
Def Reservation : kLtAlu {3, [kUnitIdSlotS]};
Def Reservation : kLtAluShift {3, [kUnitIdSlotS]};
Def Reservation : kLtAluShiftReg {3, [kUnitIdSlotS, kUnitIdHazard]};
Def Reservation : kLtAluExtr {3, [kUnitIdSlot1]};
Def Reservation : kLtMul {4, [kUnitIdSlotS, kUnitIdMul]};
Def Reservation : kLtDiv {4, [kUnitIdSlot0, kUnitIdDiv, kUnitIdDiv]};
Def Reservation : kLtLoad1 {4, [kUnitIdSlotSAgen, kUnitIdLdAgu]};
Def Reservation : kLtStore1 {2, [kUnitIdSlotSAgen, kUnitIdStAgu]};
Def Reservation : kLtLoad2 {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]};
Def Reservation : kLtStore2 {2, [ kUnitIdSlotSAgen, kUnitIdStAgu]};
Def Reservation : kLtLoad3plus {6, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]};
Def Reservation : kLtStore3plus {2, [kUnitIdSlotDAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]};
Def Reservation : kLtBranch {0, [kUnitIdSlotSBranch]};
Def Reservation : kLtFpalu {4, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtFconst {2, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtFpmul {4, [kUnitIdSlotS, kUnitIdFpMulS]};
// "nothing" entries model cycles where no unit is occupied.
Def Reservation : kLtFpmac {8, [kUnitIdSlotS, kUnitIdFpMulS, nothing, nothing, nothing, kUnitIdFpAluS]};
Def Reservation : kLtR2f {2, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtF2r {4, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtR2fCvt {4, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtF2rCvt {5, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtFFlags {5, [kUnitIdSlotS]};
Def Reservation : kLtFLoad64 {3, [kUnitIdSlotSAgen, kUnitIdLdAgu]};
Def Reservation : kLtFLoadMany {4, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]};
Def Reservation : kLtFStore64 {0, [kUnitIdSlotSAgen, kUnitIdStAgu]};
Def Reservation : kLtFStoreMany {0, [kUnitIdSlotSAgen, kUnitIdSlot0StAgu, kUnitIdStAgu]};
Def Reservation : kLtAdvsimdAlu {4, [kUnitIdSlotS, kUnitIdFpAluS]};
Def Reservation : kLtAdvsimdAluQ {4, [kUnitIdSlot0, kUnitIdFpAluD]};
Def Reservation : kLtAdvsimdMul {4, [kUnitIdSlotS, kUnitIdFpMulS]};
Def Reservation : kLtAdvsimdMulQ {4, [kUnitIdSlot0, kUnitIdFpMulD]};
Def Reservation : kLtAdvsimdDivS {14, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]};
Def Reservation : kLtAdvsimdDivD {29, [kUnitIdSlot0, kUnitIdFpMulS, kUnitIdFpDivS]};
Def Reservation : kLtAdvsimdDivSQ {14, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]};
Def Reservation : kLtAdvsimdDivdQ {29, [kUnitIdSlotD, kUnitIdFpMulD, kUnitIdFpDivD]};
Def Reservation : kLtCryptoAese {3, [kUnitIdSlot0]};
Def Reservation : kLtCryptoAesmc {3, [kUnitIdSlotS]};
Def Reservation : kLtClinit {14, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu,
kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu,
kUnitIdLdAgu, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu,
kUnitIdLdAgu]};
Def Reservation : kLtAdrpLdr {6, [kUnitIdSlotS, nothing, kUnitIdSlotDAgen, kUnitIdSlot0LdAgu,
kUnitIdLdAgu]};
Def Reservation : kLtClinitTail {8, [kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu, nothing,
kUnitIdSlotDAgen, kUnitIdSlot0LdAgu, kUnitIdLdAgu]};
// Bypasses: {latency, producer classes, consumer classes[, bypass type]}
// per the "Class Bypass" declaration above.
Def Bypass {0, [kLtShift, kLtShiftReg], [kLtAlu]};
Def Bypass {1, [kLtShift], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]};
Def Bypass {1, [kLtShiftReg], [kLtShift, kLtShiftReg, kLtAluShift, kLtAluShiftReg]};
Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg], [kLtAlu]};
Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift], AluShift};
Def Bypass {1, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg], AluShift};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShift]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluShiftReg]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtAluExtr]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShift]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtShiftReg]};
Def Bypass {2, [kLtMul], [kLtMul], Accumulator};
Def Bypass {2, [kLtMul], [kLtAlu]};
Def Bypass {3, [kLtMul], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]};
Def Bypass {2, [kLtLoad1], [kLtAlu]};
Def Bypass {3, [kLtLoad1], [kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg]};
Def Bypass {3, [kLtLoad2], [kLtAlu]};
Def Bypass {0, [kLtAlu], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtAluShift], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtAluShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtAluExtr], [ kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtShift], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtShiftReg], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {1, [kLtMul], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {1, [kLtLoad1], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {1, [kLtLoad2], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {1, [kLtLoad3plus], [kLtStore1, kLtStore2, kLtStore3plus], Store};
Def Bypass {0, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtR2f]};
Def Bypass {1, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2f]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr], [kLtR2fCvt]};
Def Bypass {3, [kLtMul, kLtLoad1, kLtLoad2], [kLtR2fCvt]};
Def Bypass {0, [kLtAlu, kLtAluShift, kLtAluShiftReg, kLtAluExtr, kLtShift, kLtShiftReg], [kLtBranch]};
Def Bypass {1, [kLtFpalu, kLtFpmul, kLtR2f, kLtR2fCvt, kLtFconst], [kLtFpmac], Accumulator};
Def Bypass {1, [kLtFLoad64, kLtFLoadMany], [kLtFpmac]};
Def Bypass {4, [kLtFpmac], [kLtFpmac], Accumulator};
Def Bypass {0, [kLtCryptoAese], [kLtCryptoAesmc]};
Def Bypass {1, [kLtShiftReg], [kLtClinit]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtClinit]};
Def Bypass {3, [kLtMul, kLtLoad1], [kLtClinit]};
Def Bypass {13, [kLtAlu], [kLtClinit]};
Def Bypass {11, [kLtClinit], [kLtStore1, kLtStore3plus], Store};
Def Bypass {11, [kLtClinit], [kLtR2f]};
Def Bypass {13, [kLtClinit], [kLtR2fCvt]};
Def Bypass {1, [kLtShiftReg], [kLtAdrpLdr]};
Def Bypass {2, [kLtAlu, kLtAluShift, kLtAluExtr], [kLtAdrpLdr]};
Def Bypass {3, [kLtMul, kLtLoad1], [kLtAdrpLdr]};
Def Bypass {5, [kLtAdrpLdr], [kLtAlu]};
Def Bypass {3, [kLtAdrpLdr], [kLtStore1, kLtStore3plus], Store};
Def Bypass {3, [kLtAdrpLdr], [kLtR2f]};
Def Bypass {5, [kLtAdrpLdr], [kLtR2fCvt]};
Def Bypass {7, [kLtClinitTail], [kLtAlu]};
Def Bypass {5, [kLtClinitTail], [kLtStore1, kLtStore3plus], Store};
Def Bypass {5, [kLtClinitTail], [kLtR2f]};
Def Bypass {7, [kLtClinitTail], [kLtR2fCvt]};

View File

@ -0,0 +1,264 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_AD_MAD_H
#define MAPLEBE_INCLUDE_AD_MAD_H
#include <vector>
#include "types_def.h"
#include "mpl_logging.h"
#include "insn.h"
namespace maplebe {
/* Identifiers of the modelled functional units; the per-target list is
 * generated into mplad_unit_id.def. */
enum UnitId : maple::uint32 {
#include "mplad_unit_id.def"
kUnitIdLast
};
/* Composition kind of a Unit (primary leaf, Or/And composite). */
enum UnitType : maple::uint8 { kUnitTypePrimart, kUnitTypeOr, kUnitTypeAnd, KUnitTypeNone };
/* Kinds of real units; values in mplad_unit_kind.def are powers of two so
 * kinds can be combined as a bitmask. */
enum RealUnitKind : maple::uint32 {
kUnitKindUndef,
#include "mplad_unit_kind.def"
kUnitKindLast = 13
};
/* Issue-slot requirement associated with a Reservation. */
enum SlotType : maple::uint8 {
kSlotNone,
kSlot0,
kSlot1,
kSlotAny,
kSlots,
};
/* machine model */
/* Instruction latency classes, generated from mplad_latency_type.def. */
enum LatencyType : maple::uint32 {
/* LT: latency */
#include "mplad_latency_type.def"
kLtLast,
};
/*
 * One functional unit of the scheduling model: either a primary (leaf)
 * resource or a composite built from other units (see enum UnitType).
 * occupancyTable records the cycles the unit is busy in (presumably one
 * bit per upcoming cycle -- confirm in mad.cpp).
 */
class Unit {
public:
    explicit Unit(enum UnitId theUnitId);
    /* Composite constructor: numOfUnits trailing Unit* arguments follow. */
    Unit(enum UnitType theUnitType, enum UnitId theUnitId, int numOfUnits, ...);
    ~Unit() = default;

    enum UnitType GetUnitType() const
    {
        return unitType;
    }

    /* Fix: dropped the stray ';' that followed this body in the original. */
    enum UnitId GetUnitId() const
    {
        return unitId;
    }

    const std::vector<Unit *> &GetCompositeUnits() const;
    std::string GetName() const;
    bool IsFree(maple::uint32 cycle) const;
    void Occupy(const Insn &insn, maple::uint32 cycle);
    void Release();
    void AdvanceCycle();
    void Dump(int indent = 0) const;
    maple::uint32 GetOccupancyTable() const;

    void SetOccupancyTable(maple::uint32 table)
    {
        occupancyTable = table;
    }

private:
    void PrintIndent(int indent) const;

    enum UnitId unitId;
    enum UnitType unitType;
    maple::uint32 occupancyTable;
    std::vector<Unit *> compositeUnits;
};
/* Describes one latency class: its latency and the units an instruction of
 * that class depends on, plus the issue slot it requires. */
class Reservation {
public:
/* Variadic constructor: n trailing Unit* arguments follow. */
Reservation(LatencyType t, int l, int n, ...);
~Reservation() = default;
/* True when this reservation models latency class 'typ'. */
bool IsEqual(maple::uint32 typ) const
{
return typ == type;
}
int GetLatency() const
{
return latency;
}
uint32 GetUnitNum() const
{
return unitNum;
}
enum SlotType GetSlot() const
{
return slot;
}
const std::string &GetSlotName() const;
/* Raw pointer to the fixed-size unit array (unitNum valid entries). */
Unit *const *GetUnit() const
{
return units;
}
private:
/* Upper bound on units per reservation (longest entry in the .def file). */
static const int kMaxUnit = 13;
LatencyType type;
int latency;
uint32 unitNum;
Unit *units[kMaxUnit];
enum SlotType slot = kSlotNone;
SlotType GetSlotType(UnitId unitID) const;
};
/* Forwarding path between a producing (def) and consuming (use) latency
 * class, with the latency that applies when the bypass is taken
 * (queried via MAD::BypassLatency). */
class Bypass {
public:
Bypass(LatencyType d, LatencyType u, int l) : def(d), use(u), latency(l) {}
virtual ~Bypass() = default;
/* Default applicability test; subclasses refine it per bypass type. */
virtual bool CanBypass(const Insn &defInsn, const Insn &useInsn) const;
int GetLatency() const
{
return latency;
}
LatencyType GetDefType() const
{
return def;
}
LatencyType GetUseType() const
{
return use;
}
private:
LatencyType def;
LatencyType use;
int latency;
};
/* Micro-architecture description: owns all units, reservations and
 * bypasses, and answers latency/occupancy queries for the scheduler.
 * Note: the containers are static, so every MAD instance shares a single
 * machine model. */
class MAD {
public:
MAD()
{
InitUnits();
InitParallelism();
InitReservation();
InitBypass();
}
~MAD();
using BypassVector = std::vector<Bypass *>;
void InitUnits() const;
void InitParallelism() const;
void InitReservation() const;
void InitBypass() const;
bool IsSlot0Free() const;
bool IsFullIssued() const;
/* Latency from 'def' to 'use', taking applicable bypasses into account. */
int GetLatency(const Insn &def, const Insn &use) const;
int DefaultLatency(const Insn &insn) const;
Reservation *FindReservation(const Insn &insn) const;
void AdvanceCycle() const;
void ReleaseAllUnits() const;
/* Save/restore per-unit occupancy tables into/from 'occupyTable'. */
void SaveStates(std::vector<maple::uint32> &occupyTable, int size) const;
void RestoreStates(std::vector<maple::uint32> &occupyTable, int size) const;
int GetMaxParallelism() const
{
return parallelism;
}
const Unit *GetUnitByUnitId(enum UnitId uId) const
{
CHECK_FATAL(!allUnits.empty(), "CHECK_CONTAINER_EMPTY");
return allUnits[uId];
}
static void AddUnit(Unit &u)
{
allUnits.emplace_back(&u);
}
static maple::uint32 GetAllUnitsSize()
{
return allUnits.size();
}
static void AddReservation(Reservation &rev)
{
allReservations.emplace_back(&rev);
}
static void AddBypass(Bypass &bp)
{
DEBUG_ASSERT(bp.GetDefType() < kLtLast, "out of range");
DEBUG_ASSERT(bp.GetUseType() < kLtLast, "out of range");
(bypassArrays[bp.GetDefType()][bp.GetUseType()]).push_back(&bp);
}
protected:
static void SetMaxParallelism(int num)
{
parallelism = num;
}
int BypassLatency(const Insn &def, const Insn &use) const;
private:
static int parallelism;
static std::vector<Unit *> allUnits;
static std::vector<Reservation *> allReservations;
/* bypassArrays[def][use] lists all bypasses between two latency types. */
static std::array<std::array<BypassVector, kLtLast>, kLtLast> bypassArrays;
};
/* Bypass specializations overriding the applicability test for the three
 * BypassType categories of the machine model (implemented in mad.cpp). */
class AluShiftBypass : public Bypass {
public:
AluShiftBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
~AluShiftBypass() override = default;
bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
};
class AccumulatorBypass : public Bypass {
public:
AccumulatorBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
~AccumulatorBypass() override = default;
bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
};
class StoreBypass : public Bypass {
public:
StoreBypass(LatencyType d, LatencyType u, int l) : Bypass(d, u, l) {}
~StoreBypass() override = default;
bool CanBypass(const Insn &defInsn, const Insn &useInsn) const override;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_AD_MAD_H */

View File

@ -0,0 +1,29 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* cortex_a55 function unit ID definition: */
/* Enumerator fragment, presumably included into enum RealUnitKind via
 * mplad_unit_kind.def (see mad.h) -- confirm filename. Values are distinct
 * powers of two so unit kinds can be combined as a bitmask. */
kUnitKindSlot0 = 1,
kUnitKindAgen = 2,
kUnitKindHazard = 4,
kUnitKindCrypto = 8,
kUnitKindMul = 16,
kUnitKindDiv = 32,
kUnitKindBranch = 64,
kUnitKindStAgu = 128,
kUnitKindLdAgu = 256,
kUnitKindFpAlu = 512,
kUnitKindFpMul = 1024,
kUnitKindFpDiv = 2048,

View File

@ -0,0 +1,18 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Mangled descriptors of core Java classes (java.lang.Object/Class/String);
 * string-literal fragment included into an array initializer elsewhere --
 * TODO confirm the consumer. Do not edit the literals. */
"ALjava_2Flang_2FObject_3B",
"ALjava_2Flang_2FClass_3B",
"ALjava_2Flang_2FString_3B"

View File

@ -0,0 +1,27 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Mangled descriptors of Java library classes; string-literal fragment
 * included into an array initializer elsewhere -- TODO confirm the
 * consumer. Do not edit the literals. */
"ALjava_2Flang_2FObject_3B",
"ALjava_2Flang_2FClass_3B",
"ALjava_2Flang_2FString_3B",
"ALjava_2Futil_2FFormatter_24Flags_3B",
"ALjava_2Futil_2FHashMap_24Node_3B",
"ALjava_2Futil_2FFormatter_24FormatString_3B",
"ALjava_2Flang_2FCharSequence_3B",
"ALjava_2Flang_2FThreadLocal_24ThreadLocalMap_24Entry_3B",
"ALjava_2Futil_2FHashtable_24HashtableEntry_3B",
"ALlibcore_2Freflect_2FAnnotationMember_3B",
"ALsun_2Fsecurity_2Futil_2FDerValue_3B",
"ALsun_2Fsecurity_2Fx509_2FAVA_3B"

View File

@ -0,0 +1,156 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_BBT_H
#define MAPLEBE_INCLUDE_BE_BBT_H
/* MapleIR headers. */
#include "mir_nodes.h"
#include "mir_lower.h"
namespace maplebe {
using namespace maple;
/* Lightweight basic block over a range of MIR statements [firstStmt,
 * lastStmt], used while lowering try/catch regions. */
class BBT {
/*
* if stmt is a switch/rangegoto, succs gets defined, and condJumpBranch == fallthruBranch == nullptr.
* otherwise, succs.size() ==0 &&
* 1. for cond br stmt, both condJumpBranch and fallthruBranch are defined.
* 2. if bb ends with 'throw', both fields get nullptr.
* 3. for the others, condJumpBranch == nullptr && only fallthruBranch is defined
*/
public:
enum BBTType : uint8 { kBBPlain, kBBTry, kBBEndTry, kBBCatch };
/* A block spans [s, e]; when s is null the block is the single statement e. */
BBT(StmtNode *s, StmtNode *e, MemPool *memPool)
: alloc(memPool),
type(kBBPlain),
succs(alloc.Adapter()),
labelIdx(MIRLabelTable::GetDummyLabel()),
firstStmt(s != nullptr ? s : e),
lastStmt(e)
{
}
~BBT() = default;
/* Extend the block to end at eNode; sNode (or eNode when sNode is null)
 * must directly follow the current last statement. */
void Extend(const StmtNode *sNode, StmtNode *eNode)
{
CHECK_FATAL(lastStmt != nullptr, "nullptr check");
CHECK_FATAL(sNode != nullptr ? lastStmt->GetNext() == sNode : lastStmt->GetNext() == eNode, "Extend fail");
lastStmt = eNode;
}
void SetLabelIdx(LabelIdx li)
{
labelIdx = li;
}
/* A block is labeled once it has a non-dummy label index. */
bool IsLabeled() const
{
return labelIdx != MIRLabelTable::GetDummyLabel();
}
LabelIdx GetLabelIdx() const
{
return labelIdx;
}
/* Mark the block's role and remember the statement that caused it
 * (e.g. the try/endtry/catch statement). */
void SetType(BBTType t, StmtNode &k)
{
type = t;
keyStmt = &k;
}
bool IsTry() const
{
return type == kBBTry;
}
bool IsEndTry() const
{
return type == kBBEndTry;
}
bool IsCatch() const
{
return type == kBBCatch;
}
void AddSuccs(BBT *bb)
{
succs.emplace_back(bb);
}
void SetCondJumpBranch(BBT *bb)
{
condJumpBranch = bb;
}
BBT *GetCondJumpBranch()
{
return condJumpBranch;
}
void SetFallthruBranch(BBT *bb)
{
fallthruBranch = bb;
}
BBT *GetFallthruBranch()
{
return fallthruBranch;
}
StmtNode *GetFirstStmt()
{
return firstStmt;
}
void SetFirstStmt(StmtNode &stmt)
{
firstStmt = &stmt;
}
StmtNode *GetLastStmt()
{
return lastStmt;
}
void SetLastStmt(StmtNode &stmt)
{
lastStmt = &stmt;
}
StmtNode *GetKeyStmt()
{
return keyStmt;
}
#if DEBUG
void Dump(const MIRModule &mod) const;
static void ValidateStmtList(StmtNode *head, StmtNode *detached = nullptr);
#endif
private:
MapleAllocator alloc;
BBTType type;
BBT *condJumpBranch = nullptr;
BBT *fallthruBranch = nullptr;
MapleVector<BBT *> succs;
LabelIdx labelIdx;
StmtNode *firstStmt;
StmtNode *lastStmt;
StmtNode *keyStmt = nullptr;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_BBT_H */

View File

@ -0,0 +1,274 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_BECOMMON_H
#define MAPLEBE_INCLUDE_BE_BECOMMON_H
/* C++ headers. */
#include <cstddef>
#include <utility>
/* Basic Maple-independent utility functions */
#include "common_utils.h"
/* MapleIR headers. */
#include "mir_nodes.h" /* maple_ir/include, for BaseNode */
#include "mir_type.h" /* maple_ir/include, for MIRType */
#include "mir_module.h" /* maple_ir/include, for mirModule */
namespace maplebe {
using namespace maple;
enum BitsPerByte : uint8 { kBitsPerByte = 8, kLog2BitsPerByte = 3 };
/* Immutable per-field metadata for a java class field: reference-ness,
 * RC ownership flags, and the field's byte offset within the object. */
class JClassFieldInfo { /* common java class field info */
public:
    /* constructors */
    JClassFieldInfo() : isRef(false), isUnowned(false), isWeak(false), offset(0) {}
    JClassFieldInfo(bool isRef, bool isUnowned, bool isWeak, uint32 offset)
        : isRef(isRef), isUnowned(isUnowned), isWeak(isWeak), offset(offset)
    {
    }
    ~JClassFieldInfo() = default;
    bool IsRef() const
    {
        return isRef;
    }
    bool IsUnowned() const
    {
        return isUnowned;
    }
    bool IsWeak() const
    {
        return isWeak;
    }
    uint32 GetOffset() const
    {
        return offset;
    }

private:
    bool isRef;    /* used to generate object-map */
    bool isUnowned; /* used to mark unowned fields for RC */
    bool isWeak;   /* used to mark weak fields for RC */
    uint32 offset; /* offset from the start of the java object */
};
/* java class layout info: one JClassFieldInfo per field, indexed by field-id */
using JClassLayout = MapleVector<JClassFieldInfo>; /* java class layout info */
/*
 * BECommon caches the backend's view of MIR types: size, alignment,
 * flexible-array presence, nested-field counts (all indexed by TyIdx),
 * plus java class layouts and per-function return types. It is the
 * lowering phases' shared source of type layout information.
 */
class BECommon {
public:
    explicit BECommon(MIRModule &mod);
    ~BECommon() = default;
    void LowerTypeAttribute(MIRType &ty);
    void LowerJavaTypeAttribute(MIRType &ty);
    void LowerJavaVolatileInClassType(MIRClassType &ty);
    void LowerJavaVolatileForSymbol(MIRSymbol &sym) const;
    void ComputeTypeSizesAligns(MIRType &type, uint8 align = 0);
    void GenFieldOffsetMap(const std::string &className);
    void GenFieldOffsetMap(MIRClassType &classType, FILE &outFile);
    void GenObjSize(const MIRClassType &classType, FILE &outFile);
    /* returns the (byte, bit) offset pair of fieldID within structType */
    std::pair<int32, int32> GetFieldOffset(MIRStructType &structType, FieldID fieldID);
    bool IsRefField(MIRStructType &structType, FieldID fieldID) const;
    /* some class may has incomplete type definition. provide an interface to check them. */
    bool HasJClassLayout(MIRClassType &klass) const
    {
        return (jClassLayoutTable.find(&klass) != jClassLayoutTable.end());
    }
    /* precondition: HasJClassLayout(klass); .at() throws otherwise */
    const JClassLayout &GetJClassLayout(MIRClassType &klass) const
    {
        return *(jClassLayoutTable.at(&klass));
    }
    void AddNewTypeAfterBecommon(uint32 oldTypeTableSize, uint32 newTypeTableSize);
    void AddElementToJClassLayout(MIRClassType &klass, JClassFieldInfo info);
    bool HasFuncReturnType(MIRFunction &func) const
    {
        return (funcReturnType.find(&func) != funcReturnType.end());
    }
    /* precondition: HasFuncReturnType(func); .at() throws otherwise */
    const TyIdx GetFuncReturnType(MIRFunction &func) const
    {
        return (funcReturnType.at(&func));
    }
    void AddElementToFuncReturnType(MIRFunction &func, const TyIdx tyIdx);
    MIRType *BeGetOrCreatePointerType(const MIRType &pointedType);
    MIRType *BeGetOrCreateFunctionType(TyIdx tyIdx, const std::vector<TyIdx> &vecTy,
                                       const std::vector<TypeAttrs> &vecAt);
    BaseNode *GetAddressOfNode(const BaseNode &node);
    bool CallIsOfAttr(FuncAttrKind attr, const StmtNode *narynode) const;
    PrimType GetAddressPrimType() const
    {
        return GetLoweredPtrType();
    }
    /* update typeSizeTable and typeAlignTable when new type is created */
    void UpdateTypeTable(MIRType &ty)
    {
        if (!TyIsInSizeAlignTable(ty)) {
            AddAndComputeSizeAlign(ty);
        }
    }
    /* Global type table might be updated during lowering for C/C++. */
    void FinalizeTypeTable(const MIRType &ty);
    /* number of field-id slots this type contributes to its enclosing aggregate */
    uint32 GetFieldIdxIncrement(const MIRType &ty) const
    {
        if (ty.GetKind() == kTypeClass) {
            /* number of fields + 2 */
            return static_cast<const MIRClassType &>(ty).GetFieldsSize() + 2;
        } else if (ty.GetKind() == kTypeStruct) {
            /* number of fields + 1 */
            return static_cast<const MIRStructType &>(ty).GetFieldsSize() + 1;
        }
        return 1;
    }
    MIRModule &GetMIRModule() const
    {
        return mirModule;
    }
    uint64 GetTypeSize(uint32 idx) const
    {
        return typeSizeTable.at(idx);
    }
    uint32 GetSizeOfTypeSizeTable() const
    {
        return typeSizeTable.size();
    }
    bool IsEmptyOfTypeSizeTable() const
    {
        return typeSizeTable.empty();
    }
    void SetTypeSize(uint32 idx, uint64 value)
    {
        typeSizeTable.at(idx) = value;
    }
    void AddTypeSize(uint64 value)
    {
        typeSizeTable.emplace_back(value);
    }
    /* Append size/align entries for tyIdx; only legal when tyIdx is exactly
     * the next unrecorded index (tables stay densely indexed by TyIdx). */
    void AddTypeSizeAndAlign(const TyIdx tyIdx, uint64 value)
    {
        if (typeSizeTable.size() == tyIdx) {
            typeSizeTable.emplace_back(value);
            /* NOTE(review): alignment is recorded as the size value here;
             * presumably callers only hit this path for types where
             * size == align — confirm. */
            typeAlignTable.emplace_back(value);
        } else {
            /* fix: the message previously ended with a dangling ", %d" that had
             * no matching argument, which is undefined behavior when formatted */
            CHECK_FATAL(typeSizeTable.size() > tyIdx, "there are some types haven't set type size and align");
        }
    }
    uint8 GetTypeAlign(uint32 idx) const
    {
        return typeAlignTable.at(idx);
    }
    size_t GetSizeOfTypeAlignTable() const
    {
        return typeAlignTable.size();
    }
    bool IsEmptyOfTypeAlignTable() const
    {
        return typeAlignTable.empty();
    }
    void SetTypeAlign(uint32 idx, uint8 value)
    {
        typeAlignTable.at(idx) = value;
    }
    void AddTypeAlign(uint8 value)
    {
        typeAlignTable.emplace_back(value);
    }
    bool GetHasFlexibleArray(uint32 idx) const
    {
        return typeHasFlexibleArray.at(idx);
    }
    void SetHasFlexibleArray(uint32 idx, bool value)
    {
        typeHasFlexibleArray.at(idx) = value;
    }
    FieldID GetStructFieldCount(uint32 idx) const
    {
        return structFieldCountTable.at(idx);
    }
    uint32 GetSizeOfStructFieldCountTable() const
    {
        return structFieldCountTable.size();
    }
    void SetStructFieldCount(uint32 idx, FieldID value)
    {
        structFieldCountTable.at(idx) = value;
    }
    void AppendStructFieldCount(uint32 idx, FieldID value)
    {
        structFieldCountTable.at(idx) += value;
    }

private:
    bool TyIsInSizeAlignTable(const MIRType &) const;
    void AddAndComputeSizeAlign(MIRType &);
    void ComputeStructTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
    void ComputeClassTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx, uint8 align = 0);
    void ComputeArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
    void ComputeFArrayOrJArrayTypeSizesAligns(MIRType &ty, const TyIdx &tyIdx);
    MIRModule &mirModule;
    MapleVector<uint64> typeSizeTable;      /* index is TyIdx */
    MapleVector<uint8> typeAlignTable;      /* index is TyIdx */
    MapleVector<bool> typeHasFlexibleArray; /* struct with flexible array */
    /*
     * gives number of fields inside
     * each struct inclusive of nested structs, for speeding up
     * traversal for locating the field for a given fieldID
     */
    MapleVector<FieldID> structFieldCountTable;
    /*
     * a lookup table for class layout. the vector is indexed by field-id
     * Note: currently only for java class types.
     */
    MapleUnorderedMap<MIRClassType *, JClassLayout *> jClassLayoutTable;
    MapleUnorderedMap<MIRFunction *, TyIdx> funcReturnType;
}; /* class BECommon */
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_BECOMMON_H */

View File

@ -0,0 +1,349 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_COMMON_UTILS_H
#define MAPLEBE_INCLUDE_BE_COMMON_UTILS_H
#include <cstdint>
#include "types_def.h"
#include "mpl_logging.h"
namespace maplebe {
using namespace maple;
/* log2 offset-scale factors for memory accesses of each width */
constexpr uint32 kOffsetAlignmentOf8Bit = 0;
constexpr uint32 kOffsetAlignmentOf16Bit = 1;
constexpr uint32 kOffsetAlignmentOf32Bit = 2;
constexpr uint32 kOffsetAlignmentOf64Bit = 3;
constexpr uint32 kOffsetAlignmentOf128Bit = 4;
constexpr uint32 kBaseOffsetAlignment = 3;
/*
 * The constexpr implementations, without assertions. Suitable for using in
 * constants.
 */
/* named bit-size constants, unsigned flavor */
constexpr uint32 k1FConst = 31;
constexpr uint32 k0BitSize = 0;
constexpr uint32 k1BitSize = 1;
constexpr uint32 k2BitSize = 2;
constexpr uint32 k3BitSize = 3;
constexpr uint32 k4BitSize = 4;
constexpr uint32 k5BitSize = 5;
constexpr uint32 k6BitSize = 6;
constexpr uint32 k7BitSize = 7;
constexpr uint32 k8BitSize = 8;
constexpr uint32 k16BitSize = 16;
constexpr uint32 k24BitSize = 24;
constexpr uint32 k32BitSize = 32;
constexpr uint32 k48BitSize = 48;
constexpr uint32 k56BitSize = 56;
constexpr uint32 k64BitSize = 64;
constexpr uint32 k128BitSize = 128;
constexpr uint32 k256BitSize = 256;
constexpr uint32 k512BitSize = 512;
constexpr uint32 k1024BitSize = 1024;
constexpr uint32 k2048BitSize = 2048;
/* named bit-size constants, signed flavor (for contexts expecting int32) */
constexpr int32 k1FConstInt = 31;
constexpr int32 k0BitSizeInt = 0;
constexpr int32 k1BitSizeInt = 1;
constexpr int32 k2BitSizeInt = 2;
constexpr int32 k3BitSizeInt = 3;
constexpr int32 k4BitSizeInt = 4;
constexpr int32 k5BitSizeInt = 5;
constexpr int32 k6BitSizeInt = 6;
constexpr int32 k7BitSizeInt = 7;
constexpr int32 k8BitSizeInt = 8;
constexpr int32 k16BitSizeInt = 16;
constexpr int32 k24BitSizeInt = 24;
constexpr int32 k32BitSizeInt = 32;
constexpr int32 k48BitSizeInt = 48;
constexpr int32 k56BitSizeInt = 56;
constexpr int32 k64BitSizeInt = 64;
constexpr int32 k128BitSizeInt = 128;
constexpr int32 k256BitSizeInt = 256;
constexpr int32 k512BitSizeInt = 512;
constexpr int32 k1024BitSizeInt = 1024;
constexpr int32 kNegative256BitSize = -256;
constexpr int32 kNegative512BitSize = -512;
constexpr int32 kNegative1024BitSize = -1024;
/* named byte-size constants */
constexpr uint32 k1ByteSize = 1;
constexpr uint32 k2ByteSize = 2;
constexpr uint32 k3ByteSize = 3;
constexpr uint32 k4ByteSize = 4;
constexpr uint32 k8ByteSize = 8;
constexpr uint32 k9ByteSize = 9;
constexpr uint32 k12ByteSize = 12;
constexpr uint32 k14ByteSize = 14;
constexpr uint32 k15ByteSize = 15;
constexpr uint32 k16ByteSize = 16;
constexpr uint32 k32ByteSize = 32;
constexpr int32 k1ByteSizeInt = 1;
constexpr int32 k2ByteSizeInt = 2;
constexpr int32 k3ByteSizeInt = 3;
constexpr int32 k4ByteSizeInt = 4;
constexpr int32 k8ByteSizeInt = 8;
constexpr int32 k9ByteSizeInt = 9;
constexpr int32 k12ByteSizeInt = 12;
constexpr int32 k14ByteSizeInt = 14;
constexpr int32 k15ByteSizeInt = 15;
constexpr int32 k16ByteSizeInt = 16;
constexpr int32 k32ByteSizeInt = 32;
/* multiples of 8-byte (doubleword) units */
constexpr uint32 k1EightBytesSize = 8;
constexpr uint32 k2EightBytesSize = 16;
constexpr uint32 k3EightBytesSize = 24;
constexpr uint32 k4EightBytesSize = 32;
constexpr uint32 k4BitShift = 2; /* 4 is 1 << 2; */
constexpr uint32 k8BitShift = 3; /* 8 is 1 << 3; */
constexpr uint32 k16BitShift = 4; /* 16 is 1 << 4 */
constexpr uint32 kDwordSizeTwo = 2;
constexpr uint32 k4ByteFloatSize = 4;
constexpr uint32 k8ByteDoubleSize = 8;
/* Storage location of operands in one insn */
constexpr int32 kInsnFirstOpnd = 0;
constexpr int32 kInsnSecondOpnd = 1;
constexpr int32 kInsnThirdOpnd = 2;
constexpr int32 kInsnFourthOpnd = 3;
constexpr int32 kInsnFifthOpnd = 4;
constexpr int32 kInsnSixthOpnd = 5;
constexpr int32 kInsnSeventhOpnd = 6;
constexpr int32 kInsnEighthOpnd = 7;
constexpr int32 kInsnMaxOpnd = 8;
/* Reg of CCLocInfo */
constexpr uint32 kFirstReg = 0;
constexpr uint32 kSecondReg = 1;
constexpr uint32 kThirdReg = 2;
constexpr uint32 kFourthReg = 3;
/* inline asm operand designations */
constexpr uint32 kAsmStringOpnd = 0;
constexpr uint32 kAsmOutputListOpnd = 1;
constexpr uint32 kAsmClobberListOpnd = 2;
constexpr uint32 kAsmInputListOpnd = 3;
constexpr uint32 kAsmOutputConstraintOpnd = 4;
constexpr uint32 kAsmInputConstraintOpnd = 5;
constexpr uint32 kAsmOutputRegPrefixOpnd = 6;
constexpr uint32 kAsmInputRegPrefixOpnd = 7;
/* Number of registers */
constexpr uint32 kOneRegister = 1;
constexpr uint32 kTwoRegister = 2;
constexpr uint32 kThreeRegister = 3;
constexpr uint32 kFourRegister = 4;
/* position of an operand within an instruction */
constexpr uint32 kOperandPosition0 = 0;
constexpr uint32 kOperandPosition1 = 1;
constexpr uint32 kOperandPosition2 = 2;
/* Size of struct for memcpy */
constexpr uint32 kParmMemcpySize = 40;
/* Check whether the value is an even number. */
constexpr int32 kDivide2 = 2;
constexpr int32 kRegNum2 = 2;
constexpr int32 kStepNum2 = 2;
constexpr int32 kSign4ByteSize = 4;
/* alignment in bytes of uint8 */
constexpr uint8 kAlignOfU8 = 3;
/*
 * if the number of local refvar is less than 12, use stp or str to init local refvar
 * else call function MCC_InitializeLocalStackRef to init.
 */
constexpr int32 kRefNum12 = 12;
/* mod function max argument size */
constexpr uint32 kMaxModFuncArgSize = 8;
/* string length of spacial name "__EARetTemp__" */
constexpr int32 kEARetTempNameSize = 10;
/*
 * Aarch64 data processing instructions have 12 bits of space for values in their instuction word
 * This is arranged as a four-bit rotate value and an eight-bit immediate value:
 */
constexpr uint32 kMaxImmVal5Bits = 5;
constexpr uint32 kMaxImmVal6Bits = 6;
constexpr uint32 kMaxImmVal8Bits = 8;
constexpr uint32 kMaxImmVal12Bits = 12;
constexpr uint32 kMaxImmVal13Bits = 13;
constexpr uint32 kMaxImmVal16Bits = 16;
/* max positive immediate offsets, indexed by access-size log2 (see kMaxPimm) */
constexpr int32 kMaxPimm8 = 4095;
constexpr int32 kMaxPimm16 = 8190;
constexpr int32 kMaxPimm32 = 16380;
constexpr int32 kMaxPimm64 = 32760;
constexpr int32 kMaxPimm128 = 65520;
constexpr int32 kMaxPimm[k5BitSize] = {kMaxPimm8, kMaxPimm16, kMaxPimm32, kMaxPimm64, kMaxPimm128};
constexpr int32 kMaxPairPimm[k3BitSize] = {k256BitSize, k512BitSize, k512BitSize};
/* signed immediate offset ranges (single and pair forms) */
constexpr int32 kMaxSimm32 = 255;
constexpr int32 kMaxSimm32Pair = 252;
constexpr int32 kMinSimm32 = kNegative256BitSize;
constexpr int32 kMaxSimm64Pair = 504;
constexpr int32 kMinSimm64 = kNegative512BitSize;
constexpr int32 kMax12UnsignedImm = 4096;
constexpr int32 kMax13UnsignedImm = 8192;
constexpr int32 kMax16UnsignedImm = 65535;
/* Dedicated for Vector */
constexpr int32 kMinImmVal = -128;
constexpr int32 kMaxImmVal = 255;
/* aarch64 assembly takes up to 24-bits */
constexpr uint32 kMaxImmVal24Bits = 24;
constexpr uint32 kDecimalMax = 10;
constexpr double kMicroSecPerMilliSec = 1000.0;
constexpr double kPercent = 100.0;
/* AArch64-style condition codes used across the backend */
enum ConditionCode : uint8 {
    CC_EQ, /* equal */
    CC_NE, /* not equal */
    CC_CS, /* carry set (== HS) */
    CC_HS, /* unsigned higher or same (== CS) */
    CC_CC, /* carry clear (== LO) */
    CC_LO, /* Unsigned lower (== CC) */
    CC_MI, /* Minus or negative result */
    CC_PL, /* positive or zero result */
    CC_VS, /* overflow */
    CC_VC, /* no overflow */
    CC_HI, /* unsigned higher */
    CC_LS, /* unsigned lower or same */
    CC_GE, /* signed greater than or equal */
    CC_LT, /* signed less than */
    CC_GT, /* signed greater than */
    CC_LE, /* signed less than or equal */
    CC_AL, /* always, this is the default. usually omitted. */
    kCcLast
};
/* Return the logical negation of cc (EQ<->NE, LT<->GE, ...). Codes with no
 * reverse mapping here (CC_CS, CC_CC, CC_AL) abort, exactly as before. */
inline ConditionCode GetReverseCC(ConditionCode cc)
{
    struct CCPair {
        ConditionCode cond;
        ConditionCode reversed;
    };
    /* each pair is symmetric: reversing maps one member to the other */
    static constexpr CCPair kReversePairs[] = {{CC_EQ, CC_NE}, {CC_HS, CC_LO}, {CC_MI, CC_PL}, {CC_VS, CC_VC},
                                               {CC_HI, CC_LS}, {CC_GE, CC_LT}, {CC_GT, CC_LE}};
    for (const auto &pair : kReversePairs) {
        if (cc == pair.cond) {
            return pair.reversed;
        }
        if (cc == pair.reversed) {
            return pair.cond;
        }
    }
    CHECK_FATAL(0, "unknown condition code");
    return kCcLast;
}
/* Like GetReverseCC but restricted to the six "basic" codes; anything else
 * (including the unsigned HS/LO/HI/LS family) aborts, exactly as before. */
inline ConditionCode GetReverseBasicCC(ConditionCode cc)
{
    if (cc == CC_NE) {
        return CC_EQ;
    }
    if (cc == CC_EQ) {
        return CC_NE;
    }
    if (cc == CC_LT) {
        return CC_GE;
    }
    if (cc == CC_GE) {
        return CC_LT;
    }
    if (cc == CC_GT) {
        return CC_LE;
    }
    if (cc == CC_LE) {
        return CC_GT;
    }
    CHECK_FATAL(false, "Not support yet.");
    return kCcLast;
}
/* True for exact powers of two; deliberately also true for 0 (no assertion,
 * so it is usable in constant expressions). */
inline bool IsPowerOf2Const(uint64 i)
{
    /* clearing the lowest set bit leaves zero iff at most one bit was set */
    const uint64 withoutLowestBit = i & (i - 1);
    return withoutLowestBit == 0;
}
/* Round offset up to the next multiple of align, assuming align is a power
 * of two (unchecked; suitable for constant expressions). */
inline uint64 RoundUpConst(uint64 offset, uint64 align)
{
    /* ~(align - 1) equals -align in two's complement, so this applies the
     * same mask as the original (-align) & (offset + align - 1) */
    const uint64 mask = ~(align - 1);
    return (offset + align - 1) & mask;
}
/* True for 0 and for exact powers of two (same predicate as IsPowerOf2Const). */
inline bool IsPowerOf2(uint64 i)
{
    return (i & (i - 1)) == 0;
}
/* Round offset up to a multiple of align; align must be 0 (no-op) or a power of 2. */
inline uint64 RoundUp(uint64 offset, uint64 align)
{
    if (align != 0) {
        DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!");
        return RoundUpConst(offset, align);
    }
    /* align == 0 means "no alignment requested": leave offset untouched */
    return offset;
}
/* Round offset down to a multiple of align, assuming align is a power of two
 * (unchecked; suitable for constant expressions). */
inline int64 RoundDownConst(int64 offset, int64 align)
{
    /* for power-of-2 align, -align is an all-ones mask above the low bits */
    const int64 mask = -align;
    return offset & mask;
}
// Round offset down to a multiple of align; align must be 0 (no-op) or a power of 2.
inline int64 RoundDown(int64 offset, int64 align)
{
    if (align != 0) {
        DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!");
        return RoundDownConst(offset, align);
    }
    // align == 0 means "no alignment requested": leave offset untouched
    return offset;
}
/* True when offset is an exact multiple of align; align must be a power of 2. */
inline bool IsAlignedTo(uint64 offset, uint64 align)
{
    DEBUG_ASSERT(IsPowerOf2(align), "align must be power of 2!");
    const uint64 lowBits = offset & (align - 1);
    return lowBits == 0;
}
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_COMMON_UTILS_H */

View File

@ -0,0 +1,343 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_LOWERER_H
#define MAPLEBE_INCLUDE_BE_LOWERER_H
/* C++ headers. */
#include <vector>
#include <unordered_map>
#include <utility>
#include <cstddef>
#include <cstdarg>
#include <regex>
#include "intrinsics.h" /* For IntrinDesc. This includes 'intrinsic_op.h' as well */
#include "becommon.h"
#include "cg.h"
#include "bbt.h"
/* MapleIR headers. */
#include "mir_nodes.h"
#include "mir_module.h"
#include "mir_function.h"
#include "mir_lower.h"
#include "simplify.h"
namespace maplebe {
/*
 * CGLowerer rewrites a MIR function into the lower-level form the code
 * generator consumes: expands intrinsics, bitfield accesses, array/call/
 * return constructs, and (optionally) exception-handling scaffolding.
 */
class CGLowerer {
    /* bit flags controlling lowering behavior (combined into OptionFlag) */
    enum Option : uint64 {
        kUndefined = 0,
        kGenEh = 1ULL << 0,
        kVerboseCG = 1ULL << 1,
    };
    using BuiltinFunctionID = uint32;
    using OptionFlag = uint64;

public:
    /* Default-configured lowerer: EH generation on, verbose off. */
    CGLowerer(MIRModule &mod, BECommon &common, MIRFunction *func = nullptr) : mirModule(mod), beCommon(common)
    {
        SetOptions(kGenEh);
        mirBuilder = mod.GetMIRBuilder();
        SetCurrentFunc(func);
    }
    /* Explicitly-configured lowerer; current function starts unset. */
    CGLowerer(MIRModule &mod, BECommon &common, bool genEh, bool verboseCG) : mirModule(mod), beCommon(common)
    {
        OptionFlag option = 0;
        if (genEh) {
            option |= kGenEh;
        }
        if (verboseCG) {
            option |= kVerboseCG;
        }
        SetOptions(option);
        mirBuilder = mod.GetMIRBuilder();
        SetCurrentFunc(nullptr);
    }
    ~CGLowerer()
    {
        mirBuilder = nullptr;
        currentBlock = nullptr;
    }
    MIRFunction *RegisterFunctionVoidStarToVoid(BuiltinFunctionID id, const std::string &name,
                                                const std::string &paramName);
    void RegisterBuiltIns();
    /* Entry point: lower one function in place. */
    void LowerFunc(MIRFunction &func);
    BaseNode *LowerIntrinsicop(const BaseNode &, IntrinsicopNode &, BlockNode &);
    BaseNode *LowerIntrinsicopwithtype(const BaseNode &, IntrinsicopNode &, BlockNode &);
    StmtNode *LowerIntrinsicMplClearStack(const IntrinsiccallNode &intrinCall, BlockNode &newBlk);
    StmtNode *LowerIntrinsicRCCall(const IntrinsiccallNode &intrinCall);
    void LowerArrayStore(const IntrinsiccallNode &intrinCall, BlockNode &newBlk);
    StmtNode *LowerDefaultIntrinsicCall(IntrinsiccallNode &intrinCall, MIRSymbol &st, MIRFunction &fn);
    StmtNode *LowerIntrinsicMplCleanupLocalRefVarsSkip(IntrinsiccallNode &intrinCall);
    StmtNode *LowerIntrinsiccall(IntrinsiccallNode &intrinCall, BlockNode &);
    StmtNode *LowerSyncEnterSyncExit(StmtNode &stmt);
    MIRFunction *GetCurrentFunc() const
    {
        return mirModule.CurFunction();
    }
    BaseNode *LowerExpr(BaseNode &, BaseNode &, BlockNode &);
    BaseNode *LowerDread(DreadNode &dread, const BlockNode &block);
    /* Normalizes u1 loads to u8 and expands bitfield ireads. */
    BaseNode *LowerIread(IreadNode &iread)
    {
        /* use PTY_u8 for boolean type in dread/iread */
        if (iread.GetPrimType() == PTY_u1) {
            iread.SetPrimType(PTY_u8);
        }
        return (iread.GetFieldID() == 0 ? &iread : LowerIreadBitfield(iread));
    }
    BaseNode *LowerCastExpr(BaseNode &expr);
    BaseNode *ExtractSymbolAddress(const StIdx &stIdx);
    BaseNode *LowerDreadToThreadLocal(BaseNode &expr, const BlockNode &block);
    StmtNode *LowerDassignToThreadLocal(StmtNode &stmt, const BlockNode &block);
    void LowerDassign(DassignNode &dassign, BlockNode &block);
    void LowerResetStmt(StmtNode &stmt, BlockNode &block);
    void LowerIassign(IassignNode &iassign, BlockNode &block);
    void LowerRegassign(RegassignNode &regAssign, BlockNode &block);
    void AddElemToPrintf(MapleVector<BaseNode *> &argsPrintf, int num, ...) const;
    /* Basename (text after the last '/') of the file stmt was parsed from. */
    std::string AssertBoundaryGetFileName(StmtNode &stmt)
    {
        size_t pos = mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).rfind('/');
        return mirModule.GetFileNameFromFileNum(stmt.GetSrcPos().FileNum()).substr(pos + 1);
    }
    std::string GetFileNameSymbolName(const std::string &fileName) const;
    void SwitchAssertBoundary(StmtNode &stmt, MapleVector<BaseNode *> &argsPrintf);
    void LowerAssertBoundary(StmtNode &stmt, BlockNode &block, BlockNode &newBlk, std::vector<StmtNode *> &abortNode);
    StmtNode *LowerIntrinsicopDassign(const DassignNode &dassign, IntrinsicopNode &intrinsic, BlockNode &block);
    void LowerGCMalloc(const BaseNode &node, const GCMallocNode &gcNode, BlockNode &blkNode, bool perm = false);
    std::string GetNewArrayFuncName(const uint32 elemSize, const bool perm) const;
    void LowerJarrayMalloc(const StmtNode &stmt, const JarrayMallocNode &node, BlockNode &block, bool perm = false);
    /* addrof needs no lowering; returned unchanged */
    BaseNode *LowerAddrof(AddrofNode &addrof) const
    {
        return &addrof;
    }
    BaseNode *LowerIaddrof(const IreadNode &iaddrof);
    BaseNode *SplitBinaryNodeOpnd1(BinaryNode &bNode, BlockNode &blkNode);
    BaseNode *SplitTernaryNodeResult(TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode);
    bool IsComplexSelect(const TernaryNode &tNode) const;
    int32 FindTheCurrentStmtFreq(const StmtNode *stmt) const;
    BaseNode *LowerComplexSelect(const TernaryNode &tNode, BaseNode &parent, BlockNode &blkNode);
    BaseNode *LowerFarray(ArrayNode &array);
    BaseNode *LowerArrayDim(ArrayNode &array, int32 dim);
    BaseNode *LowerArrayForLazyBiding(BaseNode &baseNode, BaseNode &offsetNode, const BaseNode &parent);
    BaseNode *LowerArray(ArrayNode &array, const BaseNode &parent);
    BaseNode *LowerCArray(ArrayNode &array);
    DassignNode *SaveReturnValueInLocal(StIdx, uint16);
    void LowerCallStmt(StmtNode &, StmtNode *&, BlockNode &, MIRType *retty = nullptr, bool uselvar = false,
                       bool isIntrinAssign = false);
    BlockNode *LowerIntrinsiccallAassignedToAssignStmt(IntrinsiccallNode &intrinsicCall);
    BlockNode *LowerCallAssignedStmt(StmtNode &stmt, bool uselvar = false);
    bool LowerStructReturn(BlockNode &blk, StmtNode *stmt, StmtNode *&nextStmt, bool &lvar, BlockNode *oldblk);
    BlockNode *LowerMemop(StmtNode &);
    BaseNode *LowerRem(BaseNode &rem, BlockNode &block);
    void LowerStmt(StmtNode &stmt, BlockNode &block);
    void LowerSwitchOpnd(StmtNode &stmt, BlockNode &block);
    MIRSymbol *CreateNewRetVar(const MIRType &ty, const std::string &prefix);
    void RegisterExternalLibraryFunctions();
    BlockNode *LowerBlock(BlockNode &block);
    void SimplifyBlock(BlockNode &block) const;
    void LowerTryCatchBlocks(BlockNode &body);
#if TARGARM32 || TARGAARCH64 || TARGRISCV64 || TARGX86_64
    BlockNode *LowerReturnStructUsingFakeParm(NaryStmtNode &retNode);
#endif
    BlockNode *LowerReturn(NaryStmtNode &retNode);
    void LowerEntry(MIRFunction &func);
    StmtNode *LowerCall(CallNode &call, StmtNode *&stmt, BlockNode &block, MIRType *retty = nullptr,
                        bool uselvar = false);
    void SplitCallArg(CallNode &callNode, BaseNode *newOpnd, size_t i, BlockNode &newBlk);
    void CleanupBranches(MIRFunction &func) const;
    void LowerTypePtr(BaseNode &expr) const;
    BaseNode *GetBitField(int32 byteOffset, BaseNode *baseAddr, PrimType fieldPrimType);
    StmtNode *WriteBitField(const std::pair<int32, int32> &byteBitOffsets, const MIRBitFieldType *fieldType,
                            BaseNode *baseAddr, BaseNode *rhs, BlockNode *block);
    BaseNode *ReadBitField(const std::pair<int32, int32> &byteBitOffsets, const MIRBitFieldType *fieldType,
                           BaseNode *baseAddr);
    BaseNode *LowerDreadBitfield(DreadNode &dread);
    BaseNode *LowerIreadBitfield(IreadNode &iread);
    StmtNode *LowerDassignBitfield(DassignNode &dassign, BlockNode &block);
    StmtNode *LowerIassignBitfield(IassignNode &iassign, BlockNode &block);
    void LowerAsmStmt(AsmNode *asmNode, BlockNode *blk);
    bool ShouldOptarray() const
    {
        DEBUG_ASSERT(mirModule.CurFunction() != nullptr, "nullptr check");
        return MIRLower::ShouldOptArrayMrt(*mirModule.CurFunction());
    }
    BaseNode *NodeConvert(PrimType mtype, BaseNode &expr);
    /* Lower pointer/reference types if found in pseudo registers. */
    void LowerPseudoRegs(const MIRFunction &func) const;
    /* A pseudo register refers to a symbol when DreadNode is converted to RegreadNode. */
    StIdx GetSymbolReferredToByPseudoRegister(PregIdx regNO) const
    {
        (void)regNO;
        return StIdx();
    }
    void SetOptions(OptionFlag option)
    {
        options = option;
    }
    void SetCheckLoadStore(bool value)
    {
        checkLoadStore = value;
    }
    /* if it defines a built-in to use for the given intrinsic, return the name. otherwise, return nullptr */
    PUIdx GetBuiltinToUse(BuiltinFunctionID id) const;
    void InitArrayClassCacheTableIndex();
    /* public state shared with the lowering helpers */
    MIRModule &mirModule;
    BECommon &beCommon;
    BlockNode *currentBlock = nullptr; /* current block for lowered statements to be inserted to */
    bool checkLoadStore = false;
    int64 seed = 0;
    SimplifyMemOp simplifyMemOp;
    static const std::string kIntrnRetValPrefix;
    static const std::string kUserRetValPrefix;
    static constexpr PUIdx kFuncNotFound = PUIdx(-1);
    static constexpr int kThreeDimArray = 3;
    static constexpr int kNodeThirdOpnd = 2;
    static constexpr int kMCCSyncEnterFast0 = 0;
    static constexpr int kMCCSyncEnterFast1 = 1;
    static constexpr int kMCCSyncEnterFast2 = 2;
    static constexpr int kMCCSyncEnterFast3 = 3;

protected:
    /*
     * true if the lower level (e.g. mplcg) can handle the intrinsic directly.
     * For example, the INTRN_MPL_ATOMIC_EXCHANGE_PTR can be directly handled by mplcg,
     * and generate machine code sequences not containing any function calls.
     * Such intrinsics will bypass the lowering of "assigned",
     * and let mplcg handle the intrinsic results which are not return values.
     */
    bool IsIntrinsicCallHandledAtLowerLevel(MIRIntrinsicID intrinsic) const;
    bool IsIntrinsicOpHandledAtLowerLevel(MIRIntrinsicID intrinsic) const;

private:
    /* Switch the module's current function and keep simplifyMemOp in sync,
     * enabling its debug dumps when the "cglower" phase dump is requested. */
    void SetCurrentFunc(MIRFunction *func)
    {
        mirModule.SetCurFunction(func);
        simplifyMemOp.SetFunction(func);
        if (func != nullptr) {
            const std::string &dumpFunc = CGOptions::GetDumpFunc();
            const bool debug = CGOptions::GetDumpPhases().find("cglower") != CGOptions::GetDumpPhases().end() &&
                               (dumpFunc == "*" || dumpFunc == func->GetName());
            simplifyMemOp.SetDebug(debug);
        }
    }
    bool ShouldAddAdditionalComment() const
    {
        return (options & kVerboseCG) != 0;
    }
    bool GenerateExceptionHandlingCode() const
    {
        return (options & kGenEh) != 0;
    }
    BaseNode *MergeToCvtType(PrimType dtyp, PrimType styp, BaseNode &src) const;
    BaseNode *LowerJavascriptIntrinsicop(IntrinsicopNode &intrinNode, const IntrinDesc &desc);
    StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, const MIRSymbol &ret, PUIdx bFunc,
                                            BaseNode *extraInfo = nullptr) const;
    StmtNode *CreateStmtCallWithReturnValue(const IntrinsicopNode &intrinNode, PregIdx retPregIdx, PUIdx bFunc,
                                            BaseNode *extraInfo = nullptr) const;
    BaseNode *LowerIntrinsicop(const BaseNode &parent, IntrinsicopNode &intrinNode);
    BaseNode *LowerIntrinJavaMerge(const BaseNode &parent, IntrinsicopNode &intrinNode);
    BaseNode *LowerIntrinJavaArrayLength(const BaseNode &parent, IntrinsicopNode &intrinNode);
    BaseNode *LowerIntrinsicopWithType(const BaseNode &parent, IntrinsicopNode &intrinNode);
    MIRType *GetArrayNodeType(BaseNode &baseNode);
    IreadNode &GetLenNode(BaseNode &opnd0);
    LabelIdx GetLabelIdx(MIRFunction &curFunc) const;
    void ProcessArrayExpr(BaseNode &expr, BlockNode &blkNode);
    void ProcessClassInfo(MIRType &classType, bool &classInfoFromRt, std::string &classInfo) const;
    StmtNode *GenCallNode(const StmtNode &stmt, PUIdx &funcCalled, CallNode &origCall);
    StmtNode *GenIntrinsiccallNode(const StmtNode &stmt, PUIdx &funcCalled, bool &handledAtLowerLevel,
                                   IntrinsiccallNode &origCall);
    StmtNode *GenIcallNode(PUIdx &funcCalled, IcallNode &origCall);
    BlockNode *GenBlockNode(StmtNode &newCall, const CallReturnVector &p2nRets, const Opcode &opcode,
                            const PUIdx &funcCalled, bool handledAtLowerLevel, bool uselvar);
    BaseNode *GetClassInfoExprFromRuntime(const std::string &classInfo);
    BaseNode *GetClassInfoExprFromArrayClassCache(const std::string &classInfo);
    BaseNode *GetClassInfoExpr(const std::string &classInfo) const;
    BaseNode *GetBaseNodeFromCurFunc(MIRFunction &curFunc, bool isJarray);
    OptionFlag options = 0;
    bool needBranchCleanup = false;
    bool hasTry = false;
    static std::vector<std::pair<BuiltinFunctionID, PUIdx>> builtinFuncIDs;
    MIRBuilder *mirBuilder = nullptr;
    uint32 labelIdx = 0;
    static std::unordered_map<IntrinDesc *, PUIdx> intrinFuncIDs;
    static std::unordered_map<std::string, size_t> arrayClassCacheIndex;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_LOWERER_H */

View File

@ -0,0 +1,96 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_RT_H
#define MAPLEBE_INCLUDE_BE_RT_H
#include <cstdint>
#include <string>
namespace maplebe {
/*
 * This class contains constants about the ABI of the runtime, such as symbols
 * for GC-related metadata in generated binary files, and the byte layout of
 * Java objects (header size, array length/content offsets, GC-TIB offsets).
 */
class RTSupport {
public:
    /* Meyers singleton; construction fills in the layout constants below. */
    static RTSupport &GetRTSupportInstance()
    {
        static RTSupport RtSupport;
        return RtSupport;
    }
    uint64_t GetObjectAlignment() const
    {
        return kObjectAlignment;
    }
    int64_t GetArrayContentOffset() const
    {
        return kArrayContentOffset;
    }
    int64_t GetArrayLengthOffset() const
    {
        return kArrayLengthOffset;
    }
    uint64_t GetFieldSize() const
    {
        return kRefFieldSize;
    }
    uint64_t GetFieldAlign() const
    {
        return kRefFieldAlign;
    }

protected:
    uint64_t kObjectAlignment;  /* Word size. Suitable for all Java types. */
    uint64_t kObjectHeaderSize; /* java object header used by MM. */
#ifdef USE_32BIT_REF
    uint32_t kRefFieldSize; /* reference field in java object */
    uint32_t kRefFieldAlign;
#else
    /* NOTE(review): both #ifdef branches declare uint32_t; the 64-bit-ref
     * branch presumably intended uint64_t — confirm against the runtime ABI. */
    uint32_t kRefFieldSize; /* reference field in java object */
    uint32_t kRefFieldAlign;
#endif /* USE_32BIT_REF */
    /* The array length offset is fixed since CONTENT_OFFSET is fixed to simplify code */
    int64_t kArrayLengthOffset; /* shadow + monitor + [padding] */
    /* The array content offset is aligned to 8B to alow hosting of size-8B elements */
    int64_t kArrayContentOffset; /* fixed */
    int64_t kGcTibOffset;
    int64_t kGcTibOffsetAbs;

private:
    /* Values chosen per the runtime's Java object model; ref field width
     * follows USE_32BIT_REF. */
    RTSupport()
    {
        kObjectAlignment = 8;
        kObjectHeaderSize = 8;
#ifdef USE_32BIT_REF
        kRefFieldSize = 4;
        kRefFieldAlign = 4;
#else
        kRefFieldSize = 8;
        kRefFieldAlign = 8;
#endif /* USE_32BIT_REF */
        kArrayLengthOffset = 12;
        kArrayContentOffset = 16;
        kGcTibOffset = -8;
        kGcTibOffsetAbs = -kGcTibOffset;
    }
    static const std::string kObjectMapSectionName;
    static const std::string kGctibLabelArrayOfObject;
    static const std::string kGctibLabelJavaObject;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_RT_H */

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H
#define MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H
#include "mir_nodes.h"
#include "mir_module.h"
namespace maplebe {
class BELowerer;
/*
 * Lowers a maple SwitchNode into a mix of compare/branch trees, direct
 * gotos, and range-goto (jump table) constructs, chosen by case density.
 */
class SwitchLowerer {
public:
    SwitchLowerer(maple::MIRModule &mod, maple::SwitchNode &stmt, maple::MapleAllocator &allocator)
        : mirModule(mod), stmt(&stmt), switchItems(allocator.Adapter()), ownAllocator(&allocator)
    {
    }
    ~SwitchLowerer() = default;
    /* Entry point: returns the lowered replacement block for the switch. */
    maple::BlockNode *LowerSwitch();

private:
    /* (startIdx, endIdx) of a dense run of case tags in the sorted table. */
    using Cluster = std::pair<maple::int32, maple::int32>;
    using SwitchItem = std::pair<maple::int32, maple::int32>;
    maple::MIRModule &mirModule;
    maple::SwitchNode *stmt;
    /*
     * the original switch table is sorted and then each dense (in terms of the
     * case tags) region is condensed into 1 switch item; in the switchItems
     * table, each item either corresponds to an original entry in the original
     * switch table (pair's second is 0), or to a dense region (pair's second
     * gives the upper limit of the dense range)
     */
    maple::MapleVector<SwitchItem> switchItems; /* uint32 is index in switchTable */
    maple::MapleAllocator *ownAllocator;
    /* Tuning knobs: minimum cluster size, density thresholds, and maximum
     * jump-table size used when deciding whether a run becomes a range goto. */
    const maple::int32 kClusterSwitchCutoff = 5;
    const float kClusterSwitchDensityHigh = 0.4;
    const float kClusterSwitchDensityLow = 0.2;
    const maple::int32 kMaxRangeGotoTableSize = 127;
    /* True once a fallthrough goto to the default block has been emitted. */
    bool jumpToDefaultBlockGenerated = false;
    /* Scans the sorted case table and collects dense clusters. */
    void FindClusters(maple::MapleVector<Cluster> &clusters) const;
    /* Builds switchItems from the clusters found above. */
    void InitSwitchItems(maple::MapleVector<Cluster> &clusters);
    maple::RangeGotoNode *BuildRangeGotoNode(maple::int32 startIdx, maple::int32 endIdx);
    maple::CompareNode *BuildCmpNode(maple::Opcode opCode, maple::uint32 idx);
    maple::GotoNode *BuildGotoNode(maple::int32 idx);
    maple::CondGotoNode *BuildCondGotoNode(maple::int32 idx, maple::Opcode opCode, maple::BaseNode &cond);
    /* Recursive binary partition over switchItems; the two flags record
     * whether the low/high bound has already been range-checked. */
    maple::BlockNode *BuildCodeForSwitchItems(maple::int32 start, maple::int32 end, bool lowBNdChecked,
                                              bool highBNdChecked);
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_SWITCH_LOWERER_H */

View File

@ -0,0 +1,182 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_BE_TRY_CATCH_H
#define MAPLEBE_INCLUDE_BE_TRY_CATCH_H
#include "bbt.h"
/* MapleIR headers. */
#include "mir_nodes.h"
#include "mir_lower.h"
namespace maplebe {
using namespace maple;
/*
 * Bookkeeping for one try ... endtry region while try/catch blocks are
 * being recovered: the delimiting BBs, the BBs enclosed by the region,
 * labeled BBs inside it, and BBs that must be moved out of it.
 */
class TryEndTryBlock {
public:
    explicit TryEndTryBlock(MemPool &memPool)
        : allocator(&memPool),
          enclosedBBs(allocator.Adapter()),
          labeledBBsInTry(allocator.Adapter()),
          bbsToRelocate(allocator.Adapter())
    {
    }
    ~TryEndTryBlock() = default;
    /* Clears all state; no current try region afterwards. */
    void Init()
    {
        startTryBB = nullptr;
        endTryBB = nullptr;
        tryStmt = nullptr;
        enclosedBBs.clear();
        labeledBBsInTry.clear();
        bbsToRelocate.clear();
    }
    /* Re-targets this record at a new try region headed by startBB, whose
     * key statement must be an OP_try. */
    void Reset(BBT &startBB)
    {
        startTryBB = &startBB;
        CHECK_NULL_FATAL(startTryBB->GetKeyStmt());
        tryStmt = startTryBB->GetKeyStmt();
        CHECK_FATAL(tryStmt->GetOpCode() == OP_try, "expect OPT_try");
        endTryBB = nullptr;
        enclosedBBs.clear();
        labeledBBsInTry.clear();
        bbsToRelocate.clear();
    }
    void SetStartTryBB(BBT *bb)
    {
        startTryBB = bb;
    }
    BBT *GetStartTryBB()
    {
        return startTryBB;
    }
    void SetEndTryBB(BBT *bb)
    {
        endTryBB = bb;
    }
    BBT *GetEndTryBB()
    {
        return endTryBB;
    }
    StmtNode *GetTryStmtNode()
    {
        return tryStmt;
    }
    MapleVector<BBT *> &GetEnclosedBBs()
    {
        return enclosedBBs;
    }
    size_t GetEnclosedBBsSize() const
    {
        return enclosedBBs.size();
    }
    /* index must be < GetEnclosedBBsSize(). */
    const BBT *GetEnclosedBBsElem(size_t index) const
    {
        DEBUG_ASSERT(index < enclosedBBs.size(), "out of range");
        return enclosedBBs[index];
    }
    void PushToEnclosedBBs(BBT &bb)
    {
        enclosedBBs.emplace_back(&bb);
    }
    MapleVector<BBT *> &GetLabeledBBsInTry()
    {
        return labeledBBsInTry;
    }
    MapleVector<BBT *> &GetBBsToRelocate()
    {
        return bbsToRelocate;
    }

private:
    MapleAllocator allocator;
    BBT *startTryBB = nullptr;   /* BB holding the OP_try statement. */
    BBT *endTryBB = nullptr;     /* BB holding the matching endtry. */
    StmtNode *tryStmt = nullptr; /* the OP_try statement itself. */
    MapleVector<BBT *> enclosedBBs;     /* all BBs between start and end. */
    MapleVector<BBT *> labeledBBsInTry; /* enclosed BBs that carry labels. */
    MapleVector<BBT *> bbsToRelocate;   /* BBs to be moved outside the region. */
};
/*
 * Rebuilds a basic-block view of a function body and canonicalizes its
 * try/catch regions: catch blocks are pulled out of try ranges, relocated
 * after the region, and control flow is patched accordingly.
 */
class TryCatchBlocksLower {
public:
    TryCatchBlocksLower(MemPool &memPool, BlockNode &body, MIRModule &mirModule)
        : memPool(memPool),
          allocator(&memPool),
          body(body),
          mirModule(mirModule),
          tryEndTryBlock(memPool),
          bbList(allocator.Adapter()),
          prevBBOfTry(allocator.Adapter()),
          firstStmtToBBMap(allocator.Adapter()),
          catchesSeenSoFar(allocator.Adapter())
    {
    }
    ~TryCatchBlocksLower() = default;
    /* Splits the statement list of `body` into BBT basic blocks (bbList). */
    void RecoverBasicBlock();
    /* Walks bbList and rewrites each try region found. */
    void TraverseBBList();
    /* Sanity-checks the try/catch structure; fatals on malformed input. */
    void CheckTryCatchPattern() const;
    void SetGenerateEHCode(bool val)
    {
        generateEHCode = val;
    }

private:
    MemPool &memPool;
    MapleAllocator allocator;
    BlockNode &body;
    MIRModule &mirModule;
    TryEndTryBlock tryEndTryBlock; /* state of the region currently processed. */
    StmtNode *bodyFirst = nullptr;
    bool bodyEndWithEndTry = false;
    bool generateEHCode = false;
    MapleVector<BBT *> bbList; /* all recovered BBs, in layout order. */
    MapleUnorderedMap<BBT *, BBT *> prevBBOfTry;
    MapleUnorderedMap<StmtNode *, BBT *> firstStmtToBBMap;
    MapleVector<BBT *> catchesSeenSoFar;
    void ProcessEnclosedBBBetweenTryEndTry();
    /* Re-links the BBs that remain after relocation. */
    void ConnectRemainBB();
    BBT *FindInsertAfterBB();
    void PlaceRelocatedBB(BBT &insertAfter);
    /* NOTE(review): name typo kept ("Palce"/"Sofar") — it is part of the
     * interface and renaming would break the out-of-line definition. */
    void PalceCatchSeenSofar(BBT &insertAfter);
    BBT *CreateNewBB(StmtNode *first, StmtNode *last);
    bool CheckAndProcessCatchNodeInCurrTryBlock(BBT &ebb, LabelIdx ebbLabel, uint32 index);
    BBT *CollectCatchAndFallthruUntilNextCatchBB(BBT *&ebb, uint32 &nextEnclosedIdx, std::vector<BBT *> &currBBThread);
    void WrapCatchWithTryEndTryBlock(std::vector<BBT *> &currBBThread, BBT *&nextBBThreadHead, uint32 &nextEnclosedIdx,
                                     bool hasMoveEndTry);
    void SwapEndTryBBAndCurrBBThread(const std::vector<BBT *> &currBBThread, bool &hasMoveEndTry,
                                     const BBT *nextBBThreadHead);
    void ProcessThreadTail(BBT &threadTail, BBT *const &nextBBThreadHead, bool hasMoveEndTry);
    static StmtNode *MoveCondGotoIntoTry(BBT &jtBB, BBT &condbrBB, const MapleVector<BBT *> &labeledBBsInTry);
    static BBT *FindTargetBBlock(LabelIdx idx, const std::vector<BBT *> &bbs);
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_BE_TRY_CATCH_H */

View File

@ -0,0 +1,14 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H
#include "aarch64_isa.h"
#include "types_def.h"
#include "becommon.h"
namespace maplebe {
using namespace maple;
namespace AArch64Abi {
/* AAPCS64: up to eight general-purpose and eight SIMD/FP registers carry
 * parameters, and the same register sets carry return values. */
constexpr int32 kNumIntParmRegs = 8;
constexpr int32 kNumFloatParmRegs = 8;
/* R19 is reserved for the yield-point mechanism. */
constexpr int32 kYieldPointReservedReg = 19;
constexpr uint32 kNormalUseOperandNum = 3;
constexpr uint32 kMaxInstrForCondBr = 260000; // approximately less than (2^18);
constexpr AArch64reg intReturnRegs[kNumIntParmRegs] = {R0, R1, R2, R3, R4, R5, R6, R7};
constexpr AArch64reg floatReturnRegs[kNumFloatParmRegs] = {V0, V1, V2, V3, V4, V5, V6, V7};
constexpr AArch64reg intParmRegs[kNumIntParmRegs] = {R0, R1, R2, R3, R4, R5, R6, R7};
constexpr AArch64reg floatParmRegs[kNumFloatParmRegs] = {V0, V1, V2, V3, V4, V5, V6, V7};
/*
 * Refer to ARM IHI 0055C_beta: Procedure Call Standard for
 * ARM 64-bit Architecture. Section 5.5
 */
/* Register classification predicates; defined out of line. */
bool IsAvailableReg(AArch64reg reg);
bool IsCalleeSavedReg(AArch64reg reg);
bool IsCallerSaveReg(AArch64reg reg);
bool IsParamReg(AArch64reg reg);
bool IsSpillReg(AArch64reg reg);
bool IsExtraSpillReg(AArch64reg reg);
bool IsSpillRegInRA(AArch64reg regNO, bool has3RegOpnd);
/* Returns the element type of a vector-array candidate and its size, or a
 * value indicating "not a vector array" — see the out-of-line definition. */
PrimType IsVectorArrayType(MIRType *ty, uint32 &arraySize);
} /* namespace AArch64Abi */
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ABI_H */

View File

@ -0,0 +1,67 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H
#include "alignment.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
/* An alignment region spans 2^kAlignRegionPower bytes; every AArch64
 * instruction is kAlignInsnLength (4) bytes; at most kAlignMaxNopNum nops
 * are inserted per alignment mark. */
constexpr uint32 kAlignRegionPower = 4;
constexpr uint32 kAlignInsnLength = 4;
constexpr uint32 kAlignMaxNopNum = 1;
/* Target-specific tuning parameters for BB alignment decisions. */
struct AArch64AlignInfo {
    /* if bb size in (16byte, 96byte) , the bb need align */
    uint32 alignMinBBSize = 16;
    uint32 alignMaxBBSize = 96;
    /* default loop & jump align power, related to the target machine. eg. 2^5 */
    uint32 loopAlign = 4;
    uint32 jumpAlign = 5;
    /* record func_align_power in CGFunc */
};
/*
 * AArch64 implementation of basic-block alignment analysis: finds loop
 * headers and jump targets worth aligning, and splits short branches that
 * the added padding would push out of range.
 */
class AArch64AlignAnalysis : public AlignAnalysis {
public:
    AArch64AlignAnalysis(CGFunc &func, MemPool &memPool) : AlignAnalysis(func, memPool)
    {
        /* downcast is safe by construction: this analysis is only created
         * for AArch64 CG functions (see AArch64CG::CreateAlignAnalysis). */
        aarFunc = static_cast<AArch64CGFunc *>(&func);
    }
    ~AArch64AlignAnalysis() override = default;
    void FindLoopHeader() override;
    void FindJumpTarget() override;
    void ComputeLoopAlign() override;
    void ComputeJumpAlign() override;
    void ComputeCondBranchAlign() override;
    /* Returns true if any conditional branch was marked for alignment. */
    bool MarkCondBranchAlign();
    /* Returns true if any short branch must be split due to padding. */
    bool MarkShortBranchSplit();
    void AddNopAfterMark();
    void UpdateInsnId();
    uint32 GetAlignRange(uint32 alignedVal, uint32 addr) const;
    /* filter condition */
    bool IsIncludeCall(BB &bb) override;
    bool IsInSizeRange(BB &bb) override;
    bool HasFallthruEdge(BB &bb) override;
    bool IsInSameAlignedRegion(uint32 addr1, uint32 addr2, uint32 alignedRegionSize) const;

private:
    AArch64CGFunc *aarFunc = nullptr;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ALIGNMENT_H */

View File

@ -0,0 +1,66 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H
#include "args.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
using namespace maple;
/* Everything needed to materialize one incoming register argument:
 * source register, type/size info, destination stack slot, and flags for
 * the two-register / paired-store special cases. */
struct ArgInfo {
    AArch64reg reg;      /* register the argument arrives in. */
    MIRType *mirTy;
    uint32 symSize;      /* size of the symbol backing the argument. */
    uint32 stkSize;      /* size of its stack slot. */
    RegType regType;
    MIRSymbol *sym;
    const AArch64SymbolAlloc *symLoc; /* where the symbol was allocated. */
    uint8 memPairSecondRegSize; /* struct arg requiring two regs, size of 2nd reg */
    bool doMemPairOpt;          /* candidate for a single stp of two args. */
    bool createTwoStores;
    bool isTwoRegParm;          /* argument occupies two registers. */
};
/*
 * Prologue pass that stores incoming register arguments to their stack
 * homes (or virtual registers), pairing adjacent stores into stp where
 * possible.
 */
class AArch64MoveRegArgs : public MoveRegArgs {
public:
    explicit AArch64MoveRegArgs(CGFunc &func) : MoveRegArgs(func) {}
    ~AArch64MoveRegArgs() override = default;
    /* Entry point: emits all argument moves for the current function. */
    void Run() override;

private:
    RegOperand *baseReg = nullptr;          /* cached base for offset addressing. */
    const MemSegment *lastSegment = nullptr; /* segment baseReg was computed for. */
    /* Collects which formals arrive in registers (and any paired/FP regs). */
    void CollectRegisterArgs(std::map<uint32, AArch64reg> &argsList, std::vector<uint32> &indexList,
                             std::map<uint32, AArch64reg> &pairReg, std::vector<uint32> &numFpRegs,
                             std::vector<uint32> &fpSize) const;
    ArgInfo GetArgInfo(std::map<uint32, AArch64reg> &argsList, std::vector<uint32> &numFpRegs,
                       std::vector<uint32> &fpSize, uint32 argIndex) const;
    /* True when both args land in the same memory segment (stp candidate). */
    bool IsInSameSegment(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo) const;
    void GenOneInsn(const ArgInfo &argInfo, RegOperand &baseOpnd, uint32 stBitSize, AArch64reg dest,
                    int32 offset) const;
    void GenerateStpInsn(const ArgInfo &firstArgInfo, const ArgInfo &secondArgInfo);
    void GenerateStrInsn(const ArgInfo &argInfo, AArch64reg reg2, uint32 numFpRegs, uint32 fpSize);
    void MoveRegisterArgs();
    void MoveVRegisterArgs();
    void MoveLocalRefVarToRefLocals(MIRSymbol &mirSym) const;
    void LoadStackArgsToVReg(MIRSymbol &mirSym) const;
    void MoveArgsToVReg(const CCLocInfo &ploc, MIRSymbol &mirSym) const;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ARGS_H */

View File

@ -0,0 +1,272 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H
#include "types_def.h"
#include "becommon.h"
#include "call_conv.h"
#include "aarch64_abi.h"
#include "abi.h"
namespace maplebe {
using namespace maple;
/*
* We use the names used in ARM IHI 0055C_beta. $ 5.4.2.
* nextGeneralRegNO (= _int_parm_num) : Next General-purpose Register number
* nextFloatRegNO (= _float_parm_num): Next SIMD and Floating-point Register Number
* nextStackArgAdress (= _last_memOffset): Next Stacked Argument Address
* for processing an incoming or outgoing parameter list
*/
/*
 * Standard AAPCS64 parameter/return location assignment.  Tracks the next
 * free GP register, FP register and stack offset while a parameter list
 * is walked; Init() must be called before each new list.
 */
class AArch64CallConvImpl : public CCImpl {
public:
    explicit AArch64CallConvImpl(BECommon &be) : CCImpl(), beCommon(be) {}
    ~AArch64CallConvImpl() = default;
    /* Return size of aggregate structure copy on stack. */
    int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr) override;
    int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc) override;
    /* for lmbc */
    uint32 FloatParamRegRequired(MIRStructType &structType, uint32 &fpSize);
    /* return value related */
    void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc) override;
    void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const override;
    /* AAPCS64: results too large for registers are returned through memory
     * addressed by X8 (the indirect result register). */
    void SetupToReturnThroughMemory(CCLocInfo &pLoc) const
    {
        pLoc.regCount = 1;
        pLoc.reg0 = R8;
        pLoc.primTypeOfReg0 = PTY_u64;
    }
    /* Resets allocation state before scanning a new parameter list. */
    void Init() override
    {
        paramNum = 0;
        nextGeneralRegNO = 0;
        nextFloatRegNO = 0;
        nextStackArgAdress = 0;
    }

private:
    BECommon &beCommon;
    uint64 paramNum = 0;        /* number of all types of parameters processed so far */
    int32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */
    uint32 nextFloatRegNO = 0;  /* number of float parameters processed so far */
    /* Next free GP parameter register, or kRinvalid when all 8 are used. */
    AArch64reg AllocateGPRegister()
    {
        DEBUG_ASSERT(nextGeneralRegNO >= 0, "nextGeneralRegNO can not be neg");
        return (nextGeneralRegNO < AArch64Abi::kNumIntParmRegs) ? AArch64Abi::intParmRegs[nextGeneralRegNO++]
                                                                : kRinvalid;
    }
    /* Allocates a consecutive GP register pair (for small aggregates); on
     * failure only reg0 is set, to kRinvalid. */
    void AllocateTwoGPRegisters(CCLocInfo &pLoc)
    {
        if ((nextGeneralRegNO + 1) < AArch64Abi::kNumIntParmRegs) {
            pLoc.reg0 = AArch64Abi::intParmRegs[nextGeneralRegNO++];
            pLoc.reg1 = AArch64Abi::intParmRegs[nextGeneralRegNO++];
        } else {
            pLoc.reg0 = kRinvalid;
        }
    }
    /* Next free SIMD/FP parameter register, or kRinvalid when exhausted. */
    AArch64reg AllocateSIMDFPRegister()
    {
        return (nextFloatRegNO < AArch64Abi::kNumFloatParmRegs) ? AArch64Abi::floatParmRegs[nextFloatRegNO++]
                                                                : kRinvalid;
    }
    /* Allocates `num` (1..4) consecutive FP registers for an HFA-style
     * aggregate; all-or-nothing — on failure reg0 is set to kRinvalid. */
    void AllocateNSIMDFPRegisters(CCLocInfo &ploc, uint32 num)
    {
        if ((nextFloatRegNO + num - 1) < AArch64Abi::kNumFloatParmRegs) {
            switch (num) {
                case kOneRegister:
                    ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    break;
                case kTwoRegister:
                    ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    break;
                case kThreeRegister:
                    ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg2 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    break;
                case kFourRegister:
                    ploc.reg0 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg1 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg2 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    ploc.reg3 = AArch64Abi::floatParmRegs[nextFloatRegNO++];
                    break;
                default:
                    CHECK_FATAL(0, "AllocateNSIMDFPRegisters: unsupported");
            }
        } else {
            ploc.reg0 = kRinvalid;
        }
    }
    /* AAPCS64 C.8: round the Next General Register Number up to even
     * (used for arguments requiring 16-byte alignment). */
    void RoundNGRNUpToNextEven()
    {
        nextGeneralRegNO = static_cast<int32>((nextGeneralRegNO + 1) & ~static_cast<int32>(1));
    }
    int32 ProcessPtyAggWhenLocateNextParm(MIRType &mirType, CCLocInfo &pLoc, uint64 &typeSize, int32 typeAlign);
};
/*
 * Calling convention for WebKit-JS calls: a single GP argument register
 * (R0) and no FP argument registers, but the full set of eight GP and
 * eight FP return registers.
 */
class AArch64WebKitJSCC : public CCImpl {
public:
    explicit AArch64WebKitJSCC(BECommon &be) : CCImpl(), beCommon(be) {}
    ~AArch64WebKitJSCC() = default;
    /* Return size of aggregate structure copy on stack. */
    int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr) override;
    int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc) override;
    /* return value related */
    void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc) override;
    // invalid interface
    void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const override;
    /* Resets allocation state before scanning a new parameter list. */
    void Init() override
    {
        nextGeneralRegNO = 0;
        nextFloatRegNO = 0;
        nextStackArgAdress = 0;
    }

private:
    BECommon &beCommon;
    int32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */
    uint32 nextFloatRegNO = 0;  /* number of float parameters processed so far */
    static constexpr int32 kNumIntRetRegs = 8;
    static constexpr int32 kNumFloatRetRegs = 8;
    static constexpr int32 kNumIntParmRegs = 1;
    static constexpr int32 kNumFloatParmRegs = 0;
    static constexpr AArch64reg intReturnRegs[kNumIntRetRegs] = {R0, R1, R2, R3, R4, R5, R6, R7};
    static constexpr AArch64reg floatReturnRegs[kNumFloatRetRegs] = {V0, V1, V2, V3, V4, V5, V6, V7};
    static constexpr AArch64reg intParmRegs[kNumIntParmRegs] = {R0};
    static constexpr AArch64reg floatParmRegs[kNumFloatParmRegs] = {};
    /* Classify a type into machine argument/return classes. */
    int32 ClassificationArg(const BECommon &be, MIRType &mirType, std::vector<ArgumentClass> &classes) const;
    int32 ClassificationRet(const BECommon &be, MIRType &mirType, std::vector<ArgumentClass> &classes) const;
    /* Next free GP argument register, or kRinvalid once R0 is taken. */
    AArch64reg AllocateGPParmRegister()
    {
        DEBUG_ASSERT(nextGeneralRegNO >= 0, "nextGeneralRegNO can not be neg");
        return (nextGeneralRegNO < AArch64WebKitJSCC::kNumIntParmRegs)
                   ? AArch64WebKitJSCC::intParmRegs[nextGeneralRegNO++]
                   : kRinvalid;
    }
    /* Next free GP return register, or kRinvalid when all eight are taken. */
    AArch64reg AllocateGPRetRegister()
    {
        DEBUG_ASSERT(nextGeneralRegNO >= 0, "nextGeneralRegNO can not be neg");
        return (nextGeneralRegNO < AArch64WebKitJSCC::kNumIntRetRegs)
                   ? AArch64WebKitJSCC::intReturnRegs[nextGeneralRegNO++]
                   : kRinvalid;
    }
    /* Next free FP return register, or kRinvalid when all eight are taken.
     * BUGFIX: the bound was kNumFloatParmRegs (0), which made this always
     * return kRinvalid even though floatReturnRegs has kNumFloatRetRegs
     * (8) entries; it now checks the return-register count it indexes. */
    AArch64reg AllocateSIMDFPRetRegister()
    {
        return (nextFloatRegNO < AArch64WebKitJSCC::kNumFloatRetRegs)
                   ? AArch64WebKitJSCC::floatReturnRegs[nextFloatRegNO++]
                   : kRinvalid;
    }
};
/*
 * GHC (Glasgow Haskell Compiler style) calling convention: parameters go
 * in callee-saved GP registers R19-R26 and dedicated FP register groups
 * per float width (f32 / f64 / f128); no stack-spill of FP groups.
 */
class GHCCC : public CCImpl {
public:
    explicit GHCCC(BECommon &be) : CCImpl(), beCommon(be) {}
    ~GHCCC() = default;
    /* Return size of aggregate structure copy on stack. */
    int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false, MIRFunction *func = nullptr) override;
    int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc) override;
    /* return value related */
    void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc) override;
    // invalid interface
    void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const override;
    /* Resets allocation state before scanning a new parameter list. */
    void Init() override
    {
        nextGeneralRegNO = 0;
        nextFloatRegNOF32 = 0;
        nextFloatRegNOF64 = 0;
        nextFloatRegNOF128 = 0;
        nextStackArgAdress = 0;
    }

private:
    BECommon &beCommon;
    int32 nextGeneralRegNO = 0; /* number of integer parameters processed so far */
    /* separate allocation cursors per float width. */
    uint32 nextFloatRegNOF32 = 0;
    uint32 nextFloatRegNOF64 = 0;
    uint32 nextFloatRegNOF128 = 0;
    static constexpr int32 kNumIntParmRegs = 8;
    static constexpr int32 kNumFloatParmRegsF32 = 4;
    static constexpr int32 kNumFloatParmRegsF64 = 4;
    static constexpr int32 kNumFloatParmRegsF128 = 2;
    static constexpr AArch64reg intParmRegs[kNumIntParmRegs] = {R19, R20, R21, R22, R23, R24, R25, R26};
    static constexpr AArch64reg floatParmRegsF32[kNumFloatParmRegsF32] = {V8, V9, V10, V11};
    static constexpr AArch64reg floatParmRegsF64[kNumFloatParmRegsF64] = {V12, V13, V14, V15};
    static constexpr AArch64reg floatParmRegsF128[kNumFloatParmRegsF128] = {V4, V5};
    int32 ClassificationArg(const BECommon &be, MIRType &mirType, std::vector<ArgumentClass> &classes) const;
    /* Next free GP parameter register, or kRinvalid when exhausted. */
    AArch64reg AllocateGPParmRegister()
    {
        DEBUG_ASSERT(nextGeneralRegNO >= 0, "nextGeneralRegNO can not be neg");
        return (nextGeneralRegNO < GHCCC::kNumIntParmRegs) ? GHCCC::intParmRegs[nextGeneralRegNO++] : kRinvalid;
    }
    AArch64reg AllocateSIMDFPParmRegisterF32()
    {
        return (nextFloatRegNOF32 < GHCCC::kNumFloatParmRegsF32) ? GHCCC::floatParmRegsF32[nextFloatRegNOF32++]
                                                                 : kRinvalid;
    }
    AArch64reg AllocateSIMDFPParmRegisterF64()
    {
        return (nextFloatRegNOF64 < GHCCC::kNumFloatParmRegsF64) ? GHCCC::floatParmRegsF64[nextFloatRegNOF64++]
                                                                 : kRinvalid;
    }
    AArch64reg AllocateSIMDFPParmRegisterF128()
    {
        return (nextFloatRegNOF128 < GHCCC::kNumFloatParmRegsF128) ? GHCCC::floatParmRegsF128[nextFloatRegNOF128++]
                                                                   : kRinvalid;
    }
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CALL_CONV_H */

View File

@ -0,0 +1,32 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * X-macro table of AArch64 condition codes: CONDCODE(name, encoding).
 * The including file defines CONDCODE before including this list; the
 * 4-bit encodings match the AArch64 condition field (NZCV-based).
 */
CONDCODE(EQ, 0x0) /* equal */
CONDCODE(NE, 0x1) /* not equal */
CONDCODE(CS, 0x2) /* carry set (== HS) */
CONDCODE(HS, 0x2) /* unsigned higher or same (== CS) */
CONDCODE(CC, 0x3) /* carry clear (== LO) */
CONDCODE(LO, 0x3) /* Unsigned lower (== CC) */
CONDCODE(MI, 0x4) /* Minus or negative result */
CONDCODE(PL, 0x5) /* positive or zero result */
CONDCODE(VS, 0x6) /* overflow */
CONDCODE(VC, 0x7) /* no overflow */
CONDCODE(HI, 0x8) /* unsigned higher */
CONDCODE(LS, 0x9) /* unsigned lower or same */
CONDCODE(GE, 0xa) /* signed greater than or equal */
CONDCODE(LT, 0xb) /* signed less than */
CONDCODE(GT, 0xc) /* signed greater than */
CONDCODE(LE, 0xd) /* signed less than or equal */
CONDCODE(AL, 0xe) /* always, this is the default. usually omitted. */

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H
#include "cfgo.h"
namespace maplebe {
/* AArch64 CFG optimizer: only supplies the target's pattern set. */
class AArch64CFGOptimizer : public CFGOptimizer {
public:
    AArch64CFGOptimizer(CGFunc &func, MemPool &memPool) : CFGOptimizer(func, memPool) {}
    ~AArch64CFGOptimizer() = default;
    /* Registers the AArch64-specific optimization patterns. */
    void InitOptimizePatterns() override;
};
/* Branch-flip pattern specialized with AArch64 opcodes. */
class AArch64FlipBRPattern : public FlipBRPattern {
public:
    explicit AArch64FlipBRPattern(CGFunc &func) : FlipBRPattern(func) {}
    ~AArch64FlipBRPattern() = default;

private:
    /* Operand index of the jump target within the given branch insn. */
    uint32 GetJumpTargetIdx(const Insn &insn) override;
    /* Returns the condition-inverted counterpart of a branch mop. */
    MOperator FlipConditionOp(MOperator flippedOp) override;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CFGO_H */

View File

@ -0,0 +1,248 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H
#include "cg.h"
#include "aarch64_cgfunc.h"
#include "aarch64_ssa.h"
#include "aarch64_phi_elimination.h"
#include "aarch64_prop.h"
#include "aarch64_dce.h"
#include "aarch64_live.h"
#include "aarch64_reaching.h"
#include "aarch64_args.h"
#include "aarch64_alignment.h"
#include "aarch64_validbit_opt.h"
#include "aarch64_reg_coalesce.h"
#include "aarch64_cfgo.h"
namespace maplebe {
/* Branch-reach and instruction-size constants shared by AArch64 passes. */
constexpr int64 kShortBRDistance = (8 * 1024);
constexpr int64 kNegativeImmLowerLimit = -4096;
constexpr int32 kIntRegTypeNum = 5;
constexpr uint32 kAlignPseudoSize = 3;
constexpr uint32 kInsnSize = 4;
constexpr uint32 kAlignMovedFlag = 31;
/* Supporting classes for GCTIB merging */
/* Hash key for GCTIB merging: an RC header word plus the GC bitmap words.
 * Two objects with equal keys share one emitted GCTIB pattern. */
class GCTIBKey {
public:
    GCTIBKey(MapleAllocator &allocator, uint32 rcHeader, std::vector<uint64> &patternWords)
        : header(rcHeader), bitMapWords(allocator.Adapter())
    {
        /* copy the caller's words into pool-allocated storage. */
        (void)bitMapWords.insert(bitMapWords.begin(), patternWords.begin(), patternWords.end());
    }
    ~GCTIBKey() = default;
    uint32 GetHeader() const
    {
        return header;
    }
    const MapleVector<uint64> &GetBitmapWords() const
    {
        return bitMapWords;
    }

private:
    uint32 header;
    MapleVector<uint64> bitMapWords;
};
/* Hash functor for GCTIBKey pointers: hashes only the header word, so
 * keys differing just in bitmap words collide (resolved by EqualFn). */
class Hasher {
public:
    size_t operator()(const GCTIBKey *key) const
    {
        CHECK_NULL_FATAL(key);
        size_t hash = key->GetHeader();
        return hash;
    }
};
class EqualFn {
public:
bool operator()(const GCTIBKey *firstKey, const GCTIBKey *secondKey) const
{
CHECK_NULL_FATAL(firstKey);
CHECK_NULL_FATAL(secondKey);
const MapleVector<uint64> &firstWords = firstKey->GetBitmapWords();
const MapleVector<uint64> &secondWords = secondKey->GetBitmapWords();
if ((firstKey->GetHeader() != secondKey->GetHeader()) || (firstWords.size() != secondWords.size())) {
return false;
}
for (size_t i = 0; i < firstWords.size(); ++i) {
if (firstWords[i] != secondWords[i]) {
return false;
}
}
return true;
}
};
class GCTIBPattern {
public:
GCTIBPattern(GCTIBKey &patternKey, MemPool &mp) : name(&mp)
{
key = &patternKey;
id = GetId();
name = GCTIB_PREFIX_STR + std::string("PTN_") + std::to_string(id);
}
~GCTIBPattern() = default;
int GetId() const
{
static int id = 0;
return id++;
}
std::string GetName() const
{
DEBUG_ASSERT(!name.empty(), "null name check!");
return std::string(name.c_str());
}
void SetName(const std::string &ptnName)
{
name = ptnName;
}
private:
int id;
MapleString name;
GCTIBKey *key;
};
/* sub Target info & implement */
/* sub Target info & implement */
/*
 * AArch64 code-generator backend: the factory for all AArch64-specific
 * passes and analyses, plus GCTIB pattern merging state and target
 * instruction metadata (kMd, register name tables).
 */
class AArch64CG : public CG {
public:
    AArch64CG(MIRModule &mod, const CGOptions &opts, const std::vector<std::string> &nameVec,
              const std::unordered_map<std::string, std::vector<std::string>> &patternMap)
        : CG(mod, opts),
          ehExclusiveNameVec(nameVec),
          cyclePatternMap(patternMap),
          keyPatternMap(allocator.Adapter()),
          symbolPatternMap(allocator.Adapter())
    {
    }
    ~AArch64CG() override = default;
    /* Creates the per-function AArch64 CGFunc instance. */
    CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &mirFunc, BECommon &bec, MemPool &memPool, StackMemPool &stackMp,
                         MapleAllocator &mallocator, uint32 funcId) override
    {
        return memPool.New<AArch64CGFunc>(mod, *this, mirFunc, bec, memPool, stackMp, mallocator, funcId);
    }
    /* Registers the AArch64 pass pipeline with the phase manager. */
    void EnrollTargetPhases(MaplePhaseManager *pm) const override;
    const std::unordered_map<std::string, std::vector<std::string>> &GetCyclePatternMap() const
    {
        return cyclePatternMap;
    }
    void GenerateObjectMaps(BECommon &beCommon) override;
    bool IsExclusiveFunc(MIRFunction &) override;
    /* GCTIB merging: reuses an existing pattern symbol for an identical
     * (header, bitmap) pair, or creates a new representative one. */
    void FindOrCreateRepresentiveSym(std::vector<uint64> &bitmapWords, uint32 rcHeader, const std::string &name);
    void CreateRefSymForGlobalPtn(GCTIBPattern &ptn) const;
    Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) override;
    PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) override;
    std::string FindGCTIBPatternName(const std::string &name) const override;
    /* Factory methods below hand out the AArch64 implementations of the
     * generic analyses/passes, allocated in the caller's MemPool. */
    LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64LiveAnalysis>(f, mp);
    }
    ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64ReachingDefinition>(f, mp);
    }
    MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64MoveRegArgs>(f);
    }
    AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64AlignAnalysis>(f, mp);
    }
    CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const override
    {
        return mp.New<AArch64CGSSAInfo>(f, da, mp, tmp);
    }
    LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64LiveIntervalAnalysis>(f, mp);
    };
    PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override
    {
        return mp.New<AArch64PhiEliminate>(f, ssaInfo, mp);
    }
    CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const override
    {
        return mp.New<AArch64Prop>(mp, f, ssaInfo, ll);
    }
    CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override
    {
        return mp.New<AArch64Dce>(mp, f, ssaInfo);
    }
    ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const override
    {
        return mp.New<AArch64ValidBitOpt>(f, ssaInfo);
    }
    CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const override
    {
        return mp.New<AArch64CFGOptimizer>(f, mp);
    }
    /* Return the copy operand id of reg1 if it is an insn who just do copy from reg1 to reg2.
     * i. mov reg2, reg1
     * ii. add/sub reg2, reg1, 0/zero register
     * iii. mul reg2, reg1, 1
     */
    bool IsEffectiveCopy(Insn &insn) const final;
    bool IsTargetInsn(MOperator mOp) const final;
    bool IsClinitInsn(MOperator mOp) const final;
    bool IsPseudoInsn(MOperator mOp) const final;
    void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const final;
    /* Static machine description for the given machine operator. */
    const InsnDesc &GetTargetMd(MOperator mOp) const final
    {
        return kMd[mOp];
    }
    static const InsnDesc kMd[kMopLast];
    /* Indices into intRegNames: one name table per register width/class. */
    enum : uint8 { kR8List, kR16List, kR32List, kR64List, kV64List };
    static std::array<std::array<const std::string, kAllRegNum>, kIntRegTypeNum> intRegNames;
    static std::array<const std::string, kAllRegNum> vectorRegNames;

private:
    const std::vector<std::string> &ehExclusiveNameVec;
    const std::unordered_map<std::string, std::vector<std::string>> &cyclePatternMap;
    /* GCTIB dedup maps: key -> representative pattern, symbol -> pattern. */
    MapleUnorderedMap<GCTIBKey *, GCTIBPattern *, Hasher, EqualFn> keyPatternMap;
    MapleUnorderedMap<std::string, GCTIBPattern *> symbolPatternMap;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_CG_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_AARCH64_DCE_H
#define MAPLEBE_INCLUDE_AARCH64_DCE_H
#include "cg_dce.h"
namespace maplebe {
class AArch64Dce : public CGDce {
public:
AArch64Dce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : CGDce(mp, f, sInfo) {}
~AArch64Dce() override = default;
private:
bool RemoveUnuseDef(VRegVersion &defVersion) override;
};
class A64DeleteRegUseVisitor : public DeleteRegUseVisitor {
public:
A64DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : DeleteRegUseVisitor(cgSSAInfo, dInsnID) {}
~A64DeleteRegUseVisitor() override = default;
private:
void Visit(RegOperand *v) final;
void Visit(ListOperand *v) final;
void Visit(MemOperand *v) final;
void Visit(PhiOperand *v) final;
};
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_AARCH64_DCE_H */

View File

@ -0,0 +1,109 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H
#include "dependence.h"
#include "cgfunc.h"
#include "aarch64_operand.h"
namespace maplebe {
class AArch64DepAnalysis : public DepAnalysis {
public:
AArch64DepAnalysis(CGFunc &func, MemPool &mp, MAD &mad, bool beforeRA);
~AArch64DepAnalysis() override = default;
void Run(BB &bb, MapleVector<DepNode *> &nodes) override;
const std::string &GetDepTypeName(DepType depType) const override;
void DumpDepNode(DepNode &node) const override;
void DumpDepLink(DepLink &link, const DepNode *node) const override;
protected:
void Init(BB &bb, MapleVector<DepNode *> &nodes) override;
void ClearAllDepData() override;
void AnalysisAmbiInsns(BB &bb) override;
void AppendRegUseList(Insn &insn, regno_t regNO) override;
void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) override;
void RemoveSelfDeps(Insn &insn) override;
void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) override;
void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator,
bool isMemCombine = false) override;
void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) override;
void BuildDepsUseReg(Insn &insn, regno_t regNO) override;
void BuildDepsDefReg(Insn &insn, regno_t regNO) override;
void BuildDepsAmbiInsn(Insn &insn) override;
void BuildDepsMayThrowInsn(Insn &insn) override;
bool NeedBuildDepsMem(const MemOperand &memOpnd, const MemOperand *nextMemOpnd, const Insn &memInsn) const;
void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) override;
void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) override;
void BuildAntiDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd);
void BuildOutputDepsDefStackMem(Insn &insn, MemOperand &memOpnd, const MemOperand *nextMemOpnd);
void BuildDepsMemBar(Insn &insn) override;
void BuildDepsSeparator(DepNode &newSepNode, MapleVector<DepNode *> &nodes) override;
void BuildDepsControlAll(DepNode &depNode, const MapleVector<DepNode *> &nodes) override;
void BuildDepsAccessStImmMem(Insn &insn, bool isDest) override;
void BuildCallerSavedDeps(Insn &insn) override;
void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) override;
void BuildStackPassArgsDeps(Insn &insn) override;
void BuildDepsDirtyStack(Insn &insn) override;
void BuildDepsUseStack(Insn &insn) override;
void BuildDepsDirtyHeap(Insn &insn) override;
DepNode *BuildSeparatorNode() override;
bool IfInAmbiRegs(regno_t regNO) const override;
bool IsFrameReg(const RegOperand &) const override;
private:
MemOperand *GetNextMemOperand(const Insn &insn, const MemOperand &aarchMemOpnd) const;
void BuildMemOpndDependency(Insn &insn, Operand &opnd, const OpndDesc &regProp);
void BuildOpndDependency(Insn &insn);
void BuildSpecialInsnDependency(Insn &insn, DepNode &depNode, const MapleVector<DepNode *> &nodes);
void SeperateDependenceGraph(MapleVector<DepNode *> &nodes, uint32 &nodeSum);
DepNode *GenerateDepNode(Insn &insn, MapleVector<DepNode *> &nodes, int32 nodeSum,
const MapleVector<Insn *> &comments);
void BuildAmbiInsnDependency(Insn &insn);
void BuildMayThrowInsnDependency(Insn &insn);
void UpdateRegUseAndDef(Insn &insn, const DepNode &depNode, MapleVector<DepNode *> &nodes);
void UpdateStackAndHeapDependency(DepNode &depNode, Insn &insn, const Insn &locInsn);
MemOperand *BuildNextMemOperandByByteSize(const MemOperand &aarchMemOpnd, uint32 byteSize) const;
void AddDependence4InsnInVectorByType(MapleVector<Insn *> &insns, Insn &insn, const DepType &type);
void AddDependence4InsnInVectorByTypeAndCmp(MapleVector<Insn *> &insns, Insn &insn, const DepType &type);
void ReplaceDepNodeWithNewInsn(DepNode &firstNode, DepNode &secondNode, Insn &newInsn, bool isFromClinit) const;
void ClearDepNodeInfo(DepNode &depNode) const;
void AddEndSeparatorNode(MapleVector<DepNode *> &nodes);
Insn **regDefs = nullptr;
RegList **regUses = nullptr;
Insn *memBarInsn = nullptr;
bool hasAmbiRegs = false;
Insn *lastCallInsn = nullptr;
uint32 separatorIndex = 0;
Insn *lastFrameDef = nullptr;
MapleVector<Insn *> stackUses;
MapleVector<Insn *> stackDefs;
MapleVector<Insn *> heapUses;
MapleVector<Insn *> heapDefs;
MapleVector<Insn *> mayThrows;
/* instructions that can not across may throw instructions. */
MapleVector<Insn *> ambiInsns;
/* register number that catch bb and cleanup bb uses. */
MapleSet<regno_t> ehInRegs;
/* the bb to be scheduling currently */
BB *curBB = nullptr;
};
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_DEPENDENCE_H */

View File

@ -0,0 +1,92 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H
#include "ebo.h"
#include "aarch64_operand.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
class AArch64Ebo : public Ebo {
public:
AArch64Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase)
: Ebo(func, memPool, live, before, phase), callerSaveRegTable(eboAllocator.Adapter())
{
a64CGFunc = static_cast<AArch64CGFunc *>(cgFunc);
}
enum ExtOpTable : uint8;
~AArch64Ebo() override = default;
protected:
MapleVector<RegOperand *> callerSaveRegTable;
AArch64CGFunc *a64CGFunc;
int32 GetOffsetVal(const MemOperand &mem) const override;
OpndInfo *OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd) override;
const RegOperand &GetRegOperand(const Operand &opnd) const override;
bool IsGlobalNeeded(Insn &insn) const override;
bool IsDecoupleStaticOp(Insn &insn) const override;
bool OperandEqSpecial(const Operand &op1, const Operand &op2) const override;
bool DoConstProp(Insn &insn, uint32 i, Operand &opnd) override;
bool Csel2Cset(Insn &insn, const MapleVector<Operand *> &opnds) override;
bool SimplifyConstOperand(Insn &insn, const MapleVector<Operand *> &opnds,
const MapleVector<OpndInfo *> &opndInfo) override;
void BuildCallerSaveRegisters() override;
void DefineAsmRegisters(InsnInfo &insnInfo) override;
void DefineCallerSaveRegisters(InsnInfo &insnInfo) override;
void DefineReturnUseRegister(Insn &insn) override;
void DefineCallUseSpecialRegister(Insn &insn) override;
void DefineClinitSpecialRegisters(InsnInfo &insnInfo) override;
bool CombineExtensionAndLoad(Insn *insn, const MapleVector<OpndInfo *> &origInfos, ExtOpTable idx, bool is64Bits);
bool SpecialSequence(Insn &insn, const MapleVector<OpndInfo *> &origInfos) override;
bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const override;
bool IsPseudoRet(Insn &insn) const override;
bool ChangeLdrMop(Insn &insn, const Operand &opnd) const override;
bool IsAdd(const Insn &insn) const override;
bool IsFmov(const Insn &insn) const override;
bool IsClinitCheck(const Insn &insn) const override;
bool IsLastAndBranch(BB &bb, Insn &insn) const override;
bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const override;
bool ResIsNotDefAndUse(Insn &insn) const override;
bool LiveOutOfBB(const Operand &opnd, const BB &bb) const override;
bool IsInvalidReg(const RegOperand &opnd) const override;
bool IsZeroRegister(const Operand &opnd) const override;
bool IsConstantImmOrReg(const Operand &opnd) const override;
bool OperandLiveAfterInsn(const RegOperand &regOpnd, Insn &insn) const;
bool ValidPatternForCombineExtAndLoad(OpndInfo *prevOpndInfo, Insn *insn, MOperator newMop, MOperator oldMop,
const RegOperand &opnd);
private:
/* The number of elements in callerSaveRegTable must less then 45. */
static constexpr int32 kMaxCallerSaveReg = 45;
MOperator ExtLoadSwitchBitSize(MOperator lowMop) const;
bool CheckCondCode(const CondOperand &cond) const;
bool CombineMultiplyAdd(Insn *insn, const Insn *prevInsn, InsnInfo *insnInfo, Operand *addOpnd, bool is64bits,
bool isFp) const;
bool CheckCanDoMadd(Insn *insn, OpndInfo *opndInfo, int32 pos, bool is64bits, bool isFp);
bool CombineMultiplySub(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const;
bool CombineMultiplyNeg(Insn *insn, OpndInfo *opndInfo, bool is64bits, bool isFp) const;
bool SimplifyBothConst(BB &bb, Insn &insn, const ImmOperand &immOperand0, const ImmOperand &immOperand1,
uint32 opndSize) const;
ConditionCode GetReverseCond(const CondOperand &cond) const;
bool CombineLsrAnd(Insn &insn, const OpndInfo &opndInfo, bool is64bits, bool isFp) const;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EBO_H */

View File

@ -0,0 +1,69 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H
#include "asm_emit.h"
namespace maplebe {
using namespace maple;
class AArch64AsmEmitter : public AsmEmitter {
public:
AArch64AsmEmitter(CG &cg, const std::string &asmFileName) : AsmEmitter(cg, asmFileName) {}
~AArch64AsmEmitter() = default;
void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override;
void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override;
void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) override;
void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) override;
void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) override;
void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) override;
void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) override;
void RecordRegInfo(FuncEmitInfo &funcEmitInfo) const;
void Run(FuncEmitInfo &funcEmitInfo) override;
private:
/* cfi & dbg need target info ? */
void EmitAArch64CfiInsn(Emitter &emitter, const Insn &insn) const;
void EmitAArch64DbgInsn(Emitter &emitter, const Insn &insn) const;
void EmitAArch64Insn(Emitter &emitter, Insn &insn) const;
void EmitClinit(Emitter &emitter, const Insn &insn) const;
void EmitAdrpLdr(Emitter &emitter, const Insn &insn) const;
void EmitCounter(Emitter &emitter, const Insn &insn) const;
void EmitInlineAsm(Emitter &emitter, const Insn &insn) const;
void EmitClinitTail(Emitter &emitter, const Insn &insn) const;
void EmitLazyLoad(Emitter &emitter, const Insn &insn) const;
void EmitAdrpLabel(Emitter &emitter, const Insn &insn) const;
void EmitLazyLoadStatic(Emitter &emitter, const Insn &insn) const;
void EmitArrayClassCacheLoad(Emitter &emitter, const Insn &insn) const;
void EmitGetAndAddInt(Emitter &emitter, const Insn &insn) const;
void EmitGetAndSetInt(Emitter &emitter, const Insn &insn) const;
void EmitCompareAndSwapInt(Emitter &emitter, const Insn &insn) const;
void EmitStringIndexOf(Emitter &emitter, const Insn &insn) const;
void EmitLazyBindingRoutine(Emitter &emitter, const Insn &insn) const;
void EmitCheckThrowPendingException(Emitter &emitter, Insn &insn) const;
void EmitCTlsDescRel(Emitter &emitter, const Insn &insn) const;
void EmitCTlsDescCall(Emitter &emitter, const Insn &insn) const;
void EmitSyncLockTestSet(Emitter &emitter, const Insn &insn) const;
void PrepareVectorOperand(RegOperand *regOpnd, uint32 &compositeOpnds, Insn &insn) const;
bool CheckInsnRefField(const Insn &insn, size_t opndIndex) const;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_EMITTER_H */

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H
#include <vector>
#include "aarch64_cg.h"
#include "optimize_common.h"
#include "mir_builder.h"
namespace maplebe {
class AArch64FixShortBranch {
public:
explicit AArch64FixShortBranch(CGFunc *cf) : cgFunc(cf) {}
~AArch64FixShortBranch() = default;
void FixShortBranches();
private:
CGFunc *cgFunc;
uint32 CalculateAlignRange(const BB &bb, uint32 addr) const;
void SetInsnId() const;
}; /* class AArch64ShortBranch */
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgFixShortBranch, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_FIXSHORTBRANCH_H */

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* ARM Compiler armasm User Guide version 6.6.
* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html
* (retrieved on 3/24/2017)
*/
/*
* ID, 128 bit vector prefix, followed by scalar prefixes
* scalar prefixes: 8-bit, 16-bit, 32-bit, 64-bit, 128-bit, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill
* (e.g., we use D0 when V0 contains a 64-bit scalar FP number (aka, double))
*/
FP_SIMD_REG(0 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(1 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(2 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(3 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(4 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(5 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(6 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(7 , "V", "B", "H", "S", "D", "Q", true, false, true, false, false)
FP_SIMD_REG(8 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(9 , "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(10, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(11, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(12, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(13, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(14, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(15, "V", "B", "H", "S", "D", "Q", true, true, false, false, false)
FP_SIMD_REG(16, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(17, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(18, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(19, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(20, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(21, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(22, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(23, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(24, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(25, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(26, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(27, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(28, "V", "B", "H", "S", "D", "Q", true, false, false, false, false)
FP_SIMD_REG(29, "V", "B", "H", "S", "D", "Q", true, false, false, false, true)
FP_SIMD_REG(30, "V", "B", "H", "S", "D", "Q", true, false, false, true, false)
FP_SIMD_REG(31, "V", "B", "H", "S", "D", "Q", true, false, false, true, false)
/* Alias ID */
FP_SIMD_REG_ALIAS(0)
FP_SIMD_REG_ALIAS(1)
FP_SIMD_REG_ALIAS(2)
FP_SIMD_REG_ALIAS(3)
FP_SIMD_REG_ALIAS(4)
FP_SIMD_REG_ALIAS(5)
FP_SIMD_REG_ALIAS(6)
FP_SIMD_REG_ALIAS(7)
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(0) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(1) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(2) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(3) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(4) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(5) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(6) */
/* FP_SIMD_REG_ALIAS_64BIT_SCALAR(7) */

View File

@ -0,0 +1,504 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H
#include "global.h"
#include "aarch64_operand.h"
namespace maplebe {
using namespace maple;
class AArch64GlobalOpt : public GlobalOpt {
public:
explicit AArch64GlobalOpt(CGFunc &func) : GlobalOpt(func) {}
~AArch64GlobalOpt() override = default;
void Run() override;
};
class OptimizeManager {
public:
explicit OptimizeManager(CGFunc &cgFunc) : cgFunc(cgFunc) {}
~OptimizeManager() = default;
template <typename OptimizePattern>
void Optimize()
{
OptimizePattern optPattern(cgFunc);
optPattern.Run();
}
private:
CGFunc &cgFunc;
};
class OptimizePattern {
public:
explicit OptimizePattern(CGFunc &cgFunc) : cgFunc(cgFunc) {}
virtual ~OptimizePattern() = default;
virtual bool CheckCondition(Insn &insn) = 0;
virtual void Optimize(Insn &insn) = 0;
virtual void Run() = 0;
bool OpndDefByOne(Insn &insn, int32 useIdx) const;
bool OpndDefByZero(Insn &insn, int32 useIdx) const;
bool OpndDefByOneOrZero(Insn &insn, int32 useIdx) const;
void ReplaceAllUsedOpndWithNewOpnd(const InsnSet &useInsnSet, uint32 regNO, Operand &newOpnd,
bool updateInfo) const;
static bool InsnDefOne(const Insn &insn);
static bool InsnDefZero(const Insn &insn);
static bool InsnDefOneOrZero(const Insn &insn);
std::string PhaseName() const
{
return "globalopt";
}
protected:
virtual void Init() = 0;
CGFunc &cgFunc;
};
/*
* Do Forward prop when insn is mov
* mov xx, x1
* ... // BBs and x1 is live
* mOp yy, xx
*
* =>
* mov x1, x1
* ... // BBs and x1 is live
* mOp yy, x1
*/
class ForwardPropPattern : public OptimizePattern {
public:
explicit ForwardPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~ForwardPropPattern() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
InsnSet firstRegUseInsnSet;
void RemoveMopUxtwToMov(Insn &insn);
std::set<BB *, BBIdCmp> modifiedBB;
};
/*
 * Do backward propagation of vreg/preg when encountering the following insn:
 *
 * mov vreg/preg1, vreg2
 *
 * back-propagate reg1 to all of vreg2's use points and def points, when all of them are in the same bb
*/
class BackPropPattern : public OptimizePattern {
public:
explicit BackPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~BackPropPattern() override
{
firstRegOpnd = nullptr;
secondRegOpnd = nullptr;
defInsnForSecondOpnd = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
bool CheckAndGetOpnd(const Insn &insn);
bool DestOpndHasUseInsns(Insn &insn);
bool DestOpndLiveOutToEHSuccs(Insn &insn) const;
bool CheckSrcOpndDefAndUseInsns(Insn &insn);
bool CheckSrcOpndDefAndUseInsnsGlobal(Insn &insn);
bool CheckPredefineInsn(Insn &insn);
bool CheckRedefineInsn(Insn &insn);
bool CheckReplacedUseInsn(Insn &insn);
RegOperand *firstRegOpnd = nullptr;
RegOperand *secondRegOpnd = nullptr;
uint32 firstRegNO = 0;
uint32 secondRegNO = 0;
InsnSet srcOpndUseInsnSet;
Insn *defInsnForSecondOpnd = nullptr;
bool globalProp = false;
};
/*
* when w0 has only one valid bit, these tranformation will be done
* cmp w0, #0
* cset w1, NE --> mov w1, w0
*
* cmp w0, #0
* cset w1, EQ --> eor w1, w0, 1
*
* cmp w0, #1
* cset w1, NE --> eor w1, w0, 1
*
* cmp w0, #1
* cset w1, EQ --> mov w1, w0
*
* cmp w0, #0
* cset w0, NE -->null
*
* cmp w0, #1
* cset w0, EQ -->null
*
* condition:
 * 1. the first operand of the cmp instruction must have only one valid bit
 * 2. the second operand of the cmp instruction must be 0 or 1
 * 3. the flag register of the cmp instruction must not be used later
*/
class CmpCsetPattern : public OptimizePattern {
public:
explicit CmpCsetPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~CmpCsetPattern() override
{
nextInsn = nullptr;
cmpFirstOpnd = nullptr;
cmpSecondOpnd = nullptr;
csetFirstOpnd = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
Insn *nextInsn = nullptr;
int64 cmpConstVal = 0;
Operand *cmpFirstOpnd = nullptr;
Operand *cmpSecondOpnd = nullptr;
Operand *csetFirstOpnd = nullptr;
};
/*
* mov w5, #1
* ... --> cset w5, NE
* mov w0, #0
* csel w5, w5, w0, NE
*
* mov w5, #0
* ... --> cset w5,EQ
* mov w0, #1
* csel w5, w5, w0, NE
*
* condition:
* 1.all define points of w5 are defined by: mov w5, #1(#0)
* 2.all define points of w0 are defined by: mov w0, #0(#1)
* 3.w0 will not be used after: csel w5, w5, w0, NE(EQ)
*/
class CselPattern : public OptimizePattern {
public:
explicit CselPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~CselPattern() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final {}
};
/*
* uxtb w0, w0 --> null
* uxth w0, w0 --> null
*
* condition:
* 1. validbits(w0)<=8,16,32
* 2. the first operand is same as the second operand
*
* uxtb w0, w1 --> null
* uxth w0, w1 --> null
*
* condition:
* 1. validbits(w1)<=8,16,32
* 2. the use points of w0 has only one define point, that is uxt w0, w1
*/
class RedundantUxtPattern : public OptimizePattern {
public:
explicit RedundantUxtPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~RedundantUxtPattern() override
{
secondOpnd = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
uint32 GetMaximumValidBit(Insn &insn, uint8 udIdx, InsnSet &insnChecked) const;
static uint32 GetInsnValidBit(const Insn &insn);
InsnSet useInsnSet;
uint32 firstRegNO = 0;
Operand *secondOpnd = nullptr;
};
/*
* bl MCC_NewObj_flexible_cname bl MCC_NewObj_flexible_cname
* mov x21, x0 // [R203]
* str x0, [x29,#16] // local var: Reg0_R6340 [R203] --> str x0, [x29,#16] // local var: Reg0_R6340 [R203]
* ... (has call) ... (has call)
* mov x2, x21 // use of x21 ldr x2, [x29, #16]
* bl *** bl ***
*/
class LocalVarSaveInsnPattern : public OptimizePattern {
public:
explicit LocalVarSaveInsnPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~LocalVarSaveInsnPattern() override
{
firstInsnSrcOpnd = nullptr;
firstInsnDestOpnd = nullptr;
secondInsnSrcOpnd = nullptr;
secondInsnDestOpnd = nullptr;
useInsn = nullptr;
secondInsn = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
bool CheckFirstInsn(const Insn &firstInsn);
bool CheckSecondInsn();
bool CheckAndGetUseInsn(Insn &firstInsn);
bool CheckLiveRange(const Insn &firstInsn);
Operand *firstInsnSrcOpnd = nullptr;
Operand *firstInsnDestOpnd = nullptr;
Operand *secondInsnSrcOpnd = nullptr;
Operand *secondInsnDestOpnd = nullptr;
Insn *useInsn = nullptr;
Insn *secondInsn = nullptr;
};
class ExtendShiftOptPattern : public OptimizePattern {
public:
explicit ExtendShiftOptPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~ExtendShiftOptPattern() override
{
defInsn = nullptr;
newInsn = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
void DoExtendShiftOpt(Insn &insn);
enum ExMOpType : uint8 {
kExUndef,
kExAdd, /* MOP_xaddrrr | MOP_xxwaddrrre | MOP_xaddrrrs */
kEwAdd, /* MOP_waddrrr | MOP_wwwaddrrre | MOP_waddrrrs */
kExSub, /* MOP_xsubrrr | MOP_xxwsubrrre | MOP_xsubrrrs */
kEwSub, /* MOP_wsubrrr | MOP_wwwsubrrre | MOP_wsubrrrs */
kExCmn, /* MOP_xcmnrr | MOP_xwcmnrre | MOP_xcmnrrs */
kEwCmn, /* MOP_wcmnrr | MOP_wwcmnrre | MOP_wcmnrrs */
kExCmp, /* MOP_xcmprr | MOP_xwcmprre | MOP_xcmprrs */
kEwCmp, /* MOP_wcmprr | MOP_wwcmprre | MOP_wcmprrs */
};
enum LsMOpType : uint8 {
kLsUndef,
kLxAdd, /* MOP_xaddrrr | MOP_xaddrrrs */
kLwAdd, /* MOP_waddrrr | MOP_waddrrrs */
kLxSub, /* MOP_xsubrrr | MOP_xsubrrrs */
kLwSub, /* MOP_wsubrrr | MOP_wsubrrrs */
kLxCmn, /* MOP_xcmnrr | MOP_xcmnrrs */
kLwCmn, /* MOP_wcmnrr | MOP_wcmnrrs */
kLxCmp, /* MOP_xcmprr | MOP_xcmprrs */
kLwCmp, /* MOP_wcmprr | MOP_wcmprrs */
kLxEor, /* MOP_xeorrrr | MOP_xeorrrrs */
kLwEor, /* MOP_weorrrr | MOP_weorrrrs */
kLxNeg, /* MOP_xinegrr | MOP_xinegrrs */
kLwNeg, /* MOP_winegrr | MOP_winegrrs */
kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */
kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */
};
enum SuffixType : uint8 {
kNoSuffix, /* no suffix or do not perform the optimization. */
kLSL, /* logical shift left */
kLSR, /* logical shift right */
kASR, /* arithmetic shift right */
kExten /* ExtendOp */
};
protected:
void Init() final;
private:
void SelectExtendOrShift(const Insn &def);
bool CheckDefUseInfo(Insn &use, uint32 size);
SuffixType CheckOpType(const Operand &lastOpnd) const;
void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount);
void SetExMOpType(const Insn &use);
void SetLsMOpType(const Insn &use);
MOperator replaceOp;
uint32 replaceIdx;
ExtendShiftOperand::ExtendOp extendOp;
BitShiftOperand::ShiftOp shiftOp;
Insn *defInsn = nullptr;
Insn *newInsn = nullptr;
bool optSuccess;
bool removeDefInsn;
ExMOpType exMOpType;
LsMOpType lsMOpType;
};
/*
* This pattern do:
* 1)
* uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32]
* ------>
* mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32]
* 2)
* ldrh R201, [...]
* and R202, R201, #65520
* uxth R203, R202
* ------->
* ldrh R201, [...]
* and R202, R201, #65520
* mov R203, R202
*/
class ExtenToMovPattern : public OptimizePattern {
public:
explicit ExtenToMovPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~ExtenToMovPattern() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
bool CheckHideUxtw(const Insn &insn, regno_t regno) const;
bool CheckUxtw(Insn &insn);
bool BitNotAffected(Insn &insn, uint32 validNum); /* check whether significant bits are affected */
bool CheckSrcReg(Insn &insn, regno_t srcRegNo, uint32 validNum);
MOperator replaceMop = MOP_undef;
};
class SameDefPattern : public OptimizePattern {
public:
explicit SameDefPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
~SameDefPattern() override
{
currInsn = nullptr;
sameInsn = nullptr;
}
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
bool IsSameDef();
bool SrcRegIsRedefined(regno_t regNo);
bool IsSameOperand(Operand &opnd0, Operand &opnd1);
Insn *currInsn = nullptr;
Insn *sameInsn = nullptr;
};
/*
* and r0, r0, #4 (the imm is n power of 2)
* ... (r0 is not used)
* cbz r0, .Label
* ===> tbz r0, #2, .Label
*
* and r0, r0, #4 (the imm is n power of 2)
* ... (r0 is not used)
* cbnz r0, .Label
* ===> tbnz r0, #2, .Label
*/
/*
 * Combines an `and` with a power-of-two immediate followed by cbz/cbnz into a
 * single tbz/tbnz on the corresponding bit (see the comment above).
 */
class AndCbzPattern : public OptimizePattern {
public:
    explicit AndCbzPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
    /* prevInsn is a non-owning observer; the hand-written dtor only null-ed
     * it (a dead store), so a defaulted dtor is equivalent and matches the
     * sibling pattern classes. */
    ~AndCbzPattern() override = default;
    bool CheckCondition(Insn &insn) final;
    void Optimize(Insn &insn) final;
    void Run() final;

protected:
    void Init() final;

private:
    /* log2 of the and-immediate, i.e. the bit index for tbz/tbnz */
    int64 CalculateLogValue(int64 val) const;
    bool IsAdjacentArea(Insn &prev, Insn &curr) const;
    /* the matched `and` insn preceding the cbz/cbnz */
    Insn *prevInsn = nullptr;
};
/*
* [arithmetic operation]
* add/sub/ R202, R201, #1 add/sub/ R202, R201, #1
* ... ...
* add/sub/ R203, R201, #1 ---> mov R203, R202
*
* [copy operation]
* mov R201, #1 mov R201, #1
* ... ...
* mov R202, #1 ---> mov R202, R201
*
 * The pattern finds the insn with the same rvalue as the current insn,
 * then propagates its lvalue, and replaces the current insn with a mov-reg-reg insn.
 * The resulting mov can be propagated further by forward-prop or back-prop.
*
* conditions:
* 1. in same BB
* 2. rvalue is not defined between two insns
* 3. lvalue is not defined between two insns
*/
/*
 * Replaces an insn whose right-hand side duplicates an earlier insn's RHS in
 * the same BB with a register-to-register mov from that insn's result (see
 * the comment above for the conditions).
 */
class SameRHSPropPattern : public OptimizePattern {
public:
    explicit SameRHSPropPattern(CGFunc &cgFunc) : OptimizePattern(cgFunc) {}
    /* prevInsn is a non-owning observer; the hand-written dtor only null-ed
     * it (a dead store), so a defaulted dtor is equivalent and matches the
     * sibling pattern classes. */
    ~SameRHSPropPattern() override = default;
    bool CheckCondition(Insn &insn) final;
    void Optimize(Insn &insn) final;
    void Run() final;

protected:
    void Init() final;

private:
    bool IsSameOperand(Operand *opnd1, Operand *opnd2) const;
    bool FindSameRHSInsnInBB(Insn &insn);
    /* earlier insn in the BB with the matching RHS */
    Insn *prevInsn = nullptr;
    /* mops this pattern is allowed to match, filled in Init() */
    std::vector<MOperator> candidates;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_GLOBAL_H */

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H
#include "ico.h"
#include "aarch64_isa.h"
#include "optimize_common.h"
#include "live.h"
namespace maplebe {
/* AArch64 driver for the if-conversion pass; InitOptimizePatterns()
 * presumably registers the AArch64ICO*Pattern classes declared below. */
class AArch64IfConversionOptimizer : public IfConversionOptimizer {
public:
AArch64IfConversionOptimizer(CGFunc &func, MemPool &memPool) : IfConversionOptimizer(func, memPool) {}
~AArch64IfConversionOptimizer() override = default;
void InitOptimizePatterns() override;
};
/* Base class with shared helpers for the AArch64 if-conversion patterns:
 * building cmp/ccmp/cset/csel insns from an existing conditional branch. */
class AArch64ICOPattern : public ICOPattern {
public:
explicit AArch64ICOPattern(CGFunc &func) : ICOPattern(func) {}
~AArch64ICOPattern() override = default;
protected:
/* condition code for the branch mop; inverse presumably flips it — confirm in aarch64_ico.cpp */
ConditionCode Encode(MOperator mOp, bool inverse) const;
Insn *BuildCmpInsn(const Insn &condBr) const;
Insn *BuildCcmpInsn(ConditionCode ccCode, const Insn *cmpInsn) const;
Insn *BuildCondSet(const Insn &branch, RegOperand &reg, bool inverse) const;
Insn *BuildCondSel(const Insn &branch, MOperator mOp, RegOperand &dst, RegOperand &src1, RegOperand &src2) const;
/* true when insn defines dest from the collected src operands */
bool IsSetInsn(const Insn &insn, Operand *&dest, std::vector<Operand *> &src) const;
/* NZCV flags field for a ccmp encoding of ccCode */
static uint32 GetNZCV(ConditionCode ccCode, bool inverse);
bool CheckMop(MOperator mOperator) const;
};
/* If-Then-Else pattern */
/* Converts a diamond (cmpBB -> ifBB/elseBB -> joinBB) into straight-line code
 * using conditional select/move insns. */
class AArch64ICOIfThenElsePattern : public AArch64ICOPattern {
public:
explicit AArch64ICOIfThenElsePattern(CGFunc &func) : AArch64ICOPattern(func) {}
~AArch64ICOIfThenElsePattern() override = default;
bool Optimize(BB &curBB) override;
protected:
/* builds the csel/cset sequence for one side; dest->srcs maps come from CheckCondMoveBB */
bool BuildCondMovInsn(BB &cmpBB, const BB &bb, const std::map<Operand *, std::vector<Operand *>> &ifDestSrcMap,
const std::map<Operand *, std::vector<Operand *>> &elseDestSrcMap, bool elseBBIsProcessed,
std::vector<Insn *> &generateInsn);
bool DoOpt(BB &cmpBB, BB *ifBB, BB *elseBB, BB &joinBB);
/* immediate operands need materialization before a csel can use them */
void GenerateInsnForImm(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg,
std::vector<Insn *> &generateInsn);
Operand *GetDestReg(const std::map<Operand *, std::vector<Operand *>> &destSrcMap, const RegOperand &destReg) const;
void GenerateInsnForReg(const Insn &branchInsn, Operand &ifDest, Operand &elseDest, RegOperand &destReg,
std::vector<Insn *> &generateInsn);
RegOperand *GenerateRegAndTempInsn(Operand &dest, const RegOperand &destReg,
std::vector<Insn *> &generateInsn) const;
bool CheckHasSameDest(std::vector<Insn *> &lInsn, std::vector<Insn *> &rInsn) const;
/* rejects the transform if insn redefines a register the cmp/flags depend on */
bool CheckModifiedRegister(Insn &insn, std::map<Operand *, std::vector<Operand *>> &destSrcMap,
std::vector<Operand *> &src, Operand &dest, const Insn *cmpInsn,
const Operand *flagOpnd) const;
/* verifies bb contains only convertible "set" insns and collects their dest/src info */
bool CheckCondMoveBB(BB *bb, std::map<Operand *, std::vector<Operand *>> &destSrcMap,
std::vector<Operand *> &destRegs, std::vector<Insn *> &setInsn, Operand *flagReg,
Insn *cmpInsn) const;
};
/* If( cmp || cmp ) then or If( cmp && cmp ) then
* cmp w4, #1
* beq .L.886__1(branch1) cmp w4, #1
* .L.886__2: => ccmp w4, #4, #4, NE
* cmp w4, #4 beq .L.886__1
* beq .L.886__1(branch2)
* */
/* Fuses two consecutive compare-and-branch BBs that branch to the same target
 * into a cmp + ccmp + single branch (see the example above). */
class AArch64ICOSameCondPattern : public AArch64ICOPattern {
public:
explicit AArch64ICOSameCondPattern(CGFunc &func) : AArch64ICOPattern(func) {}
~AArch64ICOSameCondPattern() override = default;
bool Optimize(BB &curBB) override;
protected:
bool DoOpt(BB *firstIfBB, BB &secondIfBB);
};
/* If-Then MorePreds pattern
*
* .L.891__92: .L.891__92:
* cmp x4, w0, UXTW cmp x4, w0, UXTW
* bls .L.891__41 csel x0, x2, x0, LS
* .L.891__42: bls .L.891__94
* sub x0, x4, w0, UXTW =====> .L.891__42:
* cmp x0, x2 sub x0, x4, w0, UXTW
* bls .L.891__41 cmp x0, x2
* ...... csel x0, x2, x0, LS
* .L.891__41: bls .L.891__94
* mov x0, x2
* b .L.891__94
* */
/* Replaces mov insns in a multi-predecessor goto BB with csel insns hoisted
 * into each predecessor (see the example above). */
class AArch64ICOMorePredsPattern : public AArch64ICOPattern {
public:
explicit AArch64ICOMorePredsPattern(CGFunc &func) : AArch64ICOPattern(func) {}
~AArch64ICOMorePredsPattern() override = default;
bool Optimize(BB &curBB) override;
protected:
bool DoOpt(BB &gotoBB);
/* gotoBB must contain only mov insns (collected into movInsn) to qualify */
bool CheckGotoBB(BB &gotoBB, std::vector<Insn *> &movInsn) const;
/* builds one csel per collected mov, conditioned on branchInsn */
bool MovToCsel(std::vector<Insn *> &movInsn, std::vector<Insn *> &cselInsn, const Insn &branchInsn) const;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ICO_H */

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H
#include "aarch64_isa.h"
#include "insn.h"
#include "string_utils.h"
#include "aarch64_operand.h"
#include "common_utils.h"
namespace maplebe {
/* Visitor that emits assembly text for each AArch64 operand kind. */
class A64OpndEmitVisitor : public OpndEmitVisitor {
public:
    A64OpndEmitVisitor(Emitter &emitter, const OpndDesc *operandProp) : OpndEmitVisitor(emitter), opndProp(operandProp)
    {
    }
    /* opndProp is non-owning (never deleted); the hand-written dtor only
     * null-ed it — a dead store — so a defaulted dtor is equivalent. */
    ~A64OpndEmitVisitor() override = default;

    void Visit(RegOperand *v) final;
    void Visit(ImmOperand *v) final;
    void Visit(MemOperand *v) final;
    void Visit(CondOperand *v) final;
    void Visit(StImmOperand *v) final;
    void Visit(BitShiftOperand *v) final;
    void Visit(ExtendShiftOperand *v) final;
    void Visit(LabelOperand *v) final;
    void Visit(FuncNameOperand *v) final;
    void Visit(CommentOperand *v) final;
    void Visit(OfstOperand *v) final;
    void Visit(ListOperand *v) final;

private:
    void EmitVectorOperand(const RegOperand &v);
    void EmitIntReg(const RegOperand &v, uint8 opndSz = kMaxSimm32);
    /* operand description supplied by the caller; non-owning */
    const OpndDesc *opndProp;
};
/* Visitor that dumps each AArch64 operand kind for debugging output. */
class A64OpndDumpVisitor : public OpndDumpVisitor {
public:
/* NOTE(review): single-argument ctor is not marked explicit, unlike the
 * emit visitor above — confirm implicit conversion is intended. */
A64OpndDumpVisitor(const OpndDesc &operandDesc) : OpndDumpVisitor(operandDesc) {}
~A64OpndDumpVisitor() override = default;
void Visit(RegOperand *v) final;
void Visit(ImmOperand *v) final;
void Visit(MemOperand *v) final;
void Visit(ListOperand *v) final;
void Visit(CondOperand *v) final;
void Visit(StImmOperand *v) final;
void Visit(BitShiftOperand *v) final;
void Visit(ExtendShiftOperand *v) final;
void Visit(LabelOperand *v) final;
void Visit(FuncNameOperand *v) final;
void Visit(PhiOperand *v) final;
void Visit(CommentOperand *v) final;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_INSN_H */

View File

@ -0,0 +1,78 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* ARM Compiler armasm User Guide version 6.6.
* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html
* (retrieved on 3/24/2017)
*
* $ 4.1 Registers in AArch64 state
*
* There is no register named W31 or X31.
* Depending on the instruction, register 31 is either the stack
* pointer or the zero register. When used as the stack pointer,
* you refer to it as "SP". When used as the zero register, you refer
* to it as WZR in a 32-bit context or XZR in a 64-bit context.
* The zero register returns 0 when read and discards data when
* written (e.g., when setting the status register for testing).
*/
/* ID, 32-bit prefix, 64-bit prefix, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill */
/* R0-R7: argument/result registers (isParam) */
INT_REG(0 , "W", "X", true, false, true, false, false)
INT_REG(1 , "W", "X", true, false, true, false, false)
INT_REG(2 , "W", "X", true, false, true, false, false)
INT_REG(3 , "W", "X", true, false, true, false, false)
INT_REG(4 , "W", "X", true, false, true, false, false)
INT_REG(5 , "W", "X", true, false, true, false, false)
INT_REG(6 , "W", "X", true, false, true, false, false)
INT_REG(7 , "W", "X", true, false, true, false, false)
/* R8-R14: caller-saved temporaries */
INT_REG(8 , "W", "X", true, false, false, false, false)
INT_REG(9 , "W", "X", true, false, false, false, false)
INT_REG(10, "W", "X", true, false, false, false, false)
INT_REG(11, "W", "X", true, false, false, false, false)
INT_REG(12, "W", "X", true, false, false, false, false)
INT_REG(13, "W", "X", true, false, false, false, false)
INT_REG(14, "W", "X", true, false, false, false, false)
/* R15: reserved as the extra spill register (isExtraSpill) */
INT_REG(15, "W", "X", true, false, false, false, true)
/* R16/R17: reserved as spill registers (isSpill) */
INT_REG(16, "W", "X", true, false, false, true, false)
INT_REG(17, "W", "X", true, false, false, true, false)
/* NOTE(review): R18 is the platform register on some OS ABIs; here it is assignable — confirm target ABI */
INT_REG(18, "W", "X", true, false, false, false, false)
/* R19-R29: callee-saved (isCalleeSave) */
INT_REG(19, "W", "X", true, true, false, false, false)
INT_REG(20, "W", "X", true, true, false, false, false)
INT_REG(21, "W", "X", true, true, false, false, false)
INT_REG(22, "W", "X", true, true, false, false, false)
INT_REG(23, "W", "X", true, true, false, false, false)
INT_REG(24, "W", "X", true, true, false, false, false)
INT_REG(25, "W", "X", true, true, false, false, false)
INT_REG(26, "W", "X", true, true, false, false, false)
INT_REG(27, "W", "X", true, true, false, false, false)
INT_REG(28, "W", "X", true, true, false, false, false)
INT_REG(29, "W", "X", true, true, false, false, false)
/* R30 (LR) and R31: callee-saved but never allocatable */
INT_REG(30, "W", "X", false, true, false, false, false)
INT_REG(31, "W", "X", false, true, false, false, false)
/*
 * Refer to ARM Compiler armasm User Guide version 6.6. $4.5 Predeclared core register names in AArch64 state
 * We should not use "W" prefix in 64-bit context, though!!
 */
INT_REG(SP, "W", "" , false, false, false, false, false)
INT_REG(ZR, "W", "X", false, false, false, false, false)
/* Alias ID, ID, 32-bit prefix, 64-bit prefix */
/* NOTE(review): FP is aliased to register 31 here rather than the conventional x29 — confirm intended frame-pointer scheme */
INT_REG_ALIAS(FP, 31, "", "" )
INT_REG_ALIAS(LR, 30, "", "" )
/* R19 is reserved for yieldpoint */
INT_REG_ALIAS(YP, 19, "", "" )
INT_REG_ALIAS(LAST_INT_REG, 31, "", "" )

View File

@ -0,0 +1,181 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H
#include "isa.h"
/* Machine-opcode enum: each DEFINE_MOP entry in aarch64_md.def contributes
 * one enumerator; kMopLast is the total opcode count. */
#define DEFINE_MOP(op, ...) op,
enum AArch64MopT : maple::uint32 {
#include "aarch64_md.def"
kMopLast
};
#undef DEFINE_MOP
namespace maplebe {
/*
* ARM Architecture Reference Manual (for ARMv8)
* D1.8.2
*/
/* SP must stay 16-byte aligned (see ArmARM D1.8.2 cited above) */
constexpr int kAarch64StackPtrAlignment = 16;
constexpr int32 kOffsetAlign = 8;
constexpr uint32 kIntregBytelen = 8; /* 64-bit */
constexpr uint32 kFpregBytelen = 8; /* only lower 64 bits are used */
/* bytes for a saved FP,LR pair (2 x 8) */
constexpr int kSizeOfFplr = 16;
/* signed-immediate offset ranges accepted by stp/ldp */
enum StpLdpImmBound : int {
kStpLdpImm64LowerBound = -512,
kStpLdpImm64UpperBound = 504,
kStpLdpImm32LowerBound = -256,
kStpLdpImm32UpperBound = 252
};
/* offset range for str/ldr pre-/post-indexed addressing */
enum StrLdrPerPostBound : int64 { kStrLdrPerPostLowerBound = -256, kStrLdrPerPostUpperBound = 255 };
constexpr int64 kStrAllLdrAllImmLowerBound = 0;
/* unsigned-offset upper bounds for str/ldr by access size */
enum StrLdrImmUpperBound : int64 {
kStrLdrImm32UpperBound = 16380, /* must be a multiple of 4 */
kStrLdrImm64UpperBound = 32760, /* must be a multiple of 8 */
kStrbLdrbImmUpperBound = 4095,
kStrhLdrhImmUpperBound = 8190
};
/*
* ARM Compiler armasm User Guide version 6.6.
* http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.dui0473j/deb1353594352617.html
* (retrieved on 3/24/2017)
*
* $ 4.1 Registers in AArch64 state
* ...When you use the 32-bit form of an instruction, the upper
* 32 bits of the source registers are ignored and
* the upper 32 bits of the destination register are set to zero.
* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
*
* There is no register named W31 or X31.
* Depending on the instruction, register 31 is either the stack
* pointer or the zero register. When used as the stack pointer,
* you refer to it as "SP". When used as the zero register, you refer
* to it as WZR in a 32-bit context or XZR in a 64-bit context.
* The zero register returns 0 when read and discards data when
* written (e.g., when setting the status register for testing).
*/
/* Physical register ids: integer regs first (R0.., from aarch64_int_regs.def),
 * then FP/SIMD regs (V0.., from aarch64_fp_simd_regs.def), then the flags
 * pseudo-register.  Aliases (RFP, RLR, SD/DD views, ...) are folded onto the
 * same numeric ids by re-expanding the .def files with the *_ALIAS macros. */
enum AArch64reg : uint32 {
kRinvalid = kInvalidRegNO,
/* integer registers */
#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) R##ID,
#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64)
#include "aarch64_int_regs.def"
#undef INT_REG
#undef INT_REG_ALIAS
/* fp-simd registers */
#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill) V##ID,
#define FP_SIMD_REG_ALIAS(ID)
#include "aarch64_fp_simd_regs.def"
#undef FP_SIMD_REG
#undef FP_SIMD_REG_ALIAS
kMaxRegNum,
kRFLAG,
kAllRegNum,
/* alias */
#define INT_REG(ID, PREF32, PREF64, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
#define INT_REG_ALIAS(ALIAS, ID, PREF32, PREF64) R##ALIAS = R##ID,
#include "aarch64_int_regs.def"
#undef INT_REG
#undef INT_REG_ALIAS
/* S-register (32-bit) view aliases onto the same V ids */
#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
#define FP_SIMD_REG_ALIAS(ID) S##ID = V##ID,
#include "aarch64_fp_simd_regs.def"
#undef FP_SIMD_REG
#undef FP_SIMD_REG_ALIAS
/* D-register (64-bit) view aliases onto the same V ids */
#define FP_SIMD_REG(ID, PV, P8, P16, P32, P64, P128, canBeAssigned, isCalleeSave, isParam, isSpill, isExtraSpill)
#define FP_SIMD_REG_ALIAS(ID) D##ID = V##ID,
#include "aarch64_fp_simd_regs.def"
#undef FP_SIMD_REG
#undef FP_SIMD_REG_ALIAS
};
class Insn;
namespace AArch64isa {
static inline bool IsGPRegister(AArch64reg r)
{
    /* general-purpose iff the id lies in the [R0, RZR] enum range */
    return (r >= R0) && (r <= RZR);
}
static inline bool IsFPSIMDRegister(AArch64reg r)
{
    /* FP/SIMD iff the id lies in the [V0, V31] enum range */
    return (r >= V0) && (r <= V31);
}
static inline bool IsPhysicalRegister(regno_t r)
{
    /* physical register numbers all lie strictly below kMaxRegNum */
    return kMaxRegNum > r;
}
/* Classifies a physical register id as integer or float/SIMD;
 * asserts (and returns kRegTyUndef) for ids outside both ranges. */
static inline RegType GetRegType(AArch64reg r)
{
    if (IsGPRegister(r)) {
        return kRegTyInt;
    }
    if (!IsFPSIMDRegister(r)) {
        DEBUG_ASSERT(false, "No suitable register type to return?");
        return kRegTyUndef;
    }
    return kRegTyFloat;
}
/* Bit flags describing memory-ordering attributes of a load/store; values may
 * be OR-ed together.  The comment on each flag names the ARM architecture
 * revision that introduced the corresponding instruction form. */
enum MemoryOrdering : uint32 {
kMoNone = 0,
kMoAcquire = 1ULL, /* ARMv8 */
kMoAcquireRcpc = (1ULL << 1), /* ARMv8.3 */
kMoLoacquire = (1ULL << 2), /* ARMv8.1 */
kMoRelease = (1ULL << 3), /* ARMv8 */
kMoLorelease = (1ULL << 4) /* ARMv8.1 */
};
static inline bool IsPseudoInstruction(MOperator mOp)
{
    /* pseudo mops occupy the contiguous range [MOP_pseudo_param_def_x, MOP_pseudo_eh_def_x] */
    return (MOP_pseudo_param_def_x <= mOp) && (mOp <= MOP_pseudo_eh_def_x);
}
/*
* Precondition: The given insn is a jump instruction.
* Get the jump target label operand index from the given instruction.
* Note: MOP_xbr is a jump instruction, but the target is unknown at compile time,
* because a register instead of label. So we don't take it as a branching instruction.
* However for special long range branch patch, the label is installed in this case.
*/
uint32 GetJumpTargetIdx(const Insn &insn);
/* NOTE(review): presumably returns the condition-inverted counterpart of the given branch mop (e.g. beq <-> bne) — confirm in aarch64_isa.cpp */
MOperator FlipConditionOp(MOperator flippedOp);
} /* namespace AArch64isa */
/*
* We save callee-saved registers from lower stack area to upper stack area.
* If possible, we store a pair of registers (int/int and fp/fp) in the stack.
* The Stack Pointer has to be aligned at 16-byte boundary.
* On AArch64, kIntregBytelen == 8 (see the above)
*/
inline void GetNextOffsetCalleeSaved(int &offset)
{
offset += (kIntregBytelen << 1);
}
MOperator GetMopPair(MOperator mop);
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_ISA_H */

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H
#include "live.h"
namespace maplebe {
/* AArch64 specialization of live-range analysis: target hooks for EH defs,
 * return-BB def/use sets, and call-insn parameter handling. */
class AArch64LiveAnalysis : public LiveAnalysis {
public:
AArch64LiveAnalysis(CGFunc &func, MemPool &memPool) : LiveAnalysis(func, memPool) {}
~AArch64LiveAnalysis() override = default;
/* true if reg can be ignored during cleanup-BB liveness */
bool CleanupBBIgnoreReg(regno_t reg) override;
void InitEhDefine(BB &bb) override;
void GenerateReturnBBDefUse(BB &bb) const override;
void ProcessCallInsnParam(BB &bb, const Insn &insn) const override;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_LIVE_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,226 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H
#include "memlayout.h"
#include "aarch64_abi.h"
namespace maplebe {
/* Location record for a symbol that is passed/held in up to four registers
 * instead of a stack slot. */
class AArch64SymbolAlloc : public SymbolAlloc {
public:
AArch64SymbolAlloc() = default;
/* NOTE(review): dtor is not marked override — confirm SymbolAlloc's dtor is virtual */
~AArch64SymbolAlloc() = default;
void SetRegisters(AArch64reg r0, AArch64reg r1, AArch64reg r2, AArch64reg r3)
{
reg0 = r0;
reg1 = r1;
reg2 = r2;
reg3 = r3;
}
/* register-allocated iff the first register slot is set;
 * reg1..reg3 are intentionally not consulted */
inline bool IsRegister() const
{
return reg0 != kRinvalid;
}
private:
AArch64reg reg0 = kRinvalid;
AArch64reg reg1 = kRinvalid;
AArch64reg reg2 = kRinvalid;
AArch64reg reg3 = kRinvalid;
};
/*
* On AArch64, stack frames are structured as follows:
*
* The stack grows downward -- full descending (SP points
* to a filled slot).
*
* Any of the parts of a frame is optional, i.e., it is
* possible to write a caller-callee pair in such a way
* that the particular part is absent in the frame.
*
* Before a call is made, the frame looks like:
* | |
* ||----------------------------|
* | args passed on the stack | (we call them up-formals)
* ||----------------------------|<- Stack Pointer
* | |
*
* V1.
* Right after a call is made
* | |
* ||----------------------------|
* | args passed on the stack |
* ||----------------------------|<- Stack Pointer
* | PREV_FP, PREV_LR |
* ||----------------------------|<- Frame Pointer
*
* After the prologue has run,
* | |
* ||----------------------------|
* | args passed on the stack |
* ||----------------------------|
* | PREV_FP, PREV_LR |
* ||----------------------------|<- Frame Pointer
* | callee-saved registers |
* ||----------------------------|
* | empty space. should have |
* | at least 16-byte alignment |
* ||----------------------------|
* | local variables |
* ||----------------------------|
* | variable-sized local vars |
* | (VLAs) |
* ||----------------------------|<- Stack Pointer
*
* callee-saved registers include
* 1. R19-R28
* 2. R8 if return value needs to be returned
* thru memory and callee wants to use R8
* 3. we don't need to save R19 if it is used
* as base register for PIE.
* 4. V8-V15
*
* V2. (this way, we may be able to save
* on SP modifying instruction)
* Right after a call is made
* | |
* ||----------------------------|
* | args passed on the stack |
* ||----------------------------|<- Stack Pointer
* | |
* | empty space |
* | |
* ||----------------------------|
* | PREV_FP, PREV_LR |
* ||----------------------------|<- Frame Pointer
*
* After the prologue has run,
* | |
* ||----------------------------|
* | args passed on the stack |
* ||----------------------------|
* | callee-saved registers |
* | including those used for |
* | parameter passing |
* ||----------------------------|
* | empty space. should have |
* | at least 16-byte alignment |
* ||----------------------------|
* | local variables |
* ||----------------------------|
* | PREV_FP, PREV_LR |
* ||----------------------------|<- Frame Pointer
* | variable-sized local vars |
* | (VLAs) |
* ||----------------------------|
* | args to pass through stack |
* ||----------------------------|
*/
/*
 * AArch64 stack-frame layout (see the frame diagrams above): tracks the
 * per-segment sizes (ref locals, spills, locals, GR/VR varargs save areas)
 * and computes the final frame size.  The only code change versus the
 * original is dropping a redundant `inline` on SetSizeOfVRSaveArea —
 * in-class member definitions are implicitly inline, and the sibling
 * setters/getters here carry no `inline` keyword.
 */
class AArch64MemLayout : public MemLayout {
public:
    AArch64MemLayout(BECommon &b, MIRFunction &f, MapleAllocator &mallocator)
        : MemLayout(b, f, mallocator, kAarch64StackPtrAlignment)
    {
    }
    ~AArch64MemLayout() override = default;
    /*
     * Returns stack space required for a call
     * which is used to pass arguments that cannot be
     * passed through registers
     */
    uint32 ComputeStackSpaceRequirementForCall(StmtNode &stmt, int32 &aggCopySize, bool isIcall) override;
    void LayoutStackFrame(int32 &structCopySize, int32 &maxParmStackSize) override;
    void AssignSpillLocationsToPseudoRegisters() override;
    SymbolAlloc *AssignLocationToSpillReg(regno_t vrNum) override;
    /* frame size before final alignment/adjustment; see RealStackFrameSize */
    uint64 StackFrameSize() const;
    uint32 RealStackFrameSize() const;
    const MemSegment &locals() const
    {
        return segLocals;
    }
    uint32 GetSizeOfSpillReg() const
    {
        return segSpillReg.GetSize();
    }
    uint32 GetSizeOfLocals() const
    {
        return segLocals.GetSize();
    }
    void SetSizeOfGRSaveArea(uint32 sz)
    {
        segGrSaveArea.SetSize(sz);
    }
    uint32 GetSizeOfGRSaveArea() const
    {
        return segGrSaveArea.GetSize();
    }
    void SetSizeOfVRSaveArea(uint32 sz)
    {
        segVrSaveArea.SetSize(sz);
    }
    uint32 GetSizeOfVRSaveArea() const
    {
        return segVrSaveArea.GetSize();
    }
    uint32 GetSizeOfRefLocals() const
    {
        return segRefLocals.GetSize();
    }
    int32 GetRefLocBaseLoc() const;
    int32 GetGRSaveAreaBaseLoc();
    int32 GetVRSaveAreaBaseLoc();

private:
    MemSegment segRefLocals = MemSegment(kMsRefLocals);
    /* callee saved register R19-R28 (10) */
    MemSegment segSpillReg = MemSegment(kMsSpillReg);
    MemSegment segLocals = MemSegment(kMsLocals); /* these are accessed via Frame Pointer */
    /* save areas for vararg GP and vector argument registers */
    MemSegment segGrSaveArea = MemSegment(kMsGrSaveArea);
    MemSegment segVrSaveArea = MemSegment(kMsVrSaveArea);
    int32 fixStackSize = 0;
    void SetSizeAlignForTypeIdx(uint32 typeIdx, uint32 &size, uint32 &align) const;
    void SetSegmentSize(AArch64SymbolAlloc &symbolAlloc, MemSegment &segment, uint32 typeIdx) const;
    void LayoutVarargParams();
    void LayoutFormalParams();
    void LayoutActualParams();
    void LayoutLocalVariables(std::vector<MIRSymbol *> &tempVar, std::vector<MIRSymbol *> &returnDelays);
    /* NOTE(review): "EAVariales" looks like a typo for "EAVariables", kept for interface compatibility */
    void LayoutEAVariales(std::vector<MIRSymbol *> &tempVar);
    void LayoutReturnRef(std::vector<MIRSymbol *> &returnDelays, int32 &structCopySize, int32 &maxParmStackSize);
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_MEMLAYOUT_H */

View File

@ -0,0 +1,210 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H
#include "obj_emit.h"
#include "aarch64_insn.h"
#include "aarch64_cg.h"
namespace maplebe {
/* AArch64-specific relocation fixup kinds, numbered after the generic kinds
 * (kFirstTargetFixupKind comes from obj_emit.h).  Each names the pc-relative
 * instruction field being patched (adr imm21, branch imm26, etc.). */
enum AArch64FixupKind {
kAArch64PCRelAdrImm21 = kFirstTargetFixupKind,
kAArch64PCRelAdrpImm21,
kAArch64LoadPCRelImm19,
kAArch64CondBranchPCRelImm19,
kAArch64UnCondBranchPCRelImm26,
kAArch64CompareBranchPCRelImm19,
kAArch64TestBranchPCRelImm14,
kAArch64CallPCRelImm26,
kAArch64AddPCRelLo12,
kAArch64LdrPCRelLo12,
};
/* Per-function emission state for AArch64 object emission; resolves local
 * branch fixups once label/symbol offsets are known. */
class AArch64ObjFuncEmitInfo : public ObjFuncEmitInfo {
public:
AArch64ObjFuncEmitInfo(CGFunc &func, MemPool &memPool) : ObjFuncEmitInfo(func, memPool) {}
/* NOTE(review): dtor is not marked override — confirm ObjFuncEmitInfo's dtor is virtual */
~AArch64ObjFuncEmitInfo() = default;
void HandleLocalBranchFixup(const std::vector<uint32> &label2Offset,
const std::vector<uint32> &symbol2Offset) override;
};
class AArch64ObjEmitter : public ObjEmitter {
public:
AArch64ObjEmitter(CG &cg, const std::string &objFileName) : ObjEmitter(cg, objFileName) {}
~AArch64ObjEmitter() = default;
void EncodeInstruction(const Insn &insn, const std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo) override
{
uint32 binInsn = GetBinaryCodeForInsn(insn, label2Offset, objFuncEmitInfo);
objFuncEmitInfo.AppendTextData(binInsn, k4ByteSize);
if (insn.GetMachineOpcode() == MOP_xbl || insn.GetMachineOpcode() == MOP_xblr) {
if (insn.GetStackMap() == nullptr) {
return;
}
objFuncEmitInfo.RecordOffset2StackMapInfo(objFuncEmitInfo.GetTextDataSize(),
insn.GetStackMap()->GetReferenceMap().SerializeInfo(),
insn.GetStackMap()->GetDeoptInfo().SerializeInfo());
}
}
uint32 GetInsnSize(const Insn &insn) const override
{
(void)insn;
return k4ByteSize;
}
FuncEmitInfo &CreateFuncEmitInfo(CGFunc &cgFunc)
{
MemPool *memPool = cgFunc.GetCG()->GetMIRModule()->GetMemPool();
AArch64ObjFuncEmitInfo *content = memPool->New<AArch64ObjFuncEmitInfo>(cgFunc, *memPool);
contents.insert(contents.begin() + cgFunc.GetFunction().GetPuidxOrigin(), content);
return *content;
}
void HandleTextSectionGlobalFixup() override;
void HandleTextSectionFixup();
void AppendTextSectionData() override;
void AppendGlobalLabel() override;
void AppendSymsToSymTabSec() override;
void InitSections() override;
void LayoutSections() override;
void UpdateMachineAndFlags(FileHeader &header) override;
void EmitDataToDynamic();
void EmitDataToHash();
void EmitIntrinsicInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) override;
void EmitSpinIntrinsicInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) override;
uint32 GetBinaryCodeForInsn(const Insn &insn, const std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetOpndMachineValue(const Operand &opnd) const;
uint32 GetAdrLabelOpndValue(const Insn &insn, const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetLoadLiteralOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetCondBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetUnCondBranchOpndValue(const Operand &opnd, const std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetCallFuncOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetTestBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetCompareBranchOpndValue(const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GetLo12LitrealOpndValue(MOperator mOp, const Operand &opnd, ObjFuncEmitInfo &objFuncEmitInfo) const;
void InsertNopInsn(ObjFuncEmitInfo &objFuncEmitInfo) const override;
private:
uint32 GenAddSubExtendRegInsn(const Insn &insn) const;
uint32 GenAddSubImmInsn(const Insn &insn) const;
uint32 GenAddSubShiftImmInsn(const Insn &insn) const;
uint32 GenAddSubRegInsn(const Insn &insn) const;
uint32 GenAddSubShiftRegInsn(const Insn &insn) const;
uint32 GenBitfieldInsn(const Insn &insn) const;
uint32 GenExtractInsn(const Insn &insn) const;
uint32 GenBranchImmInsn(const Insn &insn, const std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenBranchRegInsn(const Insn &insn) const;
uint32 GenCompareBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenCondCompareImmInsn(const Insn &insn) const;
uint32 GenCondCompareRegInsn(const Insn &insn) const;
uint32 GenConditionalSelectInsn(const Insn &insn) const;
uint32 GenDataProcess1SrcInsn(const Insn &insn) const;
/* Per-format encoders: each presumably returns the 32-bit A64 encoding for one insn — confirm in .cpp. */
uint32 GenDataProcess2SrcInsn(const Insn &insn) const;
uint32 GenDataProcess3SrcInsn(const Insn &insn) const;
/* floating-point conversion / compare / data-processing / immediate / conditional-select encoders */
uint32 GenFloatIntConversionsInsn(const Insn &insn) const;
uint32 GenFloatCompareInsn(const Insn &insn) const;
uint32 GenFloatDataProcessing1Insn(const Insn &insn) const;
uint32 GenFloatDataProcessing2Insn(const Insn &insn) const;
uint32 GenFloatImmInsn(const Insn &insn) const;
uint32 GenFloatCondSelectInsn(const Insn &insn) const;
/*
 * load/store encoders; overloads taking ObjFuncEmitInfo presumably need it to
 * record label/relocation fixups — confirm against the implementations
 */
uint32 GenLoadStoreModeLiteral(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenLoadStoreModeBOi(const Insn &insn) const;
uint32 GenLoadStoreModeBOrX(const Insn &insn) const;
uint32 GenLoadStoreRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenLoadStoreARInsn(const Insn &insn) const;
uint32 GenLoadExclusiveInsn(const Insn &insn) const;
uint32 GenLoadExclusivePairInsn(const Insn &insn) const;
uint32 GenStoreExclusiveInsn(const Insn &insn) const;
uint32 GenStoreExclusivePairInsn(const Insn &insn) const;
uint32 GenLoadPairInsn(const Insn &insn) const;
uint32 GenStorePairInsn(const Insn &insn) const;
uint32 GenLoadStoreFloatInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenLoadPairFloatInsn(const Insn &insn) const;
uint32 GenStorePairFloatInsn(const Insn &insn) const;
uint32 GenLoadLiteralRegInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
/* logical / move-wide / PC-relative / system / branch encoders */
uint32 GenLogicalRegInsn(const Insn &insn) const;
uint32 GenLogicalImmInsn(const Insn &insn) const;
uint32 GenMoveWideInsn(const Insn &insn) const;
uint32 GenPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenAddPCRelAddrInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenSystemInsn(const Insn &insn) const;
uint32 GenTestBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenCondBranchInsn(const Insn &insn, ObjFuncEmitInfo &objFuncEmitInfo) const;
uint32 GenMovReg(const Insn &insn) const;
uint32 GenMovImm(const Insn &insn) const;
/* NOTE(review): name says "Logica" — likely a typo for "Logical"; renaming would break callers */
uint32 EncodeLogicaImm(uint64 imm, uint32 size) const;
/* relocation fixup handlers applied after instruction bytes are emitted */
void HandleCallFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
void HandleAdrFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
void HandleLSDAFixup(ObjFuncEmitInfo &objFuncEmitInfo, const Fixup &fixup);
/* emit intrinsic insn */
void EmitMCCStackMapCall(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitEnv(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitClinit(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitCounter(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitLazyLoad(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitLazyLoadStatic(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitAdrpLdr(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitArrayClassCacheLoad(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitClinitTail(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitGetAndAddInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitGetAndSetInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitCompareAndSwapInt(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitStringIndexOf(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitStringIndexOf2(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitStringIndexOf3(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitCheckCastNoArray(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitCheckCastIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo);
void EmitCheckCastNoSubIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo);
void EmitInstanceOfIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo);
void EmitInstanceOfNoSubIsAssignable(const Insn &insn, std::vector<uint32> &label2Offset,
ObjFuncEmitInfo &objFuncEmitInfo);
void EmitMovMovkri16(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
void EmitMovMovk64ri16(const Insn &insn, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo);
/* Convenience wrapper: build a one-operand insn and encode it immediately. */
void EmitInsn(MOperator mOp, Operand &opnd1, std::vector<uint32> &label2Offset, ObjFuncEmitInfo &objFuncEmitInfo)
{
    auto &builder = *objFuncEmitInfo.GetCGFunc().GetInsnBuilder();
    EncodeInstruction(builder.BuildInsn(mOp, opnd1), label2Offset, objFuncEmitInfo);
}
/* Convenience wrapper: build a two-operand insn and encode it immediately. */
void EmitInsn(MOperator mOp, Operand &opnd1, Operand &opnd2, std::vector<uint32> &label2Offset,
              ObjFuncEmitInfo &objFuncEmitInfo)
{
    auto &builder = *objFuncEmitInfo.GetCGFunc().GetInsnBuilder();
    EncodeInstruction(builder.BuildInsn(mOp, opnd1, opnd2), label2Offset, objFuncEmitInfo);
}
/* Convenience wrapper: build a three-operand insn and encode it immediately. */
void EmitInsn(MOperator mOp, Operand &opnd1, Operand &opnd2, Operand &opnd3, std::vector<uint32> &label2Offset,
              ObjFuncEmitInfo &objFuncEmitInfo)
{
    auto &builder = *objFuncEmitInfo.GetCGFunc().GetInsnBuilder();
    EncodeInstruction(builder.BuildInsn(mOp, opnd1, opnd2, opnd3), label2Offset, objFuncEmitInfo);
}
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OBJ_EMIT_H */

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H
#include "offset_adjust.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
using namespace maple;
/* Frame-finalize pass: rewrites FP/LR-relative offsets once the final frame layout is known. */
class AArch64FPLROffsetAdjustment : public FrameFinalize {
public:
explicit AArch64FPLROffsetAdjustment(CGFunc &func) : FrameFinalize(func) {}
~AArch64FPLROffsetAdjustment() override = default;
/* entry point of the pass */
void Run() override;
private:
/* adjust the offset held in one operand of insn */
void AdjustmentOffsetForOpnd(Insn &insn, AArch64CGFunc &aarchCGFunc);
/* adjust an immediate operand at position index of insn */
void AdjustmentOffsetForImmOpnd(Insn &insn, uint32 index, AArch64CGFunc &aarchCGFunc) const;
void AdjustmentOffsetForFPLR();
/* frame pointer(x29) is available as a general-purpose register if useFP is set as false */
void AdjustmentStackPointer(Insn &insn, AArch64CGFunc &aarchCGFunc);
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OFFSET_ADJUST_H */

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H
#include <limits>
#include <string>
#include <iomanip>
#include "aarch64_isa.h"
#include "operand.h"
#include "cg.h"
#include "emit.h"
#include "common_utils.h"
namespace std {
template <> /* function-template-specialization */
class std::hash<maplebe::MemOperand> {
public:
    /*
     * Fold the addressing mode, operand size, and any base/index registers
     * into one seed. Combining order (mode, size, base, index) is fixed so
     * the hash stays stable.
     */
    size_t operator()(const maplebe::MemOperand &x) const
    {
        std::size_t seed = 0;
        hash_combine<uint8_t>(seed, x.GetAddrMode());
        hash_combine<uint32_t>(seed, x.GetSize());
        for (maplebe::RegOperand *reg : {x.GetBaseRegister(), x.GetIndexRegister()}) {
            if (reg != nullptr) {
                hash_combine<uint32_t>(seed, reg->GetRegisterNumber());
                hash_combine<uint32_t>(seed, reg->GetSize());
            }
        }
        return seed;
    }
};
} // namespace std
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPERAND_H */

View File

@ -0,0 +1,44 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H
#include "aarch64_isa.h"
#include "optimize_common.h"
namespace maplebe {
using namespace maple;
/* AArch64 implementation of the target-independent InsnVisitor interface. */
class AArch64InsnVisitor : public InsnVisitor {
public:
explicit AArch64InsnVisitor(CGFunc &func) : InsnVisitor(func) {}
~AArch64InsnVisitor() override = default;
/* three ways to retarget a jump: by label index, by operand, or by block */
void ModifyJumpTarget(maple::LabelIdx targetLabel, BB &bb) override;
void ModifyJumpTarget(Operand &targetOperand, BB &bb) override;
void ModifyJumpTarget(BB &newTarget, BB &bb) override;
/* Check if it requires to add extra gotos when relocate bb */
Insn *CloneInsn(Insn &originalInsn) override;
LabelIdx GetJumpLabel(const Insn &insn) const override;
/* insn classification queries used by CFG optimizations */
bool IsCompareInsn(const Insn &insn) const override;
bool IsCompareAndBranchInsn(const Insn &insn) const override;
bool IsAddOrSubInsn(const Insn &insn) const override;
RegOperand *CreateVregFromReg(const RegOperand &pReg) override;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_OPTIMIZE_COMMON_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,72 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * AArch64 code-generation phase pipeline. ORDER IS SIGNIFICANT: each phase
 * consumes the state the previous ones produced. The second argument is the
 * enabling condition evaluated at pipeline-build time.
 * NOTE(review): trailing semicolons are inconsistent (some ADDTARGETPHASE
 * lines have none) — presumably the macro tolerates both; confirm.
 */
/* frame setup and function scaffolding */
ADDTARGETPHASE("layoutstackframe", true);
ADDTARGETPHASE("createstartendlabel", true);
ADDTARGETPHASE("buildehfunc", true);
ADDTARGETPHASE("handlefunction", true);
ADDTARGETPHASE("moveargs", true);
if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) {
/* SSA PHASES */
ADDTARGETPHASE("cgssaconstruct", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgcopyprop", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgpeephole", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgvalidbitopt", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgtargetprop", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgdeadcodeelimination", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgsplitcriticaledge", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgphielimination", CGOptions::DoCGSSA());
ADDTARGETPHASE("cgregcoalesce", CGOptions::DoCGSSA());
}
/* Normal OPT PHASES */
ADDTARGETPHASE("cgprepeephole", CGOptions::DoPrePeephole());
ADDTARGETPHASE("ebo", CGOptions::DoEBO());
ADDTARGETPHASE("prepeephole", CGOptions::DoPrePeephole())
ADDTARGETPHASE("ico", CGOptions::DoICO())
ADDTARGETPHASE("cfgo", !GetMIRModule()->IsCModule() && CGOptions::DoCFGO());
if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) {
ADDTARGETPHASE("storeloadopt", CGOptions::DoStoreLoadOpt() && !CGOptions::DoCGSSA());
ADDTARGETPHASE("globalopt", CGOptions::DoGlobalOpt());
}
ADDTARGETPHASE("clearrdinfo", (CGOptions::DoStoreLoadOpt()) || CGOptions::DoGlobalOpt());
ADDTARGETPHASE("prepeephole1", CGOptions::DoPrePeephole());
ADDTARGETPHASE("ebo1", CGOptions::DoEBO());
ADDTARGETPHASE("prescheduling", !GetMIRModule()->IsJavaModule() && CGOptions::DoPreSchedule());
/* register allocation and the passes around it */
ADDTARGETPHASE("raopt", CGOptions::DoPreLSRAOpt());
ADDTARGETPHASE("cgsplitcriticaledge", GetMIRModule()->IsCModule());
ADDTARGETPHASE("regalloc", true);
ADDTARGETPHASE("regsaves", GetMIRModule()->IsCModule() && CGOptions::DoRegSavesOpt());
if (GetMIRModule()->GetFlavor() != MIRFlavor::kFlavorLmbc) {
ADDTARGETPHASE("storeloadopt", GetMIRModule()->IsCModule() && CGOptions::DoStoreLoadOpt());
ADDTARGETPHASE("globalopt", CGOptions::DoCGSSA());
}
ADDTARGETPHASE("clearrdinfo", GetMIRModule()->IsCModule() && (CGOptions::DoStoreLoadOpt() || CGOptions::DoGlobalOpt()));
/* prologue/epilogue, late cleanups, and final emission */
ADDTARGETPHASE("generateproepilog", true);
ADDTARGETPHASE("framefinalize", true);
ADDTARGETPHASE("dbgfixcallframeoffsets", true);
ADDTARGETPHASE("cfgo", GetMIRModule()->IsCModule() && CGOptions::DoCFGO());
ADDTARGETPHASE("peephole0", CGOptions::DoPeephole())
ADDTARGETPHASE("postebo", CGOptions::DoEBO());
ADDTARGETPHASE("postcfgo", CGOptions::DoCFGO());
ADDTARGETPHASE("cgpostpeephole", CGOptions::DoPeephole())
ADDTARGETPHASE("peephole", CGOptions::DoPeephole())
ADDTARGETPHASE("gencfi", !GetMIRModule()->IsCModule() || GetMIRModule()->IsWithDbgInfo());
ADDTARGETPHASE("yieldpoint", GetMIRModule()->IsJavaModule() && CGOptions::IsInsertYieldPoint());
ADDTARGETPHASE("scheduling", CGOptions::DoSchedule());
ADDTARGETPHASE("alignanalysis", GetMIRModule()->IsCModule() && CGOptions::DoAlignAnalysis());
ADDTARGETPHASE("fixshortbranch", true);
ADDTARGETPHASE("cgemit", true);

View File

@ -0,0 +1,51 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H
#define MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H
#include "cg_phi_elimination.h"
namespace maplebe {
/* AArch64 specialization of SSA phi-node elimination. */
class AArch64PhiEliminate : public PhiEliminate {
public:
AArch64PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp) : PhiEliminate(f, ssaAnalysisResult, mp)
{
}
~AArch64PhiEliminate() override = default;
/* NOTE(review): "Opearnd" is a typo for "Operand", but the name is part of the public interface */
RegOperand &GetCGVirtualOpearnd(RegOperand &ssaOpnd, const Insn &curInsn /* for remat */);
private:
void ReCreateRegOperand(Insn &insn) override;
/* emit the copy that replaces an eliminated phi edge */
Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) override;
void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) override;
RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) override;
void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const override;
};
/* Operand visitor used during phi elimination; rewrites operand idx of insn. */
class A64OperandPhiElmVisitor : public OperandPhiElmVisitor {
public:
A64OperandPhiElmVisitor(AArch64PhiEliminate *a64PhiElm, Insn &cInsn, uint32 idx)
: a64PhiEliminator(a64PhiElm), insn(&cInsn), idx(idx) {};
~A64OperandPhiElmVisitor() override = default;
/* one Visit overload per operand kind that can reference a vreg */
void Visit(RegOperand *v) final;
void Visit(ListOperand *v) final;
void Visit(MemOperand *v) final;
private:
AArch64PhiEliminate *a64PhiEliminator;
Insn *insn;
uint32 idx;
};
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_AARCH64_PHI_ELIMINATION_H

View File

@ -0,0 +1,120 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H
#include "proepilog.h"
#include "cg.h"
#include "operand.h"
#include "aarch64_cgfunc.h"
#include "aarch64_operand.h"
#include "aarch64_insn.h"
namespace maplebe {
using namespace maple;
/* Generates function prologue/epilogue sequences for AArch64. */
class AArch64GenProEpilog : public GenProEpilog {
public:
AArch64GenProEpilog(CGFunc &func, MemPool &memPool)
: GenProEpilog(func), tmpAlloc(&memPool), exitBB2CallSitesMap(tmpAlloc.Adapter())
{
useFP = func.UseFP();
/* lmbc flavor always addresses the frame through RFP; otherwise x29 or SP depending on useFP */
if (func.GetMirModule().GetFlavor() == MIRFlavor::kFlavorLmbc) {
stackBaseReg = RFP;
} else {
stackBaseReg = useFP ? R29 : RSP;
}
exitBB2CallSitesMap.clear();
}
~AArch64GenProEpilog() override = default;
bool TailCallOpt() override;
bool NeedProEpilog() override;
/* split an stp/ldp offset that is out of encodable range by materializing an add */
static MemOperand *SplitStpLdpOffsetForCalleeSavedWithAddInstruction(CGFunc &cgFunc, const MemOperand &mo,
uint32 bitLen,
AArch64reg baseReg = AArch64reg::kRinvalid);
/* callee-saved register save/restore helpers */
static void AppendInstructionPushPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int offset);
static void AppendInstructionPushSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset);
static void AppendInstructionPopSingle(CGFunc &cgFunc, AArch64reg reg, RegType rty, int offset);
static void AppendInstructionPopPair(CGFunc &cgFunc, AArch64reg reg0, AArch64reg reg1, RegType rty, int offset);
void Run() override;
private:
/* stack-protector emission */
void GenStackGuard(BB &);
BB &GenStackGuardCheckInsn(BB &);
bool HasLoop();
/* tail-call analysis helpers */
bool OptimizeTailBB(BB &bb, MapleSet<Insn *> &callInsns, const BB &exitBB) const;
void TailCallBBOpt(BB &bb, MapleSet<Insn *> &callInsns, BB &exitBB);
bool InsertOpndRegs(Operand &opnd, std::set<regno_t> &vecRegs) const;
bool InsertInsnRegs(Insn &insn, bool insetSource, std::set<regno_t> &vecSourceRegs, bool insertTarget,
std::set<regno_t> &vecTargetRegs);
bool FindRegs(Operand &insn, std::set<regno_t> &vecRegs) const;
bool BackwardFindDependency(BB &ifbb, std::set<regno_t> &vecReturnSourceReg, std::list<Insn *> &existingInsns,
std::list<Insn *> &moveInsns);
BB *IsolateFastPath(BB &);
/* prologue generation */
void AppendInstructionAllocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty);
void AppendInstructionAllocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty);
void GeneratePushRegs();
void GeneratePushUnnamedVarargRegs();
void AppendInstructionStackCheck(AArch64reg reg, RegType rty, int offset);
void GenerateProlog(BB &);
/* epilogue generation */
void GenerateRet(BB &bb);
bool TestPredsOfRetBB(const BB &exitBB);
void AppendInstructionDeallocateCallFrame(AArch64reg reg0, AArch64reg reg1, RegType rty);
void AppendInstructionDeallocateCallFrameDebug(AArch64reg reg0, AArch64reg reg1, RegType rty);
void GeneratePopRegs();
void AppendJump(const MIRSymbol &func);
void GenerateEpilog(BB &);
void GenerateEpilogForCleanup(BB &);
void ConvertToTailCalls(MapleSet<Insn *> &callInsnsMap);
Insn &CreateAndAppendInstructionForAllocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1,
RegType rty);
Insn &AppendInstructionForAllocateOrDeallocateCallFrame(int64 argsToStkPassSize, AArch64reg reg0, AArch64reg reg1,
RegType rty, bool isAllocate);
MapleMap<BB *, MapleSet<Insn *>> &GetExitBB2CallSitesMap()
{
return exitBB2CallSitesMap;
}
void SetCurTailcallExitBB(BB *bb)
{
curTailcallExitBB = bb;
}
BB *GetCurTailcallExitBB()
{
return curTailcallExitBB;
}
void SetFastPathReturnBB(BB *bb)
{
fastPathReturnBB = bb;
}
BB *GetFastPathReturnBB()
{
return fastPathReturnBB;
}
MapleAllocator tmpAlloc;
/* pair save/restore slot strides in bytes */
static constexpr const int32 kOffset8MemPos = 8;
static constexpr const int32 kOffset16MemPos = 16;
/* call sites per exit block, collected for tail-call conversion */
MapleMap<BB *, MapleSet<Insn *>> exitBB2CallSitesMap;
BB *curTailcallExitBB = nullptr;
BB *fastPathReturnBB = nullptr;
bool useFP = true;
/* frame pointer(x29) is available as a general-purpose register if useFP is set as false */
AArch64reg stackBaseReg = RFP;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_PROEPILOG_H */

View File

@ -0,0 +1,411 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_AARCH64_PROP_H
#define MAPLEBE_INCLUDE_AARCH64_PROP_H
#include "cg_prop.h"
#include "aarch64_cgfunc.h"
#include "aarch64_strldr.h"
namespace maplebe {
/* AArch64 driver for SSA-based copy/constant propagation. */
class AArch64Prop : public CGProp {
public:
AArch64Prop(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll) : CGProp(mp, f, sInfo, ll) {}
~AArch64Prop() override = default;
/* do not extend life range */
static bool IsInLimitCopyRange(VRegVersion *toBeReplaced);
private:
void CopyProp() override;
/*
* for aarch64
* 1. extended register prop
* 2. shift register prop
* 3. add/ext/shf prop -> str/ldr
* 4. const prop
*/
void TargetProp(Insn &insn) override;
void PropPatternOpt() override;
};
/* Folds address arithmetic (add/ext/shift defs) into str/ldr memory operands. */
class A64StrLdrProp {
public:
A64StrLdrProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn, CGDce &dce)
: cgFunc(&f),
ssaInfo(&sInfo),
curInsn(&insn),
a64StrLdrAlloc(&mp),
replaceVersions(a64StrLdrAlloc.Adapter()),
cgDce(&dce)
{
}
/* entry point: try the replacement on curInsn */
void DoOpt();
private:
MemOperand *StrLdrPropPreCheck(const Insn &insn, MemPropMode prevMod = kUndef);
static MemPropMode SelectStrLdrPropMode(const MemOperand &currMemOpnd);
bool ReplaceMemOpnd(const MemOperand &currMemOpnd, const Insn *defInsn);
MemOperand *SelectReplaceMem(const Insn &defInsn, const MemOperand &currMemOpnd);
RegOperand *GetReplaceReg(RegOperand &a64Reg);
MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal, uint32 memSize) const;
MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, uint32 amount, bool isSigned, uint32 memSize);
/* verify the folded offset is still encodable for this insn */
bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx) const;
void DoMemReplace(const RegOperand &replacedReg, MemOperand &newMem, Insn &useInsn);
uint32 GetMemOpndIdx(MemOperand *newMemOpnd, const Insn &insn) const;
bool CheckSameReplace(const RegOperand &replacedReg, const MemOperand *memOpnd) const;
CGFunc *cgFunc;
CGSSAInfo *ssaInfo;
Insn *curInsn;
MapleAllocator a64StrLdrAlloc;
/* vreg versions whose uses were rewritten; candidates for dead-code elimination */
MapleMap<regno_t, VRegVersion *> replaceVersions;
MemPropMode memPropMode = kUndef;
CGDce *cgDce = nullptr;
};
/* arithmetic kinds handled by constant folding/propagation below */
enum ArithmeticType { kAArch64Add, kAArch64Sub, kAArch64Orr, kAArch64Eor, kUndefArith };
/* Propagates/folds immediate constants into uses of curInsn's def. */
class A64ConstProp {
public:
A64ConstProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, Insn &insn)
: constPropMp(&mp), cgFunc(&f), ssaInfo(&sInfo), curInsn(&insn)
{
}
void DoOpt();
/* false : default lsl #0 true: lsl #12 (only support 12 bit left shift in aarch64) */
static MOperator GetRegImmMOP(MOperator regregMop, bool withLeftShift);
static MOperator GetReversalMOP(MOperator arithMop);
static MOperator GetFoldMopAndVal(int64 &newVal, int64 constVal, const Insn &arithInsn);
private:
bool ConstProp(DUInsnInfo &useDUInfo, ImmOperand &constOpnd);
/* use xzr/wzr in aarch64 to shrink register live range */
void ZeroRegProp(DUInsnInfo &useDUInfo, RegOperand &toReplaceReg);
/* replace old Insn with new Insn, update ssa info automatically */
void ReplaceInsnAndUpdateSSA(Insn &oriInsn, Insn &newInsn) const;
ImmOperand *CanDoConstFold(const ImmOperand &value1, const ImmOperand &value2, ArithmeticType aT, bool is64Bit);
/* optimization */
bool MovConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd);
bool ArithmeticConstReplace(DUInsnInfo &useDUInfo, ImmOperand &constOpnd, ArithmeticType aT);
bool ArithmeticConstFold(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd, ArithmeticType aT);
bool ShiftConstReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd);
bool BitInsertReplace(DUInsnInfo &useDUInfo, const ImmOperand &constOpnd);
MemPool *constPropMp;
CGFunc *cgFunc;
CGSSAInfo *ssaInfo;
Insn *curInsn;
};
/* Pattern: propagate register-to-register copies through their uses. */
class CopyRegProp : public PropOptimizePattern {
public:
CopyRegProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll)
: PropOptimizePattern(cgFunc, cgssaInfo, ll)
{
}
~CopyRegProp() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
/* reset per-insn state before matching the next candidate */
void Init() final
{
destVersion = nullptr;
srcVersion = nullptr;
}
private:
bool IsValidCopyProp(const RegOperand &dstReg, const RegOperand &srcReg) const;
/* NOTE(review): "Vaildate" is a typo for "Validate"; renaming would touch the .cpp as well */
void VaildateImplicitCvt(RegOperand &destReg, const RegOperand &srcReg, Insn &movInsn);
VRegVersion *destVersion = nullptr;
VRegVersion *srcVersion = nullptr;
};
/* Pattern: remove phi nodes whose operands are all the same version. */
class RedundantPhiProp : public PropOptimizePattern {
public:
RedundantPhiProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~RedundantPhiProp() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
/* reset per-insn state before matching the next candidate */
void Init() final
{
destVersion = nullptr;
srcVersion = nullptr;
}
private:
VRegVersion *destVersion = nullptr;
VRegVersion *srcVersion = nullptr;
};
/* Pattern: propagate based on tracked valid-bit widths of vreg versions. */
class ValidBitNumberProp : public PropOptimizePattern {
public:
ValidBitNumberProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~ValidBitNumberProp() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
/* reset per-insn state before matching the next candidate */
void Init() final
{
destVersion = nullptr;
srcVersion = nullptr;
}
private:
VRegVersion *destVersion = nullptr;
VRegVersion *srcVersion = nullptr;
};
/*
* frame pointer and stack pointer will not be varied in function body
* treat them as const
*/
class FpSpConstProp : public PropOptimizePattern {
public:
FpSpConstProp(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~FpSpConstProp() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
/* reset matched pattern state before the next candidate */
void Init() final
{
fpSpBase = nullptr;
shiftOpnd = nullptr;
aT = kUndefArith;
replaced = nullptr;
}
private:
bool GetValidSSAInfo(Operand &opnd);
/* propagation into the three possible use positions */
void PropInMem(DUInsnInfo &useDUInfo, Insn &useInsn);
void PropInArith(DUInsnInfo &useDUInfo, Insn &useInsn, ArithmeticType curAT);
void PropInCopy(DUInsnInfo &useDUInfo, Insn &useInsn, MOperator oriMop);
int64 ArithmeticFold(int64 valInUse, ArithmeticType useAT) const;
RegOperand *fpSpBase = nullptr;
ImmOperand *shiftOpnd = nullptr;
ArithmeticType aT = kUndefArith;
VRegVersion *replaced = nullptr;
};
/*
* This pattern do:
* 1)
* uxtw vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32]
* ------>
* mov vreg:Rm validBitNum:[64], vreg:Rn validBitNum:[32]
* 2)
* ldrh R201, [...]
* and R202, R201, #65520
* uxth R203, R202
* ------->
* ldrh R201, [...]
* and R202, R201, #65520
* mov R203, R202
*/
/* See the comment block above for the two rewrite patterns this class implements. */
class ExtendMovPattern : public PropOptimizePattern {
public:
ExtendMovPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~ExtendMovPattern() override = default;
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
protected:
void Init() final;
private:
bool BitNotAffected(const Insn &insn, uint32 validNum); /* check whether significant bits are affected */
bool CheckSrcReg(regno_t srcRegNo, uint32 validNum);
/* mov MOP that replaces the matched extend insn */
MOperator replaceMop = MOP_undef;
};
/* Folds a defining extend/shift into its arithmetic/compare use (e.g. add + lsl -> adds shifted form). */
class ExtendShiftPattern : public PropOptimizePattern {
public:
ExtendShiftPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~ExtendShiftPattern() override = default;
bool IsSwapInsn(const Insn &insn) const;
void SwapOpnd(Insn &insn);
bool CheckAllOpndCondition(Insn &insn);
bool CheckCondition(Insn &insn) final;
void Optimize(Insn &insn) final;
void Run() final;
void DoExtendShiftOpt(Insn &insn);
/* extended-register MOP families this pattern can produce */
enum ExMOpType : uint8 {
kExUndef,
kExAdd, /* MOP_xaddrrr | MOP_xxwaddrrre | MOP_xaddrrrs */
kEwAdd, /* MOP_waddrrr | MOP_wwwaddrrre | MOP_waddrrrs */
kExSub, /* MOP_xsubrrr | MOP_xxwsubrrre | MOP_xsubrrrs */
kEwSub, /* MOP_wsubrrr | MOP_wwwsubrrre | MOP_wsubrrrs */
kExCmn, /* MOP_xcmnrr | MOP_xwcmnrre | MOP_xcmnrrs */
kEwCmn, /* MOP_wcmnrr | MOP_wwcmnrre | MOP_wcmnrrs */
kExCmp, /* MOP_xcmprr | MOP_xwcmprre | MOP_xcmprrs */
kEwCmp, /* MOP_wcmprr | MOP_wwcmprre | MOP_wcmprrs */
};
/* shifted-register MOP families this pattern can produce */
enum LsMOpType : uint8 {
kLsUndef,
kLxAdd, /* MOP_xaddrrr | MOP_xaddrrrs */
kLwAdd, /* MOP_waddrrr | MOP_waddrrrs */
kLxSub, /* MOP_xsubrrr | MOP_xsubrrrs */
kLwSub, /* MOP_wsubrrr | MOP_wsubrrrs */
kLxCmn, /* MOP_xcmnrr | MOP_xcmnrrs */
kLwCmn, /* MOP_wcmnrr | MOP_wcmnrrs */
kLxCmp, /* MOP_xcmprr | MOP_xcmprrs */
kLwCmp, /* MOP_wcmprr | MOP_wcmprrs */
kLxEor, /* MOP_xeorrrr | MOP_xeorrrrs */
kLwEor, /* MOP_weorrrr | MOP_weorrrrs */
kLxNeg, /* MOP_xinegrr | MOP_xinegrrs */
kLwNeg, /* MOP_winegrr | MOP_winegrrs */
kLxIor, /* MOP_xiorrrr | MOP_xiorrrrs */
kLwIor, /* MOP_wiorrrr | MOP_wiorrrrs */
};
enum SuffixType : uint8 {
kNoSuffix, /* no suffix or do not perform the optimization. */
kLSL, /* logical shift left */
kLSR, /* logical shift right */
kASR, /* arithmetic shift right */
kExten /* ExtendOp */
};
protected:
void Init() final;
private:
void SelectExtendOrShift(const Insn &def);
SuffixType CheckOpType(const Operand &lastOpnd) const;
void ReplaceUseInsn(Insn &use, const Insn &def, uint32 amount);
void SetExMOpType(const Insn &use);
void SetLsMOpType(const Insn &use);
/* NOTE(review): these members have no in-class initializers (unlike sibling patterns);
 * presumably Init() assigns them before use — confirm in the .cpp */
MOperator replaceOp;
uint32 replaceIdx;
ExtendShiftOperand::ExtendOp extendOp;
BitShiftOperand::ShiftOp shiftOp;
Insn *defInsn = nullptr;
Insn *newInsn = nullptr;
Insn *curInsn = nullptr;
bool optSuccess;
ExMOpType exMOpType;
LsMOpType lsMOpType;
};
/*
* optimization for call convention
* example:
* [BB26] [BB43]
* sub R287, R101, R275 sub R279, R101, R275
* \ /
* \ /
* [BB27]
* <---- insert new phi: R403, (R275 <26>, R275 <43>)
* old phi: R297, (R287 <26>, R279 <43>)
* / \
* / \
* [BB28] \
* sub R310, R101, R309 \
* | \
* | \
* [BB17] [BB29] [BB44]
* sub R314, R101, R275 | /
* \ | /
* \ | /
* \ | /
* \ | /
* [BB18]
* <---- insert new phi: R404, (R275 <17>, R309 <29>, R403 <44>)
* old phi: R318, (R314 <17>, R310 <29>, R297 <44>)
* mov R1, R318 ====> sub R1, R101, R404
* / \
* / \
* / \
* [BB19] [BB34]
* sub R336, R101, R335 /
* \ /
* \ /
* \ /
* [BB20]
* <---- insert new phi: R405, (R335 <19>, R404<34>)
* old phi: R340, (R336 <19>, R318 <34>)
* mov R1, R340 ====> sub R1, R101, R405
*/
/* Implements the phi-rebuilding calling-convention optimization diagrammed above. */
class A64PregCopyPattern : public PropOptimizePattern {
public:
A64PregCopyPattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : PropOptimizePattern(cgFunc, cgssaInfo) {}
~A64PregCopyPattern() override
{
firstPhiInsn = nullptr;
}
bool CheckCondition(Insn &insn) override;
void Optimize(Insn &insn) override;
void Run() override;
protected:
/* reset matched-pattern state before the next candidate */
void Init() override
{
validDefInsns.clear();
firstPhiInsn = nullptr;
differIdx = -1;
differOrigNO = 0;
isCrossPhi = false;
}
private:
bool CheckUselessDefInsn(const Insn *defInsn) const;
bool CheckValidDefInsn(const Insn *defInsn);
bool CheckMultiUsePoints(const Insn *defInsn) const;
bool CheckPhiCaseCondition(Insn &curInsn, Insn &defInsn);
/* DFS across phi chains collecting the defs that feed the copied preg */
bool DFSFindValidDefInsns(Insn *curDefInsn, RegOperand *lastPhiDef, std::unordered_map<uint32, bool> &visited);
Insn &CreateNewPhiInsn(std::unordered_map<uint32, RegOperand *> &newPhiList, Insn *curInsn);
RegOperand &DFSBuildPhiInsn(Insn *curInsn, std::unordered_map<uint32, RegOperand *> &visited);
RegOperand *CheckAndGetExistPhiDef(Insn &phiInsn, std::vector<regno_t> &validDifferRegNOs) const;
std::vector<Insn *> validDefInsns;
Insn *firstPhiInsn = nullptr;
/* operand position (and its original vreg number) at which the collected defs differ */
int differIdx = -1;
regno_t differOrigNO = 0;
bool isCrossPhi = false;
};
/* Visitor that swaps oldRegister for newRegister inside operand cIdx of cInsn. */
class A64ReplaceRegOpndVisitor : public ReplaceRegOpndVisitor {
public:
A64ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldRegister, RegOperand &newRegister)
: ReplaceRegOpndVisitor(f, cInsn, cIdx, oldRegister, newRegister)
{
}
~A64ReplaceRegOpndVisitor() override = default;
private:
/* one Visit overload per operand kind that can contain a register reference */
void Visit(RegOperand *v) final;
void Visit(ListOperand *v) final;
void Visit(MemOperand *v) final;
void Visit(PhiOperand *v) final;
};
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_AARCH64_PROP_H */

View File

@ -0,0 +1,167 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H
#define MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H
#include "cg.h"
#include "ra_opt.h"
#include "aarch64_cg.h"
#include "aarch64_insn.h"
#include "aarch64_operand.h"
namespace maplebe {
/* Mutable bookkeeping record for the X0 propagation optimization (RaX0Opt):
   holds the mov source operand, the chosen replacement register, and the
   insn/operand/register currently being considered for renaming.
   Plain value holder; all pointers are non-owning. */
class X0OptInfo {
public:
    X0OptInfo() = default;
    ~X0OptInfo() = default;
    RegOperand *GetMovSrc() const
    {
        return movSrc;
    }
    regno_t GetReplaceReg() const
    {
        return replaceReg;
    }
    Insn *GetRenameInsn() const
    {
        return renameInsn;
    }
    Operand *GetRenameOpnd() const
    {
        return renameOpnd;
    }
    regno_t GetRenameReg() const
    {
        return renameReg;
    }
    void SetMovSrc(RegOperand *srcReg)
    {
        movSrc = srcReg;
    }
    void SetReplaceReg(regno_t regno)
    {
        replaceReg = regno;
    }
    void SetRenameInsn(Insn *insn)
    {
        renameInsn = insn;
    }
    /* clears the rename candidate */
    void ResetRenameInsn()
    {
        renameInsn = nullptr;
    }
    void SetRenameOpnd(Operand *opnd)
    {
        renameOpnd = opnd;
    }
    void SetRenameReg(regno_t regno)
    {
        renameReg = regno;
    }
private:
    /* default member initializers replace the old member-init-list ctor */
    RegOperand *movSrc = nullptr;
    regno_t replaceReg = 0;
    Insn *renameInsn = nullptr;
    Operand *renameOpnd = nullptr;
    regno_t renameReg = 0;
};
/* Optimization pass built around register X0: detects moves involving X0
   and propagates/renames registers to avoid redundant copies. PropagateX0()
   is the driver; the other members are its per-step helpers. */
class RaX0Opt {
public:
    explicit RaX0Opt(CGFunc *func) : cgFunc(func) {}
    ~RaX0Opt() = default;
    /* true if opnd may be rewritten to use replaceReg */
    bool PropagateX0CanReplace(Operand *opnd, regno_t replaceReg) const;
    bool PropagateRenameReg(Insn *insn, const X0OptInfo &optVal) const;
    /* records an X0-related mov candidate into optVal */
    bool PropagateX0DetectX0(const Insn *insn, X0OptInfo &optVal) const;
    bool PropagateX0DetectRedefine(const InsnDesc *md, const Insn *ninsn, const X0OptInfo &optVal, uint32 index) const;
    bool PropagateX0Optimize(const BB *bb, const Insn *insn, X0OptInfo &optVal);
    bool PropagateX0ForCurrBb(BB *bb, const X0OptInfo &optVal);
    void PropagateX0ForNextBb(BB *nextBb, const X0OptInfo &optVal);
    /* driver: applies the optimization across the function */
    void PropagateX0();
private:
    CGFunc *cgFunc; /* non-owning; function being optimized */
};
/* Per-virtual-register statistics collected by VregRename and consumed by
   IsProfitableToRename(). All counters start at zero; BB pointers are
   non-owning. */
class VregRenameInfo {
public:
    VregRenameInfo() = default;
    ~VregRenameInfo() = default;
    /* first/last BB in which this vreg was encountered */
    BB *firstBBLevelSeen = nullptr;
    BB *lastBBLevelSeen = nullptr;
    uint32 numDefs = 0;
    uint32 numUses = 0;
    /* defs/uses observed inside inner loops */
    uint32 numInnerDefs = 0;
    uint32 numInnerUses = 0;
    uint32 largestUnusedDistance = 0;
    uint8 innerMostloopLevelSeen = 0;
};
/* Renames long-lived virtual registers (driver: VregLongLiveRename) using
   per-vreg statistics in renameInfo, with loop information to decide
   profitability. */
class VregRename {
public:
    VregRename(CGFunc *func, MemPool *pool) : cgFunc(func), memPool(pool), alloc(pool), renameInfo(alloc.Adapter())
    {
        /* one renameInfo slot per existing vreg */
        renameInfo.resize(cgFunc->GetMaxRegNum());
        /* cache the condition-flags register number so it can be skipped */
        ccRegno = static_cast<RegOperand *>(&cgFunc->GetOrCreateRflag())->GetRegisterNumber();
    }; /* NOTE(review): stray ';' after ctor body - harmless empty declaration */
    ~VregRename() = default;
    void PrintRenameInfo(regno_t regno) const;
    void PrintAllRenameInfo() const;
    void RenameFindLoopVregs(const CGFuncLoops *loop);
    void RenameFindVregsToRename(const CGFuncLoops *loop);
    bool IsProfitableToRename(const VregRenameInfo *info) const;
    void RenameProfitableVreg(RegOperand *ropnd, const CGFuncLoops *loop);
    /* collects def/use statistics for the whole function */
    void RenameGetFuncVregInfo();
    void UpdateVregInfo(regno_t reg, BB *bb, bool isInner, bool isDef);
    /* entry point of the renaming transformation */
    void VregLongLiveRename();
    CGFunc *cgFunc;   /* non-owning */
    MemPool *memPool; /* non-owning */
    MapleAllocator alloc;
    Bfs *bfs = nullptr;
    MapleVector<VregRenameInfo *> renameInfo; /* indexed by vreg number */
    uint32 maxRegnoSeen = 0;
    regno_t ccRegno; /* condition-flags register, cached in the ctor */
};
/* AArch64 entry point for the pre-register-allocation optimization pass. */
class AArch64RaOpt : public RaOpt {
public:
    AArch64RaOpt(CGFunc &func, MemPool &pool) : RaOpt(func, pool) {}
    ~AArch64RaOpt() override = default;
    void Run() override;
private:
    /* no target-specific state yet */
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64RAOPT_H */

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H
#include "reaching.h"
#include "aarch64_operand.h"
namespace maplebe {
/* AArch64 reaching-definition analysis: locates defs/uses of registers and
   stack-memory slots between insns, within BBs, and across the CFG (DFS
   variants). Only declarations here; definitions live in the .cpp. */
class AArch64ReachingDefinition : public ReachingDefinition {
public:
    AArch64ReachingDefinition(CGFunc &func, MemPool &memPool) : ReachingDefinition(func, memPool) {}
    ~AArch64ReachingDefinition() override = default;
    std::vector<Insn *> FindRegDefBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn) const final;
    std::vector<Insn *> FindMemDefBetweenInsn(uint32 offset, const Insn *startInsn, Insn *endInsn) const final;
    bool FindRegUseBetweenInsn(uint32 regNO, Insn *startInsn, Insn *endInsn, InsnSet &useInsnSet) const final;
    bool FindRegUseBetweenInsnGlobal(uint32 regNO, Insn *startInsn, Insn *endInsn, BB *movBB) const final;
    bool FindMemUseBetweenInsn(uint32 offset, Insn *startInsn, const Insn *endInsn, InsnSet &useInsnSet) const final;
    bool HasRegDefBetweenInsnGlobal(uint32 regNO, Insn &startInsn, Insn &endInsn) const;
    bool DFSFindRegDefBetweenBB(const BB &startBB, const BB &endBB, uint32 regNO,
                                std::vector<VisitStatus> &visitedBB) const;
    /* indexOrRegNO is an operand index unless isRegNO is true */
    InsnSet FindDefForRegOpnd(Insn &insn, uint32 indexOrRegNO, bool isRegNO = false) const final;
    /* indexOrOffset is an operand index unless isOffset is true */
    InsnSet FindDefForMemOpnd(Insn &insn, uint32 indexOrOffset, bool isOffset = false) const final;
    InsnSet FindUseForMemOpnd(Insn &insn, uint8 index, bool secondMem = false) const final;
    bool FindRegUsingBetweenInsn(uint32 regNO, Insn *startInsn, const Insn *endInsn) const;
protected:
    void InitStartGen() final;
    void InitEhDefine(BB &bb) final;
    void InitGenUse(BB &bb, bool firstTime = true) final;
    void GenAllAsmDefRegs(BB &bb, Insn &insn, uint32 index) final;
    void GenAllAsmUseRegs(BB &bb, Insn &insn, uint32 index) final;
    void GenAllCallerSavedRegs(BB &bb, Insn &insn) final;
    bool IsRegKilledByCallInsn(const Insn &insn, regno_t regNO) const final;
    bool KilledByCallBetweenInsnInSameBB(const Insn &startInsn, const Insn &endInsn, regno_t regNO) const final;
    void AddRetPseudoInsn(BB &bb) final;
    void AddRetPseudoInsns() final;
    bool IsCallerSavedReg(uint32 regNO) const final;
    void FindRegDefInBB(uint32 regNO, BB &bb, InsnSet &defInsnSet) const final;
    void FindMemDefInBB(uint32 offset, BB &bb, InsnSet &defInsnSet) const final;
    void DFSFindDefForRegOpnd(const BB &startBB, uint32 regNO, std::vector<VisitStatus> &visitedBB,
                              InsnSet &defInsnSet) const final;
    void DFSFindDefForMemOpnd(const BB &startBB, uint32 offset, std::vector<VisitStatus> &visitedBB,
                              InsnSet &defInsnSet) const final;
    int32 GetStackSize() const final;
private:
    void InitInfoForMemOperand(Insn &insn, Operand &opnd, bool isDef);
    void InitInfoForListOpnd(const BB &bb, Operand &opnd);
    void InitInfoForConditionCode(const BB &bb);
    void InitInfoForRegOpnd(const BB &bb, Operand &opnd, bool isDef);
    void InitMemInfoForClearStackCall(Insn &callInsn);
    inline bool CallInsnClearDesignateStackRef(const Insn &callInsn, int64 offset) const;
    int64 GetEachMemSizeOfPair(MOperator opCode) const;
    /* NOTE(review): startBB is passed by value (const BB) while endBB is a
       reference; this looks like a missing '&'. Confirm against the
       out-of-line definitions before changing the signatures. */
    bool DFSFindRegInfoBetweenBB(const BB startBB, const BB &endBB, uint32 regNO, std::vector<VisitStatus> &visitedBB,
                                 std::list<bool> &pathStatus, DumpType infoType) const;
    bool DFSFindRegDomianBetweenBB(const BB startBB, uint32 regNO, std::vector<VisitStatus> &visitedBB) const;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REACHING_H */

View File

@ -0,0 +1,59 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H
#include "reg_coalesce.h"
#include "aarch64_isa.h"
#include "live.h"
namespace maplebe {
/* AArch64 live-interval computation and register coalescing: builds live
   intervals per vreg, then merges move-related registers (CoalesceRegisters)
   when their intervals do not interfere. */
class AArch64LiveIntervalAnalysis : public LiveIntervalAnalysis {
public:
    AArch64LiveIntervalAnalysis(CGFunc &func, MemPool &memPool)
        : LiveIntervalAnalysis(func, memPool), vregLive(alloc.Adapter()), candidates(alloc.Adapter())
    {
    }
    ~AArch64LiveIntervalAnalysis() override = default;
    void ComputeLiveIntervals() override;
    /* regs the analysis does not track (target-specific exclusions) */
    bool IsUnconcernedReg(const RegOperand &regOpnd) const;
    LiveInterval *GetOrCreateLiveInterval(regno_t regNO);
    void UpdateCallInfo();
    void SetupLiveIntervalByOp(Operand &op, Insn &insn, bool isDef);
    void ComputeLiveIntervalsForEachDefOperand(Insn &insn);
    void ComputeLiveIntervalsForEachUseOperand(Insn &insn);
    void SetupLiveIntervalInLiveOut(regno_t liveOut, const BB &bb, uint32 currPoint);
    void CoalesceRegPair(RegOperand &regDest, RegOperand &regSrc);
    void CoalesceRegisters() override;
    void CollectMoveForEachBB(BB &bb, std::vector<Insn *> &movInsns) const;
    /* phiOnly restricts coalescing to phi-related moves */
    void CoalesceMoves(std::vector<Insn *> &movInsns, bool phiOnly);
    void CheckInterference(LiveInterval &li1, LiveInterval &li2) const;
    void CollectCandidate();
    std::string PhaseName() const
    {
        return "regcoalesce";
    }
private:
    static bool IsRegistersCopy(Insn &insn);
    MapleUnorderedSet<regno_t> vregLive;  /* vregs live at the current point */
    MapleSet<regno_t> candidates;         /* coalescing candidates */
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REGCOALESCE_H */

View File

@ -0,0 +1,153 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H
#include "reg_info.h"
#include "aarch64_operand.h"
#include "aarch64_insn.h"
#include "aarch64_abi.h"
namespace maplebe {
/* AArch64 register description for register allocation: answers queries about
   register classes, parameter/return registers, reserved/untouchable
   registers, and builds spill/reload instructions. Mostly thin delegations
   to AArch64isa / AArch64Abi. */
class AArch64RegInfo : public RegisterInfo {
public:
    explicit AArch64RegInfo(MapleAllocator &mallocator) : RegisterInfo(mallocator) {}
    ~AArch64RegInfo() override = default;
    bool IsGPRegister(regno_t regNO) const override
    {
        return AArch64isa::IsGPRegister(static_cast<AArch64reg>(regNO));
    }
    /* phys reg which can be pre-Assignment */
    bool IsPreAssignedReg(regno_t regNO) const override
    {
        return AArch64Abi::IsParamReg(static_cast<AArch64reg>(regNO));
    }
    /* idx-th integer return register.
       BUGFIX: bound check used '<=', permitting idx == kNumIntParmRegs and
       an out-of-bounds read of intReturnRegs; must be '<'. */
    regno_t GetIntRetReg(uint32 idx) override
    {
        CHECK_FATAL(idx < AArch64Abi::kNumIntParmRegs, "index out of range in IntRetReg");
        return AArch64Abi::intReturnRegs[idx];
    }
    /* idx-th float return register.
       BUGFIX: same '<=' off-by-one as GetIntRetReg; also fixed the
       copy-pasted "IntRetReg" diagnostic to say "FpRetReg". */
    regno_t GetFpRetReg(uint32 idx) override
    {
        CHECK_FATAL(idx < AArch64Abi::kNumFloatParmRegs, "index out of range in FpRetReg");
        return AArch64Abi::floatReturnRegs[idx];
    }
    bool IsAvailableReg(regno_t regNO) const override
    {
        /* special handle for R9 due to MRT_CallSlowNativeExt */
        if (regNO == R9 || regNO == R29) {
            return false;
        }
        return AArch64Abi::IsAvailableReg(static_cast<AArch64reg>(regNO));
    }
    /* Those registers can not be overwrite. */
    bool IsUntouchableReg(regno_t regNO) const override
    {
        if ((regNO == RSP) || (regNO == RFP) || regNO == RZR) {
            return true;
        }
        /* when yieldpoint is enabled, the RYP(x19) can not be used. */
        if (GetCurrFunction()->GetCG()->GenYieldPoint() && (regNO == RYP)) {
            return true;
        }
        return false;
    }
    uint32 GetIntRegsParmsNum() override
    {
        return AArch64Abi::kNumIntParmRegs;
    }
    uint32 GetFloatRegsParmsNum() override
    {
        return AArch64Abi::kNumFloatParmRegs;
    }
    /* AAPCS64: the return-register sets coincide with the parameter sets */
    uint32 GetIntRetRegsNum() override
    {
        return AArch64Abi::kNumIntParmRegs;
    }
    uint32 GetFpRetRegsNum() override
    {
        return AArch64Abi::kNumFloatParmRegs;
    }
    uint32 GetNormalUseOperandNum() override
    {
        return AArch64Abi::kNormalUseOperandNum;
    }
    /* zero-based index of an int parameter register (R0 -> 0, ...) */
    uint32 GetIntParamRegIdx(regno_t regNO) const override
    {
        return static_cast<uint32>(regNO - *GetIntRegs().begin());
    }
    uint32 GetFpParamRegIdx(regno_t regNO) const override
    {
        return static_cast<uint32>(regNO - *GetFpRegs().begin());
    }
    regno_t GetLastParamsIntReg() override
    {
        return R7;
    }
    regno_t GetLastParamsFpReg() override
    {
        return V7;
    }
    uint32 GetAllRegNum() override
    {
        return kAllRegNum;
    }
    regno_t GetInvalidReg() override
    {
        return kRinvalid;
    }
    /* NOTE(review): boundary uses '>', so regno == kAllRegNum counts as
       physical; confirm that is the intended convention. */
    bool IsVirtualRegister(const RegOperand &regOpnd) override
    {
        return regOpnd.GetRegisterNumber() > kAllRegNum;
    }
    bool IsVirtualRegister(regno_t regno) override
    {
        return regno > kAllRegNum;
    }
    /* scratch registers reserved for spill code (x16/x17, the IP regs) */
    uint32 GetReservedSpillReg() override
    {
        return R16;
    }
    uint32 GetSecondReservedSpillReg() override
    {
        return R17;
    }
    void Init() override;
    void Fini() override;
    void SaveCalleeSavedReg(MapleSet<regno_t> savedRegs) override;
    bool IsSpecialReg(regno_t regno) const override;
    bool IsCalleeSavedReg(regno_t regno) const override;
    bool IsYieldPointReg(regno_t regNO) const override;
    bool IsUnconcernedReg(regno_t regNO) const override;
    bool IsUnconcernedReg(const RegOperand &regOpnd) const override;
    bool IsSpillRegInRA(regno_t regNO, bool has3RegOpnd) override;
    RegOperand *GetOrCreatePhyRegOperand(regno_t regNO, uint32 size, RegType kind, uint32 flag = 0) override;
    ListOperand *CreateListOperand() override;
    Insn *BuildMovInstruction(Operand &opnd0, Operand &opnd1) override;
    Insn *BuildStrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override;
    Insn *BuildLdrInsn(uint32 regSize, PrimType stype, RegOperand &phyOpnd, MemOperand &memOpnd) override;
    Insn *BuildCommentInsn(const std::string &comment) override;
    MemOperand *GetOrCreatSpillMem(regno_t vrNum, uint32 bitSize) override;
    MemOperand *AdjustMemOperandIfOffsetOutOfRange(MemOperand *memOpnd, regno_t vrNum, bool isDest, Insn &insn,
                                                   regno_t regNum, bool &isOutOfRange) override;
    void FreeSpillRegMem(regno_t vrNum) override;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_REG_INFO_H */

View File

@ -0,0 +1,276 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H
#define MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H
#include "cg.h"
#include "regsaves.h"
#include "aarch64_cg.h"
#include "aarch64_insn.h"
#include "aarch64_operand.h"
namespace maplebe {
/* Per-BB callee-save bookkeeping: the set of registers to save in the BB and
   the sets to restore at its entry and exit. Exists as a standalone class to
   avoid nesting Maple containers inside one another. */
class SavedRegInfo {
public:
    bool insertAtLastMinusOne = false;
    explicit SavedRegInfo(MapleAllocator &alloc)
        : saveSet(alloc.Adapter()), restoreEntrySet(alloc.Adapter()), restoreExitSet(alloc.Adapter())
    {
    }
    bool ContainSaveReg(regno_t r)
    {
        return saveSet.find(r) != saveSet.end();
    }
    bool ContainEntryReg(regno_t r)
    {
        return restoreEntrySet.find(r) != restoreEntrySet.end();
    }
    bool ContainExitReg(regno_t r)
    {
        return restoreExitSet.find(r) != restoreExitSet.end();
    }
    void InsertSaveReg(regno_t r)
    {
        saveSet.insert(r);
    }
    void InsertEntryReg(regno_t r)
    {
        restoreEntrySet.insert(r);
    }
    void InsertExitReg(regno_t r)
    {
        restoreExitSet.insert(r);
    }
    MapleSet<regno_t> &GetSaveSet()
    {
        return saveSet;
    }
    MapleSet<regno_t> &GetEntrySet()
    {
        return restoreEntrySet;
    }
    MapleSet<regno_t> &GetExitSet()
    {
        return restoreExitSet;
    }
    void RemoveSaveReg(regno_t r)
    {
        saveSet.erase(r);
    }
private:
    MapleSet<regno_t> saveSet;          /* regs to save in this BB */
    MapleSet<regno_t> restoreEntrySet;  /* regs to restore at BB entry */
    MapleSet<regno_t> restoreExitSet;   /* regs to restore at BB exit */
};
/* Per-register bookkeeping: the set of BBs in which a given callee-saved
   register must be saved. Thin wrapper around a MapleSet. */
class SavedBBInfo {
public:
    explicit SavedBBInfo(MapleAllocator &alloc) : bbList(alloc.Adapter()) {}
    MapleSet<BB *> &GetBBList()
    {
        return bbList;
    }
    void InsertBB(BB *bb)
    {
        (void)bbList.insert(bb);
    }
    void RemoveBB(BB *bb)
    {
        (void)bbList.erase(bb);
    }
private:
    MapleSet<BB *> bbList; /* BBs needing a save of the associated reg */
};
/* Callee-save placement optimization: instead of saving/restoring all
   callee-saved registers in the prologue/epilogue, computes per-BB save and
   restore points using dominator/post-dominator information. Callee-saved
   registers are tracked as bits in a 64-bit mask (CalleeBitsType), one mask
   per BB for defs and for uses. */
class AArch64RegSavesOpt : public RegSavesOpt {
public:
    AArch64RegSavesOpt(CGFunc &func, MemPool &pool, DomAnalysis &dom, PostDomAnalysis &pdom)
        : RegSavesOpt(func, pool),
          domInfo(&dom),
          pDomInfo(&pdom),
          bbSavedRegs(alloc.Adapter()),
          regSavedBBs(alloc.Adapter()),
          regOffset(alloc.Adapter()),
          id2bb(alloc.Adapter())
    {
        bbSavedRegs.resize(func.NumBBs());
        /* one slot per bit of the callee mask (64 = sizeof * 8) */
        regSavedBBs.resize(sizeof(CalleeBitsType) << 3);
        for (size_t i = 0; i < bbSavedRegs.size(); ++i) {
            bbSavedRegs[i] = nullptr;
        }
        for (size_t i = 0; i < regSavedBBs.size(); ++i) {
            regSavedBBs[i] = nullptr;
        }
    }
    ~AArch64RegSavesOpt() override = default;
    /* bit mask with one bit per callee-saved register (see RegBitMap) */
    using CalleeBitsType = uint64;
    void InitData();
    void CollectLiveInfo(const BB &bb, const Operand &opnd, bool isDef, bool isUse);
    void GenerateReturnBBDefUse(const BB &bb);
    void ProcessCallInsnParam(BB &bb);
    void ProcessAsmListOpnd(const BB &bb, Operand &opnd, uint32 idx);
    void ProcessListOpnd(const BB &bb, Operand &opnd);
    void ProcessMemOpnd(const BB &bb, Operand &opnd);
    void ProcessCondOpnd(const BB &bb);
    void GetLocalDefUse();
    void PrintBBs() const;
    int CheckCriteria(BB *bb, regno_t reg) const;
    bool AlreadySavedInDominatorList(const BB *bb, regno_t reg) const;
    void DetermineCalleeSaveLocationsDoms();
    void DetermineCalleeSaveLocationsPre();
    bool DetermineCalleeRestoreLocations();
    int32 FindNextOffsetForCalleeSave() const;
    void InsertCalleeSaveCode();
    void InsertCalleeRestoreCode();
    void Verify(regno_t reg, BB *bb, std::set<BB *, BBIdCmp> *visited, uint32 *s, uint32 *r);
    void Run() override;
    DomAnalysis *GetDomInfo() const
    {
        return domInfo;
    }
    PostDomAnalysis *GetPostDomInfo() const
    {
        return pDomInfo;
    }
    Bfs *GetBfs() const
    {
        return bfs;
    }
    CalleeBitsType *GetCalleeBitsDef()
    {
        return calleeBitsDef;
    }
    CalleeBitsType *GetCalleeBitsUse()
    {
        return calleeBitsUse;
    }
    /* mask of callee-saved regs recorded for BB id 'bid' */
    CalleeBitsType GetBBCalleeBits(CalleeBitsType *data, uint32 bid) const
    {
        return data[bid];
    }
    void SetCalleeBit(CalleeBitsType *data, uint32 bid, regno_t reg) const
    {
        CalleeBitsType mask = 1ULL << RegBitMap(reg);
        if ((GetBBCalleeBits(data, bid) & mask) == 0) {
            data[bid] = GetBBCalleeBits(data, bid) | mask;
        }
    }
    void ResetCalleeBit(CalleeBitsType *data, uint32 bid, regno_t reg) const
    {
        CalleeBitsType mask = 1ULL << RegBitMap(reg);
        data[bid] = GetBBCalleeBits(data, bid) & ~mask;
    }
    /* note: non-zero mask converts to true via implicit bool conversion */
    bool IsCalleeBitSet(CalleeBitsType *data, uint32 bid, regno_t reg) const
    {
        CalleeBitsType mask = 1ULL << RegBitMap(reg);
        return GetBBCalleeBits(data, bid) & mask;
    }
    /* AArch64 specific callee-save registers bit positions
        0 9 10 33 -- position
       R19 .. R28 V8 .. V15 V16 .. V31 -- regs */
    uint32 RegBitMap(regno_t reg) const
    {
        uint32 r;
        if (reg <= R28) {
            r = (reg - R19);
        } else {
            r = ((R28 - R19) + 1) + (reg - V8);
        }
        return r;
    }
    /* inverse of RegBitMap; 10 = number of GPR slots (R19..R28) above */
    regno_t ReverseRegBitMap(uint32 reg) const
    {
        if (reg < 10) {
            return static_cast<AArch64reg>(R19 + reg);
        } else {
            return static_cast<AArch64reg>((V8 + reg) - (R28 - R19 + 1));
        }
    }
    /* lazily allocates the per-BB save/restore record */
    SavedRegInfo *GetbbSavedRegsEntry(uint32 bid)
    {
        if (bbSavedRegs[bid] == nullptr) {
            bbSavedRegs[bid] = memPool->New<SavedRegInfo>(alloc);
        }
        return bbSavedRegs[bid];
    }
    void SetId2bb(BB *bb)
    {
        id2bb[bb->GetId()] = bb;
    }
    BB *GetId2bb(uint32 bid)
    {
        return id2bb[bid];
    }
private:
    DomAnalysis *domInfo;
    PostDomAnalysis *pDomInfo;
    Bfs *bfs = nullptr;
    CalleeBitsType *calleeBitsDef = nullptr;
    CalleeBitsType *calleeBitsUse = nullptr;
    MapleVector<SavedRegInfo *> bbSavedRegs; /* set of regs to be saved in a BB */
    MapleVector<SavedBBInfo *> regSavedBBs;  /* set of BBs to be saved for a reg */
    MapleMap<regno_t, uint32> regOffset;     /* save offset of each register */
    MapleMap<uint32, BB *> id2bb;            /* bbid to bb* mapping */
    bool oneAtaTime = false;
    regno_t oneAtaTimeReg = 0;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64REGSAVESOPT_H */

View File

@ -0,0 +1,330 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H
#include "schedule.h"
#include "aarch64_operand.h"
namespace maplebe {
/* Coarse register classes used by the scheduler's register-pressure logic. */
enum RegisterType : uint8 {
    kRegisterUndef,
    kRegisterInt,
    kRegisterFloat,
    kRegisterCc, /* condition flags */
    kRegisterLast,
};
/* Mutable state threaded through one list-scheduling run: the ready list of
   schedulable DepNodes, the nodes already scheduled, and the cycle counters.
   'size' pre-reserves both vectors (typically the BB's node count). */
class ScheduleProcessInfo {
public:
    explicit ScheduleProcessInfo(uint32 size)
    {
        availableReadyList.reserve(size);
        scheduledNodes.reserve(size);
    }
    virtual ~ScheduleProcessInfo() = default;
    uint32 GetLastUpdateCycle() const
    {
        return lastUpdateCycle;
    }
    void SetLastUpdateCycle(uint32 updateCycle)
    {
        lastUpdateCycle = updateCycle;
    }
    uint32 GetCurrCycle() const
    {
        return currCycle;
    }
    void IncCurrCycle()
    {
        ++currCycle;
    }
    /* NOTE(review): unsigned decrement; callers are expected to check
       GetAdvanceCycle() > 0 first — confirm at call sites. */
    void DecAdvanceCycle()
    {
        advanceCycle--;
    }
    uint32 GetAdvanceCycle() const
    {
        return advanceCycle;
    }
    void SetAdvanceCycle(uint32 cycle)
    {
        advanceCycle = cycle;
    }
    void ClearAvailableReadyList()
    {
        availableReadyList.clear();
    }
    void PushElemIntoAvailableReadyList(DepNode *node)
    {
        availableReadyList.emplace_back(node);
    }
    size_t SizeOfAvailableReadyList() const
    {
        return availableReadyList.size();
    }
    bool AvailableReadyListIsEmpty() const
    {
        return availableReadyList.empty();
    }
    void SetAvailableReadyList(const std::vector<DepNode *> &tempReadyList)
    {
        availableReadyList = tempReadyList;
    }
    /* FIX: removed a redundant non-const overload of this accessor that had
       the identical 'const std::vector<DepNode *> &' return type; the const
       overload below serves const and non-const callers alike. */
    const std::vector<DepNode *> &GetAvailableReadyList() const
    {
        return availableReadyList;
    }
    /* marks node scheduled at the current cycle and records it */
    void PushElemIntoScheduledNodes(DepNode *node)
    {
        node->SetState(kScheduled);
        node->SetSchedCycle(currCycle);
        node->OccupyUnits();
        scheduledNodes.emplace_back(node);
    }
    bool IsFirstSeparator() const
    {
        return isFirstSeparator;
    }
    void ResetIsFirstSeparator()
    {
        isFirstSeparator = false;
    }
    size_t SizeOfScheduledNodes() const
    {
        return scheduledNodes.size();
    }
    const std::vector<DepNode *> &GetScheduledNodes() const
    {
        return scheduledNodes;
    }
private:
    std::vector<DepNode *> availableReadyList; /* nodes ready to schedule */
    std::vector<DepNode *> scheduledNodes;     /* in scheduling order */
    uint32 lastUpdateCycle = 0;
    uint32 currCycle = 0;
    uint32 advanceCycle = 0;
    bool isFirstSeparator = true;
};
class AArch64ScheduleProcessInfo : public ScheduleProcessInfo {
public:
explicit AArch64ScheduleProcessInfo(uint32 size) : ScheduleProcessInfo(size) {}
~AArch64ScheduleProcessInfo() override = default;
/* recover register type which is not recorded in live analysis */
static RegType GetRegisterType(CGFunc &f, regno_t regNO);
void VaryLiveRegSet(CGFunc &f, regno_t regNO, bool isInc);
void VaryFreeRegSet(CGFunc &f, std::set<regno_t> regNOs, DepNode &node);
uint32 GetFreeIntRegs(DepNode &node)
{
return freeIntRegNodeSet.count(&node) ? freeIntRegNodeSet.find(&node)->second : 0;
}
void IncFreeIntRegNode(DepNode &node)
{
if (!freeIntRegNodeSet.count(&node)) {
freeIntRegNodeSet.emplace(std::pair<DepNode *, uint32>(&node, 1));
} else {
freeIntRegNodeSet.find(&node)->second++;
}
}
const std::map<DepNode *, uint32> &GetFreeIntRegNodeSet() const
{
return freeIntRegNodeSet;
}
void IncFreeFpRegNode(DepNode &node)
{
if (!freeFpRegNodeSet.count(&node)) {
freeFpRegNodeSet.emplace(std::pair<DepNode *, uint32>(&node, 1));
} else {
freeFpRegNodeSet.find(&node)->second++;
}
}
uint32 GetFreeFpRegs(DepNode &node)
{
return freeFpRegNodeSet.count(&node) ? freeFpRegNodeSet.find(&node)->second : 0;
}
const std::map<DepNode *, uint32> &GetFreeFpRegNodeSet() const
{
return freeFpRegNodeSet;
}
void ClearALLFreeRegNodeSet()
{
freeIntRegNodeSet.clear();
freeFpRegNodeSet.clear();
}
size_t FindIntLiveReg(regno_t reg) const
{
return intLiveRegSet.count(reg);
}
void IncIntLiveRegSet(regno_t reg)
{
intLiveRegSet.emplace(reg);
}
void DecIntLiveRegSet(regno_t reg)
{
intLiveRegSet.erase(reg);
}
size_t FindFpLiveReg(regno_t reg) const
{
return fpLiveRegSet.count(reg);
}
void IncFpLiveRegSet(regno_t reg)
{
fpLiveRegSet.emplace(reg);
}
void DecFpLiveRegSet(regno_t reg)
{
fpLiveRegSet.erase(reg);
}
size_t SizeOfIntLiveRegSet() const
{
return intLiveRegSet.size();
}
size_t SizeOfCalleeSaveLiveRegister(bool isInt)
{
size_t num = 0;
if (isInt) {
for (auto regNO : intLiveRegSet) {
if (regNO > static_cast<regno_t>(R19)) {
num++;
}
}
} else {
for (auto regNO : fpLiveRegSet) {
if (regNO > static_cast<regno_t>(V16)) {
num++;
}
}
}
return num;
}
size_t SizeOfFpLiveRegSet() const
{
return fpLiveRegSet.size();
}
private:
std::set<regno_t> intLiveRegSet;
std::set<regno_t> fpLiveRegSet;
std::map<DepNode *, uint32> freeIntRegNodeSet;
std::map<DepNode *, uint32> freeFpRegNodeSet;
};
/* AArch64 instruction scheduler: list scheduling with register-pressure
   heuristics, plus brute-force and simulate-only modes for tuning/debugging.
   Thresholds below gate the pressure-aware decisions. */
class AArch64Schedule : public Schedule {
public:
    AArch64Schedule(CGFunc &func, MemPool &memPool, LiveAnalysis &live, const std::string &phaseName)
        : Schedule(func, memPool, live, phaseName)
    {
        /* enhanced threshold applies when the frame pointer is free */
        intCalleeSaveThreshold = func.UseFP() ? intCalleeSaveThresholdBase : intCalleeSaveThresholdEnhance;
    }
    ~AArch64Schedule() override = default;
protected:
    void DumpDepGraph(const MapleVector<DepNode *> &nodes) const;
    void DumpScheduleResult(const MapleVector<DepNode *> &nodes, SimulateType type) const;
    void GenerateDot(const BB &bb, const MapleVector<DepNode *> &nodes) const;
    void EraseNodeFromNodeList(const DepNode &target, MapleVector<DepNode *> &nodeList) override;
    void FindAndCombineMemoryAccessPair(const std::vector<DepNode *> &memList) override;
    void RegPressureScheduling(BB &bb, MapleVector<DepNode *> &nodes) override;
private:
    /* tie-break outcome when comparing two candidate nodes */
    enum CSRResult : uint8 {
        kNode1,
        kNode2,
        kDoCSP /* can do csp further */
    };
    void Init() override;
    void MemoryAccessPairOpt() override;
    void ClinitPairOpt() override;
    uint32 DoSchedule() override;
    uint32 DoBruteForceSchedule() override;
    uint32 SimulateOnly() override;
    void UpdateBruteForceSchedCycle() override;
    void IterateBruteForce(DepNode &targetNode, MapleVector<DepNode *> &readyList, uint32 currCycle,
                           MapleVector<DepNode *> &scheduledNodes, uint32 &maxCycleCount,
                           MapleVector<DepNode *> &optimizedScheduledNodes) override;
    bool CanCombine(const Insn &insn) const override;
    void ListScheduling(bool beforeRA) override;
    void BruteForceScheduling(const BB &bb);
    void SimulateScheduling(const BB &bb);
    void FinalizeScheduling(BB &bb, const DepAnalysis &depAnalysis) override;
    uint32 ComputeEstart(uint32 cycle) override;
    void ComputeLstart(uint32 maxEstart) override;
    void UpdateELStartsOnCycle(uint32 cycle) override;
    void RandomTest() override;
    void EraseNodeFromReadyList(const DepNode &target) override;
    uint32 GetNextSepIndex() const override;
    void CountUnitKind(const DepNode &depNode, uint32 array[], const uint32 arraySize) const override;
    static bool IfUseUnitKind(const DepNode &depNode, uint32 index);
    void UpdateReadyList(DepNode &targetNode, MapleVector<DepNode *> &readyList, bool updateEStart) override;
    void UpdateScheduleProcessInfo(AArch64ScheduleProcessInfo &info);
    void UpdateAdvanceCycle(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &targetNode);
    bool CheckSchedulable(AArch64ScheduleProcessInfo &info) const;
    void SelectNode(AArch64ScheduleProcessInfo &scheduleInfo);
    static void DumpDebugInfo(const ScheduleProcessInfo &scheduleInfo);
    bool CompareDepNode(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const;
    void CalculateMaxUnitKindCount(ScheduleProcessInfo &scheduleInfo);
    void UpdateReleaseRegInfo(AArch64ScheduleProcessInfo &scheduleInfo);
    std::set<regno_t> CanFreeRegister(const DepNode &node) const;
    void UpdateLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo, const DepNode &node);
    void InitLiveRegSet(AArch64ScheduleProcessInfo &scheduleInfo);
    int CalSeriesCycles(const MapleVector<DepNode *> &nodes);
    CSRResult DoCSR(DepNode &node1, DepNode &node2, AArch64ScheduleProcessInfo &scheduleInfo) const;
    AArch64Schedule::CSRResult ScheduleCrossCall(const DepNode &node1, const DepNode &node2) const;
    int intCalleeSaveThreshold = 0; /* chosen in the ctor from the two bases */
    static uint32 maxUnitIndex;
    static int intRegPressureThreshold;
    static int fpRegPressureThreshold;
    static int intCalleeSaveThresholdBase;
    static int intCalleeSaveThresholdEnhance;
    static int fpCalleeSaveThreshold;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_SCHEDULE_H */

View File

@ -0,0 +1,108 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_AARCH64_SSA_H
#define MAPLEBE_CG_INCLUDE_AARCH64_SSA_H
#include "cg_ssa.h"
#include "aarch64_insn.h"
namespace maplebe {
/* AArch64 SSA construction/maintenance on CG IR: renames register operands
   into SSA versions and keeps SSA info consistent across insn replacement. */
class AArch64CGSSAInfo : public CGSSAInfo {
public:
    AArch64CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp) : CGSSAInfo(f, da, mp, tmp) {}
    ~AArch64CGSSAInfo() override = default;
    void DumpInsnInSSAForm(const Insn &insn) const override;
    RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) override;
    MemOperand *CreateMemOperand(MemOperand &memOpnd, bool isOnSSA /* false = on cgfunc */);
    void ReplaceInsn(Insn &oriInsn, Insn &newInsn) override;
    void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) override;
    void CreateNewInsnSSAInfo(Insn &newInsn) override;
private:
    void RenameInsn(Insn &insn) override;
    VRegVersion *RenamedOperandSpecialCase(RegOperand &vRegOpnd, Insn &curInsn, uint32 idx);
    RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) override;
    void CheckAsmDUbinding(Insn &insn, const VRegVersion *toBeReplaced, VRegVersion *newVersion);
};
/* Operand visitor that performs the per-operand SSA renaming for one insn,
   delegating version bookkeeping to the owning AArch64CGSSAInfo. */
class A64SSAOperandRenameVisitor : public SSAOperandVisitor {
public:
    A64SSAOperandRenameVisitor(AArch64CGSSAInfo &cssaInfo, Insn &cInsn, const OpndDesc &cProp, uint32 idx)
        : SSAOperandVisitor(cInsn, cProp, idx), ssaInfo(&cssaInfo)
    {
    }
    ~A64SSAOperandRenameVisitor() override = default;
    void Visit(RegOperand *v) final;
    void Visit(ListOperand *v) final;
    void Visit(MemOperand *a64MemOpnd) final;
private:
    AArch64CGSSAInfo *ssaInfo; /* non-owning back-pointer */
};
/* Visitor that increments/decrements SSA def-use counts when an insn's
   operands are added or removed; also visits phi operands.
   NOTE(review): 'Vsitor' is a typo for 'Visitor', but the name is part of
   the public interface — a rename would have to be repo-wide. */
class A64OpndSSAUpdateVsitor : public SSAOperandVisitor, public OperandVisitor<PhiOperand> {
public:
    explicit A64OpndSSAUpdateVsitor(AArch64CGSSAInfo &cssaInfo) : ssaInfo(&cssaInfo) {}
    ~A64OpndSSAUpdateVsitor() override = default;
    /* switch to increment mode (operands being added) */
    void MarkIncrease()
    {
        isDecrease = false;
    };
    /* switch to decrement mode (operands being removed) */
    void MarkDecrease()
    {
        isDecrease = true;
    };
    bool HasDeleteDef() const
    {
        return !deletedDef.empty();
    }
    void Visit(RegOperand *regOpnd) final;
    void Visit(ListOperand *v) final;
    void Visit(MemOperand *a64MemOpnd) final;
    void Visit(PhiOperand *phiOpnd) final;
    bool IsPhi() const
    {
        return isPhi;
    }
    void SetPhi(bool flag)
    {
        isPhi = flag;
    }
private:
    void UpdateRegUse(uint32 ssaIdx);
    void UpdateRegDef(uint32 ssaIdx);
    AArch64CGSSAInfo *ssaInfo;      /* non-owning */
    bool isDecrease = false;        /* current mode; see Mark* above */
    std::set<regno_t> deletedDef;   /* versions whose defs were removed */
    bool isPhi = false;             /* currently visiting a phi insn */
};
/* Visitor that prints operands in their SSA form, resolving versions via the
   shared regno -> VRegVersion map passed at construction. */
class A64SSAOperandDumpVisitor : public SSAOperandDumpVisitor {
public:
    explicit A64SSAOperandDumpVisitor(const MapleUnorderedMap<regno_t, VRegVersion *> &allssa)
        : SSAOperandDumpVisitor(allssa) {};
    ~A64SSAOperandDumpVisitor() override = default;
    void Visit(RegOperand *a64RegOpnd) final;
    void Visit(ListOperand *v) final;
    void Visit(MemOperand *a64MemOpnd) final;
    void Visit(PhiOperand *phi) final;
};
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_AARCH64_SSA_H

View File

@ -0,0 +1,75 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H
#include "strldr.h"
#include "aarch64_reaching.h"
#include "aarch64_operand.h"
namespace maplebe {
using namespace maple;
/* How a memory operand may be propagated: via its base register, its offset,
 * a sign/zero extension of the index, or a shifted index. kUndef = not yet selected. */
enum MemPropMode : uint8 { kUndef, kPropBase, kPropOffset, kPropSignedExtend, kPropUnsignedExtend, kPropShift };

/*
 * AArch64 store/load optimization pass.  Rewrites store-then-load sequences
 * into moves (DoLoadToMoveTransfer / DoLoadZeroToMoveTransfer), propagates
 * memory operands (MemProp), and converts str/ldr addressing to index modes
 * (StrLdrIndexModeOpt).
 */
class AArch64StoreLoadOpt : public StoreLoadOpt {
public:
    AArch64StoreLoadOpt(CGFunc &func, MemPool &memPool)
        : StoreLoadOpt(func, memPool), localAlloc(&memPool), str2MovMap(localAlloc.Adapter())
    {
    }
    ~AArch64StoreLoadOpt() override = default;
    void Run() final;
    void DoStoreLoadOpt();
    /* Replace loads of a stored zero with moves; strSrcIdx is the store's source operand index. */
    void DoLoadZeroToMoveTransfer(const Insn &strInsn, short strSrcIdx, const InsnSet &memUseInsnSet) const;
    /* Replace loads of a stored value with register moves for each using insn. */
    void DoLoadToMoveTransfer(Insn &strInsn, short strSrcIdx, short memSeq, const InsnSet &memUseInsnSet);
    bool CheckStoreOpCode(MOperator opCode) const;
    static bool CheckNewAmount(const Insn &insn, uint32 newAmount);

private:
    void StrLdrIndexModeOpt(Insn &currInsn);
    bool CheckReplaceReg(Insn &defInsn, Insn &currInsn, InsnSet &replaceRegDefSet, regno_t replaceRegNo);
    bool CheckDefInsn(Insn &defInsn, Insn &currInsn);
    bool CheckNewMemOffset(const Insn &insn, MemOperand *newMemOpnd, uint32 opndIdx);
    MemOperand *HandleArithImmDef(RegOperand &replace, Operand *oldOffset, int64 defVal);
    MemOperand *SelectReplaceMem(Insn &defInsn, Insn &curInsn, RegOperand &base, Operand *offset);
    MemOperand *SelectReplaceExt(const Insn &defInsn, RegOperand &base, bool isSigned);
    bool CanDoMemProp(const Insn *insn);
    bool CanDoIndexOpt(const MemOperand &MemOpnd);
    void MemPropInit();
    void SelectPropMode(const MemOperand &currMemOpnd);
    int64 GetOffsetForNewIndex(Insn &defInsn, Insn &insn, regno_t baseRegNO, uint32 memOpndSize);
    MemOperand *SelectIndexOptMode(Insn &insn, const MemOperand &curMemOpnd);
    bool ReplaceMemOpnd(Insn &insn, regno_t regNo, RegOperand &base, Operand *offset);
    void MemProp(Insn &insn);
    void ProcessStrPair(Insn &insn);
    void ProcessStr(Insn &insn);
    void GenerateMoveLiveInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, Insn &ldrInsn, Insn &strInsn,
                              short memSeq);
    void GenerateMoveDeadInsn(RegOperand &resRegOpnd, RegOperand &srcRegOpnd, Insn &ldrInsn, Insn &strInsn,
                              short memSeq);
    bool HasMemBarrier(const Insn &ldrInsn, const Insn &strInsn) const;
    bool IsAdjacentBB(Insn &defInsn, Insn &curInsn) const;
    MapleAllocator localAlloc;
    /* the max number of mov insn to optimize. */
    static constexpr uint8 kMaxMovNum = 2;
    /* maps a store insn to (up to kMaxMovNum) generated mov insns */
    MapleMap<Insn *, Insn *[kMaxMovNum]> str2MovMap;
    MemPropMode propMode = kUndef; /* current propagation mode, chosen by SelectPropMode */
    uint32 amount = 0;
    bool removeDefInsn = false;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_STRLDR_H */

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H
#include "aarch64_cg.h"
#include "aarch64_operand.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
/**
* Get or create new memory operand for load instruction loadIns for which
* machine opcode will be replaced with newLoadMop.
*
* @param loadIns load instruction
* @param newLoadMop new opcode for load instruction
* @return memory operand for new load machine opcode
* or nullptr if memory operand can't be obtained
*/
MemOperand *GetOrCreateMemOperandForNewMOP(CGFunc &cgFunc, const Insn &loadIns, MOperator newLoadMop);
} // namespace maplebe
#endif // MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_UTILS_H

View File

@ -0,0 +1,171 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H
#define MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H
#include "cg_validbit_opt.h"
#include "operand.h"
#include "aarch64_cgfunc.h"
namespace maplebe {
/*
 * AArch64 driver for the valid-bit optimization: computes/records how many
 * bits of each SSA register are significant and applies the patterns declared
 * below (AndValidBitPattern, ExtValidBitPattern, ...).
 */
class AArch64ValidBitOpt : public ValidBitOpt {
public:
    AArch64ValidBitOpt(CGFunc &f, CGSSAInfo &info) : ValidBitOpt(f, info) {}
    ~AArch64ValidBitOpt() override = default;
    void DoOpt(BB &bb, Insn &insn) override;
    /* Compute and record valid-bit information for insn's operands. */
    void SetValidBits(Insn &insn) override;
    /* Propagate valid-bit info through a phi; returns whether anything changed
     * (semantics defined by the out-of-line implementation — confirm there). */
    bool SetPhiValidBits(Insn &insn) override;
};
/*
* Example 1)
* def w9 def w9
* ... ...
* and w4, w9, #255 ===> mov w4, w9
*
* Example 2)
* and w6[16], w0[16], #FF00[16] mov w6, w0
* asr w6, w6[16], #8[4] ===> asr w6, w6
*/
/*
 * Pattern: an AND whose immediate mask covers all valid bits of the source is
 * redundant and can become a plain move (see examples above the class).
 */
class AndValidBitPattern : public ValidBitPattern {
public:
    AndValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
    /* NOTE(review): nulling raw non-owning pointers in the destructor has no
     * effect on callers; kept for consistency with sibling patterns. */
    ~AndValidBitPattern() override
    {
        desReg = nullptr;
        srcReg = nullptr;
    }
    void Run(BB &bb, Insn &insn) override;
    bool CheckCondition(Insn &insn) override;
    std::string GetPatternName() override
    {
        return "AndValidBitPattern";
    }

private:
    /* True if the AND immediate (valid width andImmVB) is compatible with shiftImm. */
    bool CheckImmValidBit(int64 andImm, uint32 andImmVB, int64 shiftImm) const;
    MOperator newMop = MOP_undef;   /* replacement opcode selected by CheckCondition */
    RegOperand *desReg = nullptr;   /* destination of the AND; not owned */
    RegOperand *srcReg = nullptr;   /* source of the AND; not owned */
};
/*
* Example 1)
* uxth w1[16], w2[16] / uxtb w1[8], w2[8]
* ===>
* mov w1, w2
*
* Example 2)
* ubfx w1, w2[16], #0, #16 / sbfx w1, w2[16], #0, #16
* ===>
* mov w1, w2
*/
/*
 * Pattern: uxtb/uxth/sbfx/ubfx whose extracted width already covers the
 * source's valid bits degenerates to a plain move (see examples above).
 */
class ExtValidBitPattern : public ValidBitPattern {
public:
    ExtValidBitPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
    /* NOTE(review): nulling raw non-owning pointers in the destructor has no
     * effect on callers; kept for consistency with sibling patterns. */
    ~ExtValidBitPattern() override
    {
        newDstOpnd = nullptr;
        newSrcOpnd = nullptr;
    }
    void Run(BB &bb, Insn &insn) override;
    bool CheckCondition(Insn &insn) override;
    std::string GetPatternName() override
    {
        return "ExtValidBitPattern";
    }

private:
    RegOperand *newDstOpnd = nullptr; /* operands of the replacement insn; not owned */
    RegOperand *newSrcOpnd = nullptr;
    MOperator newMop = MOP_undef;     /* replacement opcode selected by CheckCondition */
};
/*
* cmp w0, #0
* cset w1, NE --> mov w1, w0
*
* cmp w0, #0
* cset w1, EQ --> eor w1, w0, 1
*
* cmp w0, #1
* cset w1, NE --> eor w1, w0, 1
*
* cmp w0, #1
* cset w1, EQ --> mov w1, w0
*
* cmp w0, #0
* cset w0, NE -->null
*
* cmp w0, #1
* cset w0, EQ -->null
*
* condition:
* 1. the first operand of cmp instruction must has only one valid bit
* 2. the second operand of cmp instruction must be 0 or 1
 * 3. flag register of cmp instruction must not be used later
*/
/*
 * Pattern: cmp-with-0/1 followed by cset collapses to mov/eor (or nothing)
 * when the compared register has a single valid bit — see the comment block
 * above the class for the exact cases and conditions.
 */
class CmpCsetVBPattern : public ValidBitPattern {
public:
    CmpCsetVBPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
    ~CmpCsetVBPattern() override
    {
        cmpInsn = nullptr;
    }
    void Run(BB &bb, Insn &csetInsn) override;
    bool CheckCondition(Insn &csetInsn) override;
    /* NOTE(review): returns "CmpCsetPattern", not the class name — possibly
     * intentional (shared name with a peephole pattern); confirm before changing. */
    std::string GetPatternName() override
    {
        return "CmpCsetPattern";
    };

private:
    bool IsContinuousCmpCset(const Insn &curInsn);
    bool OpndDefByOneValidBit(const Insn &defInsn);
    Insn *cmpInsn = nullptr;  /* the matched cmp feeding the cset; not owned */
    int64 cmpConstVal = -1;   /* immediate of the matched cmp (expected 0 or 1) */
};
/*
* cmp w0[16], #32768
* bge label ===> tbnz w0, #15, label
*
* bge / blt
*/
/*
 * Pattern: a signed compare against a power-of-two bound followed by bge/blt
 * becomes a single tbnz/tbz on the sign-relevant bit (see example above).
 */
class CmpBranchesPattern : public ValidBitPattern {
public:
    CmpBranchesPattern(CGFunc &cgFunc, CGSSAInfo &info) : ValidBitPattern(cgFunc, info) {}
    ~CmpBranchesPattern() override
    {
        prevCmpInsn = nullptr;
    }
    void Run(BB &bb, Insn &insn) override;
    bool CheckCondition(Insn &insn) override;
    std::string GetPatternName() override
    {
        return "CmpBranchesPattern";
    };

private:
    /* Choose the replacement branch opcode based on the original branch mop. */
    void SelectNewMop(MOperator mop);
    Insn *prevCmpInsn = nullptr;  /* the matched cmp feeding the branch; not owned */
    int64 newImmVal = -1;         /* bit number for the tbnz/tbz replacement */
    MOperator newMop = MOP_undef;
    bool is64Bit = false;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_VALIDBIT_OPT_H */

View File

@ -0,0 +1,37 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H
#define MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H
#include "yieldpoint.h"
namespace maplebe {
using namespace maple;
/*
 * AArch64 implementation of the yield-point insertion phase: Run() delegates
 * to InsertYieldPoint(), which places yield points in the current function.
 */
class AArch64YieldPointInsertion : public YieldPointInsertion {
public:
    explicit AArch64YieldPointInsertion(CGFunc &func) : YieldPointInsertion(func) {}
    ~AArch64YieldPointInsertion() override = default;
    void Run() override;

private:
    void InsertYieldPoint();
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_AARCH64_YIELDPOINT_H */

View File

@ -0,0 +1,38 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H
#define MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H
#include <cstdint>
#include <array>
#include "types_def.h"
namespace maple {
/*
 * Memory-ordering attribute of an access.  kNotAtomic is the default; the
 * remaining enumerators are generated from memory_order_attrs.def via the
 * ATTR x-macro, so the full list lives in that .def file.
 */
enum class MemOrd : uint32 {
    kNotAtomic = 0,
#define ATTR(STR) STR,
#include "memory_order_attrs.def"
#undef ATTR
};
/* Map a raw uint32 (e.g. from IR) to a MemOrd value. */
MemOrd MemOrdFromU32(uint32 val);
/* True if ord implies acquire semantics. */
bool MemOrdIsAcquire(MemOrd ord);
/* True if ord implies release semantics. */
bool MemOrdIsRelease(MemOrd ord);
} /* namespace maple */
#endif /* MAPLEBE_INCLUDE_CG_AARCH64_MPL_ATOMIC_H */

View File

@ -0,0 +1,40 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_ABI_H
#define MAPLEBE_INCLUDE_CG_ABI_H
#include <cstdint>
#include "types_def.h"
#include "operand.h"
namespace maplebe {
enum ArgumentClass : uint8 {
kNoClass,
kIntegerClass,
kFloatClass,
kPointerClass,
kVectorClass,
kMemoryClass,
kShortVectorClass,
kCompositeTypeHFAClass, /* Homegeneous Floating-point Aggregates for AArch64 */
kCompositeTypeHVAClass, /* Homegeneous Short-Vector Aggregates for AArch64 */
};
using regno_t = uint32_t;
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_ABI_H */

View File

@ -0,0 +1,122 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Abstract Maple Machine IR */
/* {mop, opnds, prop, latency, name, format, length} */
/* Each DEFINE_MOP entry declares one abstract machine opcode: its operand
 * descriptors (destination first, e.g. Reg32ID = 32-bit int-reg def,
 * Reg32IS = 32-bit int-reg src, FD/FS = float def/src, Imm/Mem likewise),
 * property flags, latency, printable name, format string and length. */
DEFINE_MOP(MOP_undef, {}, ISABSTRACT,0,"","",0)
/* conversion between all types and registers */
DEFINE_MOP(MOP_copy_ri_8, {&OpndDesc::Reg8ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_8","",1)
DEFINE_MOP(MOP_copy_rr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISMOVE,0,"copy_rr_8","",1)
DEFINE_MOP(MOP_copy_ri_16, {&OpndDesc::Reg16ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_16","",1)
DEFINE_MOP(MOP_copy_rr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISMOVE,0,"copy_rr_16","",1)
DEFINE_MOP(MOP_copy_ri_32, {&OpndDesc::Reg32ID,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_ri_32","",1)
DEFINE_MOP(MOP_copy_rr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISMOVE,0,"copy_rr_32","",1)
DEFINE_MOP(MOP_copy_ri_64, {&OpndDesc::Reg64ID,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_ri_64","",1)
DEFINE_MOP(MOP_copy_rr_64, {&OpndDesc::Reg64ID, &OpndDesc::Reg64IS},ISABSTRACT|ISMOVE,0,"copy_rr_64","",1)
DEFINE_MOP(MOP_copy_fi_8, {&OpndDesc::Reg8FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_8","",1)
DEFINE_MOP(MOP_copy_ff_8, {&OpndDesc::Reg8FD,&OpndDesc::Reg8FS},ISABSTRACT|ISMOVE,0,"copy_ff_8","",1)
DEFINE_MOP(MOP_copy_fi_16, {&OpndDesc::Reg16FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_16","",1)
DEFINE_MOP(MOP_copy_ff_16, {&OpndDesc::Reg16FD,&OpndDesc::Reg16FS},ISABSTRACT|ISMOVE,0,"copy_ff_16","",1)
DEFINE_MOP(MOP_copy_fi_32, {&OpndDesc::Reg32FD,&OpndDesc::Imm32},ISABSTRACT|ISMOVE,0,"copy_fi_32","",1)
DEFINE_MOP(MOP_copy_ff_32, {&OpndDesc::Reg32FD,&OpndDesc::Reg32FS},ISABSTRACT|ISMOVE,0,"copy_ff_32","",1)
DEFINE_MOP(MOP_copy_fi_64, {&OpndDesc::Reg64FD,&OpndDesc::Imm64},ISABSTRACT|ISMOVE,0,"copy_fi_64","",1)
DEFINE_MOP(MOP_copy_ff_64, {&OpndDesc::Reg64FD, &OpndDesc::Reg64FS},ISABSTRACT|ISMOVE,0,"copy_ff_64","",1)
/* register extend */
DEFINE_MOP(MOP_zext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r16_r8","",1)
DEFINE_MOP(MOP_sext_rr_16_8, {&OpndDesc::Reg16ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r16_r8","",1)
DEFINE_MOP(MOP_zext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r8","",1)
DEFINE_MOP(MOP_sext_rr_32_8, {&OpndDesc::Reg32ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r8","",1)
DEFINE_MOP(MOP_zext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r32_r16","",1)
DEFINE_MOP(MOP_sext_rr_32_16, {&OpndDesc::Reg32ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r32_r16","",1)
DEFINE_MOP(MOP_zext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r8","",1)
DEFINE_MOP(MOP_sext_rr_64_8, {&OpndDesc::Reg64ID,&OpndDesc::Reg8IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r8","",1)
DEFINE_MOP(MOP_zext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r16","",1)
DEFINE_MOP(MOP_sext_rr_64_16, {&OpndDesc::Reg64ID,&OpndDesc::Reg16IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r16","",1)
DEFINE_MOP(MOP_zext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"zext_r64_r32","",1)
DEFINE_MOP(MOP_sext_rr_64_32, {&OpndDesc::Reg64ID,&OpndDesc::Reg32IS},ISABSTRACT|ISCONVERSION,0,"sext_r64_r32","",1)
/* conversion between different kinds of registers */
DEFINE_MOP(MOP_cvt_rf_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32FS},ISABSTRACT|ISCONVERSION,0,"cvt_rf_32","",1)
/* Support transformation between memory and registers */
DEFINE_MOP(MOP_str_8, {&OpndDesc::Reg8IS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_8","",1)
DEFINE_MOP(MOP_str_16, {&OpndDesc::Reg16IS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_16","",1)
DEFINE_MOP(MOP_str_32, {&OpndDesc::Reg32IS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_32","",1)
DEFINE_MOP(MOP_str_64, {&OpndDesc::Reg64IS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_64","",1)
DEFINE_MOP(MOP_load_8, {&OpndDesc::Reg8ID,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_8","",1)
DEFINE_MOP(MOP_load_16, {&OpndDesc::Reg16ID,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_16","",1)
DEFINE_MOP(MOP_load_32, {&OpndDesc::Reg32ID,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_32","",1)
DEFINE_MOP(MOP_load_64, {&OpndDesc::Reg64ID,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_64","",1)
DEFINE_MOP(MOP_str_f_8, {&OpndDesc::Reg8FS,&OpndDesc::Mem8D},ISABSTRACT|ISSTORE,0,"str_f_8","",1)
DEFINE_MOP(MOP_str_f_16, {&OpndDesc::Reg16FS,&OpndDesc::Mem16D},ISABSTRACT|ISSTORE,0,"str_f_16","",1)
DEFINE_MOP(MOP_str_f_32, {&OpndDesc::Reg32FS,&OpndDesc::Mem32D},ISABSTRACT|ISSTORE,0,"str_f_32","",1)
DEFINE_MOP(MOP_str_f_64, {&OpndDesc::Reg64FS,&OpndDesc::Mem64D},ISABSTRACT|ISSTORE,0,"str_f_64","",1)
DEFINE_MOP(MOP_load_f_8, {&OpndDesc::Reg8FD,&OpndDesc::Mem8S},ISABSTRACT|ISLOAD,0,"load_f_8","",1)
DEFINE_MOP(MOP_load_f_16, {&OpndDesc::Reg16FD,&OpndDesc::Mem16S},ISABSTRACT|ISLOAD,0,"load_f_16","",1)
DEFINE_MOP(MOP_load_f_32, {&OpndDesc::Reg32FD,&OpndDesc::Mem32S},ISABSTRACT|ISLOAD,0,"load_f_32","",1)
DEFINE_MOP(MOP_load_f_64, {&OpndDesc::Reg64FD,&OpndDesc::Mem64S},ISABSTRACT|ISLOAD,0,"load_f_64","",1)
/* Support three address basic operations */
DEFINE_MOP(MOP_add_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"add_8","",1)
DEFINE_MOP(MOP_add_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"add_16","",1)
DEFINE_MOP(MOP_add_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"add_32","",1)
DEFINE_MOP(MOP_add_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"add_64","",1)
DEFINE_MOP(MOP_sub_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"sub_8","",1)
DEFINE_MOP(MOP_sub_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"sub_16","",1)
DEFINE_MOP(MOP_sub_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"sub_32","",1)
DEFINE_MOP(MOP_sub_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"sub_64","",1)
DEFINE_MOP(MOP_or_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"or_8","",1)
DEFINE_MOP(MOP_or_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"or_16","",1)
DEFINE_MOP(MOP_or_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"or_32","",1)
DEFINE_MOP(MOP_or_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"or_64","",1)
DEFINE_MOP(MOP_xor_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"xor_8","",1)
DEFINE_MOP(MOP_xor_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"xor_16","",1)
DEFINE_MOP(MOP_xor_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"xor_32","",1)
DEFINE_MOP(MOP_xor_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"xor_64","",1)
DEFINE_MOP(MOP_and_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISBASICOP,0,"and_8","",1)
DEFINE_MOP(MOP_and_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISBASICOP,0,"and_16","",1)
DEFINE_MOP(MOP_and_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISBASICOP,0,"and_32","",1)
DEFINE_MOP(MOP_and_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISBASICOP,0,"and_64","",1)
/* shift -- shl/ashr/lshr */
DEFINE_MOP(MOP_shl_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"shl_8","",1)
DEFINE_MOP(MOP_shl_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"shl_16","",1)
DEFINE_MOP(MOP_shl_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"shl_32","",1)
DEFINE_MOP(MOP_shl_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"shl_64","",1)
DEFINE_MOP(MOP_ashr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"ashr_8","",1)
DEFINE_MOP(MOP_ashr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"ashr_16","",1)
DEFINE_MOP(MOP_ashr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"ashr_32","",1)
DEFINE_MOP(MOP_ashr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"ashr_64","",1)
DEFINE_MOP(MOP_lshr_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS, &OpndDesc::Reg8IS},ISABSTRACT|ISSHIFT,0,"lshr_8","",1)
DEFINE_MOP(MOP_lshr_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS, &OpndDesc::Reg16IS},ISABSTRACT|ISSHIFT,0,"lshr_16","",1)
DEFINE_MOP(MOP_lshr_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS, &OpndDesc::Reg32IS},ISABSTRACT|ISSHIFT,0,"lshr_32","",1)
DEFINE_MOP(MOP_lshr_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS, &OpndDesc::Reg64IS},ISABSTRACT|ISSHIFT,0,"lshr_64","",1)
/* Support two address basic operations */
DEFINE_MOP(MOP_neg_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"neg_8","",1)
DEFINE_MOP(MOP_neg_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"neg_16","",1)
DEFINE_MOP(MOP_neg_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"neg_32","",1)
DEFINE_MOP(MOP_neg_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"neg_64","",1)
DEFINE_MOP(MOP_not_8, {&OpndDesc::Reg8ID,&OpndDesc::Reg8IS},ISABSTRACT|ISUNARYOP,0,"not_8","",1)
DEFINE_MOP(MOP_not_16, {&OpndDesc::Reg16ID,&OpndDesc::Reg16IS},ISABSTRACT|ISUNARYOP,0,"not_16","",1)
DEFINE_MOP(MOP_not_32, {&OpndDesc::Reg32ID,&OpndDesc::Reg32IS},ISABSTRACT|ISUNARYOP,0,"not_32","",1)
DEFINE_MOP(MOP_not_64, {&OpndDesc::Reg64ID,&OpndDesc::Reg64IS},ISABSTRACT|ISUNARYOP,0,"not_64","",1)
/* MOP_comment */
DEFINE_MOP(MOP_comment, {&OpndDesc::String0S},ISABSTRACT,0,"//","0", 0)

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_ALIGNMENT_H
#define MAPLEBE_INCLUDE_CG_ALIGNMENT_H
#include "cg_phase.h"
#include "maple_phase.h"
#include "cgbb.h"
#include "loop.h"
namespace maplebe {
/*
 * Abstract base of the basic-block alignment analysis phase.  Collects loop
 * headers and jump targets that benefit from alignment and records the chosen
 * alignment (as a power of two) per BB; target backends implement the pure
 * virtual Find*/Compute* hooks and the filter predicates.
 */
class AlignAnalysis {
public:
    AlignAnalysis(CGFunc &func, MemPool &memP)
        : cgFunc(&func),
          alignAllocator(&memP),
          loopHeaderBBs(alignAllocator.Adapter()),
          jumpTargetBBs(alignAllocator.Adapter()),
          alignInfos(alignAllocator.Adapter()),
          sameTargetBranches(alignAllocator.Adapter())
    {
    }
    virtual ~AlignAnalysis() = default;
    /* Driver: runs the Find*/Compute* steps (see out-of-line definition). */
    void AnalysisAlignment();
    void Dump();
    virtual void FindLoopHeader() = 0;
    virtual void FindJumpTarget() = 0;
    virtual void ComputeLoopAlign() = 0;
    virtual void ComputeJumpAlign() = 0;
    virtual void ComputeCondBranchAlign() = 0;
    /* filter condition */
    virtual bool IsIncludeCall(BB &bb) = 0;
    virtual bool IsInSizeRange(BB &bb) = 0;
    virtual bool HasFallthruEdge(BB &bb) = 0;
    std::string PhaseName() const
    {
        return "alignanalysis";
    }
    const MapleUnorderedSet<BB *> &GetLoopHeaderBBs() const
    {
        return loopHeaderBBs;
    }
    const MapleUnorderedSet<BB *> &GetJumpTargetBBs() const
    {
        return jumpTargetBBs;
    }
    const MapleUnorderedMap<BB *, uint32> &GetAlignInfos() const
    {
        return alignInfos;
    }
    /* NOTE(review): uses operator[]; if MapleUnorderedMap follows
     * std::unordered_map semantics this inserts a 0 entry for an unseen BB. */
    uint32 GetAlignPower(BB &bb)
    {
        return alignInfos[&bb];
    }
    void InsertLoopHeaderBBs(BB &bb)
    {
        loopHeaderBBs.insert(&bb);
    }
    void InsertJumpTargetBBs(BB &bb)
    {
        jumpTargetBBs.insert(&bb);
    }
    /* Record the alignment of bb as 2^power. */
    void InsertAlignInfos(BB &bb, uint32 power)
    {
        alignInfos[&bb] = power;
    }

protected:
    CGFunc *cgFunc; /* function under analysis; not owned */
    MapleAllocator alignAllocator;
    MapleUnorderedSet<BB *> loopHeaderBBs;
    MapleUnorderedSet<BB *> jumpTargetBBs;
    MapleUnorderedMap<BB *, uint32> alignInfos; /* BB -> alignment power of two */
    MapleUnorderedMap<LabelIdx, uint32> sameTargetBranches;
};
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgAlignAnalysis, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_ALIGNMENT_H */

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_ARGS_H
#define MAPLEBE_INCLUDE_CG_ARGS_H
#include "cgfunc.h"
#include "cg_phase.h"
namespace maplebe {
/*
 * Base class of the "moveargs" phase, which moves incoming register arguments
 * into their home locations.  The base Run() is a no-op; target backends
 * override it with the real lowering.
 */
class MoveRegArgs {
public:
    explicit MoveRegArgs(CGFunc &func) : cgFunc(&func) {}
    virtual ~MoveRegArgs() = default;
    virtual void Run() {}
    std::string PhaseName() const
    {
        return "moveargs";
    }
    const CGFunc *GetCGFunc() const
    {
        return cgFunc;
    }

protected:
    CGFunc *cgFunc; /* function being processed; not owned */
};
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgMoveRegArgs, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_ARGS_H */

View File

@ -0,0 +1,48 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_ASM_EMIT_H
#define MAPLEBE_INCLUDE_CG_ASM_EMIT_H
#include "emit.h"
namespace maplebe {
/* Per-function emission context for the assembly emitter; adds nothing to
 * FuncEmitInfo beyond construction from a CGFunc. */
class AsmFuncEmitInfo : public FuncEmitInfo {
public:
    explicit AsmFuncEmitInfo(CGFunc &func) : FuncEmitInfo(func) {}
    virtual ~AsmFuncEmitInfo() = default;
};
/*
 * Abstract textual-assembly emitter.  The protected constructor opens (and
 * truncates) the output .s file; concrete targets implement the per-function
 * emission hooks below.  All members are protected: this class is only usable
 * through a derived target emitter.
 */
class AsmEmitter : public Emitter {
protected:
    AsmEmitter(CG &cg, const std::string &asmFileName) : Emitter(cg, asmFileName)
    {
        /* std::ios::trunc discards any existing file content.
         * fileStream is presumably declared in the Emitter base — confirm there. */
        fileStream.open(asmFileName, std::ios::trunc);
    }
    virtual ~AsmEmitter() = default;
    virtual void EmitRefToMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0;
    virtual void EmitRefToMethodInfo(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0;
    virtual void EmitMethodDesc(FuncEmitInfo &funcEmitInfo, Emitter &emitter) = 0;
    virtual void EmitFastLSDA(FuncEmitInfo &funcEmitInfo) = 0;
    virtual void EmitFullLSDA(FuncEmitInfo &funcEmitInfo) = 0;
    virtual void EmitBBHeaderLabel(FuncEmitInfo &funcEmitInfo, const std::string &name, LabelIdx labIdx) = 0;
    virtual void EmitJavaInsnAddr(FuncEmitInfo &funcEmitInfo) = 0;
    virtual void Run(FuncEmitInfo &funcEmitInfo) = 0;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_ASM_EMIT_H */

View File

@ -0,0 +1,238 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_ASM_INFO_H
#define MAPLEBE_INCLUDE_CG_ASM_INFO_H
#include "maple_string.h"
namespace maplebe {
/* Kinds of assembler directive to emit; each corresponds to one of the
 * directive strings held by AsmInfo below (e.g. kAsmGlbl -> .global,
 * kAsmQuad -> .quad). */
enum AsmLabel : uint8 {
    kAsmGlbl,
    kAsmLocal,
    kAsmWeak,
    kAsmBss,
    kAsmComm,
    kAsmData,
    kAsmAlign,
    kAsmSyname,
    kAsmZero,
    kAsmByte,
    kAsmShort,
    kAsmValue,
    kAsmLong,
    kAsmQuad,
    kAsmSize,
    kAsmType,
    kAsmText,
    kAsmHidden
};
/*
 * Immutable table of assembler directive strings, selected for the build
 * target at construction time (comment marker and .value/.long spellings
 * differ between x86, ARM32 and the default target).  All strings are
 * allocated from the supplied MemPool and exposed through const getters.
 */
class AsmInfo {
public:
    /* Comment-line marker ("//", "@" or "#" depending on target). */
    const MapleString &GetCmnt() const
    {
        return asmCmnt;
    }
    /* "%object" — symbol-type argument used with .type. */
    const MapleString &GetAtobt() const
    {
        return asmAtObt;
    }
    const MapleString &GetFile() const
    {
        return asmFile;
    }
    const MapleString &GetSection() const
    {
        return asmSection;
    }
    const MapleString &GetRodata() const
    {
        return asmRodata;
    }
    const MapleString &GetGlobal() const
    {
        return asmGlobal;
    }
    const MapleString &GetLocal() const
    {
        return asmLocal;
    }
    const MapleString &GetWeak() const
    {
        return asmWeak;
    }
    const MapleString &GetBss() const
    {
        return asmBss;
    }
    const MapleString &GetComm() const
    {
        return asmComm;
    }
    const MapleString &GetData() const
    {
        return asmData;
    }
    const MapleString &GetAlign() const
    {
        return asmAlign;
    }
    const MapleString &GetZero() const
    {
        return asmZero;
    }
    const MapleString &GetByte() const
    {
        return asmByte;
    }
    const MapleString &GetShort() const
    {
        return asmShort;
    }
    /* ".value" (".short" on ARM32) — 16-bit data directive. */
    const MapleString &GetValue() const
    {
        return asmValue;
    }
    /* ".long" (".word" on ARM32) — 32-bit data directive. */
    const MapleString &GetLong() const
    {
        return asmLong;
    }
    const MapleString &GetQuad() const
    {
        return asmQuad;
    }
    const MapleString &GetSize() const
    {
        return asmSize;
    }
    const MapleString &GetType() const
    {
        return asmType;
    }
    const MapleString &GetHidden() const
    {
        return asmHidden;
    }
    const MapleString &GetText() const
    {
        return asmText;
    }
    const MapleString &GetSet() const
    {
        return asmSet;
    }
    const MapleString &GetWeakref() const
    {
        return asmWeakref;
    }
    /* Only the comment marker and the 16/32-bit data directives vary by target. */
    explicit AsmInfo(MemPool &memPool)
#if TARGX86 || TARGX86_64
        : asmCmnt("\t//\t", &memPool),
#elif TARGARM32
        : asmCmnt("\t@\t", &memPool),
#else
        : asmCmnt("\t#\t", &memPool),
#endif
          asmAtObt("\t%object\t", &memPool),
          asmFile("\t.file\t", &memPool),
          asmSection("\t.section\t", &memPool),
          asmRodata(".rodata\t", &memPool),
          asmGlobal("\t.global\t", &memPool),
          asmLocal("\t.local\t", &memPool),
          asmWeak("\t.weak\t", &memPool),
          asmBss("\t.bss\t", &memPool),
          asmComm("\t.comm\t", &memPool),
          asmData("\t.data\t", &memPool),
          asmAlign("\t.align\t", &memPool),
          asmZero("\t.zero\t", &memPool),
          asmByte("\t.byte\t", &memPool),
          asmShort("\t.short\t", &memPool),
#ifdef TARGARM32
          asmValue("\t.short\t", &memPool),
#else
          asmValue("\t.value\t", &memPool),
#endif
#ifdef TARGARM32
          asmLong("\t.word\t", &memPool),
#else
          asmLong("\t.long\t", &memPool),
#endif
          asmQuad("\t.quad\t", &memPool),
          asmSize("\t.size\t", &memPool),
          asmType("\t.type\t", &memPool),
          asmHidden("\t.hidden\t", &memPool),
          asmText("\t.text\t", &memPool),
          asmSet("\t.set\t", &memPool),
          asmWeakref("\t.weakref\t", &memPool)
    {
    }
    ~AsmInfo() = default;

private:
    MapleString asmCmnt;
    MapleString asmAtObt;
    MapleString asmFile;
    MapleString asmSection;
    MapleString asmRodata;
    MapleString asmGlobal;
    MapleString asmLocal;
    MapleString asmWeak;
    MapleString asmBss;
    MapleString asmComm;
    MapleString asmData;
    MapleString asmAlign;
    MapleString asmZero;
    MapleString asmByte;
    MapleString asmShort;
    MapleString asmValue;
    MapleString asmLong;
    MapleString asmQuad;
    MapleString asmSize;
    MapleString asmType;
    MapleString asmHidden;
    MapleString asmText;
    MapleString asmSet;
    MapleString asmWeakref;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_ASM_INFO_H */

View File

@ -0,0 +1,300 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CALL_CONV_H
#define MAPLEBE_INCLUDE_CG_CALL_CONV_H
#include "types_def.h"
#include "becommon.h"
namespace maplebe {
using namespace maple;
/* Calling conventions the backend distinguishes: plain C, WebKit JS, GHC. */
enum CallConvKind { kCCall, kWebKitJS, kGHC };

/* for specifying how a parameter is passed */
struct CCLocInfo {
    regno_t reg0 = 0; /* 0 means parameter is stored on the stack */
    regno_t reg1 = 0;
    regno_t reg2 = 0; /* can have up to 4 single precision fp registers */
    regno_t reg3 = 0; /* for small structure return. */
    int32 memOffset = 0; /* stack offset when passed in memory */
    int32 memSize = 0;
    uint32 fpSize = 0;
    uint32 numFpPureRegs = 0;
    uint8 regCount = 0; /* number of registers <= 2 storing the return value */
    /* NOTE(review): unlike every other field, the four PrimType members below
     * have no initializer; they appear to be meaningful only when the
     * corresponding regN is valid — confirm callers always set them first. */
    PrimType primTypeOfReg0; /* the primitive type stored in reg0 */
    PrimType primTypeOfReg1; /* the primitive type stored in reg1 */
    PrimType primTypeOfReg2;
    PrimType primTypeOfReg3;

    uint8 GetRegCount() const
    {
        return regCount;
    }
    PrimType GetPrimTypeOfReg0() const
    {
        return primTypeOfReg0;
    }
    PrimType GetPrimTypeOfReg1() const
    {
        return primTypeOfReg1;
    }
    PrimType GetPrimTypeOfReg2() const
    {
        return primTypeOfReg2;
    }
    PrimType GetPrimTypeOfReg3() const
    {
        return primTypeOfReg3;
    }
    regno_t GetReg0() const
    {
        return reg0;
    }
    regno_t GetReg1() const
    {
        return reg1;
    }
    regno_t GetReg2() const
    {
        return reg2;
    }
    regno_t GetReg3() const
    {
        return reg3;
    }
    /* debug helper: print register assignment and memory location */
    void Dump()
    {
        std::cout << "reg: "
                  << "[" << reg0 << "], "
                  << "[" << reg1 << "]\n";
        std::cout << "memBase: " << memOffset << " memSize: " << memSize << std::endl;
    }
};
/*
 * Describes one formal parameter (or the return value) of an LMBC function:
 * its type, offset and size, plus the register/stack location assigned to it.
 * Only primType, offset and size are supplied at construction; everything
 * else starts at its in-class default and is filled in later.
 */
class LmbcFormalParamInfo {
public:
    LmbcFormalParamInfo(PrimType pType, uint32 ofst, uint32 sz) : primType(pType), offset(ofst), size(sz) {}

    ~LmbcFormalParamInfo() = default;

    MIRStructType *GetType()
    {
        return type;
    }
    void SetType(MIRStructType *ty)
    {
        type = ty;
    }
    PrimType GetPrimType() const
    {
        return primType;
    }
    void SetPrimType(PrimType pType)
    {
        primType = pType;
    }
    uint32 GetOffset() const
    {
        return offset;
    }
    void SetOffset(uint32 ofs)
    {
        offset = ofs;
    }
    uint32 GetOnStackOffset() const
    {
        return onStackOffset;
    }
    void SetOnStackOffset(uint32 ofs)
    {
        onStackOffset = ofs;
    }
    uint32 GetSize() const
    {
        return size;
    }
    void SetSize(uint32 sz)
    {
        size = sz;
    }
    regno_t GetRegNO() const
    {
        return regNO;
    }
    void SetRegNO(regno_t reg)
    {
        regNO = reg;
    }
    regno_t GetVregNO() const
    {
        return vregNO;
    }
    void SetVregNO(regno_t reg)
    {
        vregNO = reg;
    }
    uint32 GetNumRegs() const
    {
        return numRegs;
    }
    void SetNumRegs(uint32 num)
    {
        numRegs = num;
    }
    uint32 GetFpSize() const
    {
        return fpSize;
    }
    void SetFpSize(uint32 sz)
    {
        fpSize = sz;
    }
    /* the flag setters below are one-way: flags can be raised, never cleared */
    bool IsReturn() const
    {
        return isReturn;
    }
    void SetIsReturn()
    {
        isReturn = true;
    }
    bool IsPureFloat() const
    {
        return isPureFloat;
    }
    void SetIsPureFloat()
    {
        isPureFloat = true;
    }
    bool IsInReg() const
    {
        return !isOnStack;
    }
    bool IsOnStack() const
    {
        return isOnStack;
    }
    void SetIsOnStack()
    {
        isOnStack = true;
    }
    bool HasRegassign() const
    {
        return hasRegassign;
    }
    void SetHasRegassign()
    {
        hasRegassign = true;
    }

private:
    MIRStructType *type = nullptr; /* aggregate type, if the param is a struct */
    PrimType primType;             /* set in the constructor */
    uint32 offset;                 /* set in the constructor */
    uint32 onStackOffset = 0;      /* stack location if isOnStack */
    uint32 size;                   /* size of primtype or struct; set in ctor */
    regno_t regNO = 0;   /* param reg num or starting reg num if numRegs > 0 */
    regno_t vregNO = 0;  /* if no explicit regassign from IR, create move from param reg */
    uint32 numRegs = 0;  /* number of regs for struct param */
    uint32 fpSize = 0;   /* size of fp param if isPureFloat */
    bool isReturn = false;
    bool isPureFloat = false;
    bool isOnStack = false; /* large struct is passed by a copy on stack */
    bool hasRegassign = false;
};
/*
 * Abstract interface for a target calling convention: assigns each parameter
 * and return value of a function to registers or stack slots (results are
 * reported through CCLocInfo).
 */
class CCImpl {
public:
    CCImpl() = default;
    /* virtual: concrete conventions are selected and may be destroyed
     * polymorphically through CCImpl pointers (see GetCallConvKind) */
    virtual ~CCImpl() = default;

    virtual int32 LocateNextParm(MIRType &mirType, CCLocInfo &pLoc, bool isFirst = false,
                                 MIRFunction *func = nullptr) = 0;
    virtual int32 LocateRetVal(MIRType &retType, CCLocInfo &ploc) = 0;

    /* Reset pLoc to "no registers assigned"; memOffset resumes from the
     * running stack-argument cursor of this convention instance. */
    void InitCCLocInfo(CCLocInfo &pLoc) const
    {
        pLoc.reg0 = kInvalidRegNO;
        pLoc.reg1 = kInvalidRegNO;
        pLoc.reg2 = kInvalidRegNO;
        pLoc.reg3 = kInvalidRegNO;
        pLoc.memOffset = nextStackArgAdress;
        pLoc.fpSize = 0;
        pLoc.numFpPureRegs = 0;
    }

    virtual void InitReturnInfo(MIRType &retTy, CCLocInfo &pLoc) = 0;
    virtual void SetupSecondRetReg(const MIRType &retTy2, CCLocInfo &pLoc) const = 0;
    virtual void Init() = 0;

    /* Map function attributes to a convention; an explicit ccall attribute
     * wins, and the default is also the C convention. */
    static CallConvKind GetCallConvKind(MIRFunction &mirFunction)
    {
        if (mirFunction.GetAttr(FUNCATTR_ccall)) {
            return kCCall;
        }
        if (mirFunction.GetAttr(FUNCATTR_webkitjscall)) {
            return kWebKitJS;
        }
        if (mirFunction.GetAttr(FUNCATTR_ghcall)) {
            return kGHC;
        }
        return kCCall;
    }
    /* Same mapping, driven by per-statement (call-site) attributes. */
    static CallConvKind GetCallConvKind(StmtNode &node)
    {
        if (node.GetAttr(STMTATTR_ccall)) {
            return kCCall;
        }
        if (node.GetAttr(STMTATTR_webkitjscall)) {
            return kWebKitJS;
        }
        if (node.GetAttr(STMTATTR_ghcall)) {
            return kGHC;
        }
        return kCCall;
    }

protected:
    /* next free stack offset for memory-passed arguments
     * (name misspelled "Adress"; kept — derived conventions reference it) */
    int32 nextStackArgAdress = 0;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CALL_CONV_H */

View File

@ -0,0 +1,142 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CFGO_H
#define MAPLEBE_INCLUDE_CG_CFGO_H
#include "cg_cfg.h"
#include "optimize_common.h"
namespace maplebe {
/* CFG pattern: chains/merges fallthrough and goto blocks into straight-line code. */
class ChainingPattern : public OptimizationPattern {
public:
    explicit ChainingPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "BB Chaining";
        dotColor = kCfgoChaining;
    }
    virtual ~ChainingPattern() = default;
    bool Optimize(BB &curBB) override;

protected:
    /* true when no instruction exists between from and to */
    bool NoInsnBetween(const BB &from, const BB &to) const;
    /* true when both blocks end performing the equivalent last instruction */
    bool DoSameThing(const BB &bb1, const Insn &last1, const BB &bb2, const Insn &last2) const;
    bool MergeFallthuBB(BB &curBB);
    bool MergeGotoBB(BB &curBB, BB &sucBB);
    bool MoveSuccBBAsCurBBNext(BB &curBB, BB &sucBB);
    bool RemoveGotoInsn(BB &curBB, BB &sucBB);
    bool ClearCurBBAndResetTargetBB(BB &curBB, BB &sucBB);
};
/* CFG pattern: bypasses a successor that is only a jump to another block. */
class SequentialJumpPattern : public OptimizationPattern {
public:
    explicit SequentialJumpPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "Sequential Jump";
        dotColor = kCfgoSj;
    }
    virtual ~SequentialJumpPattern() = default;
    bool Optimize(BB &curBB) override;

protected:
    void SkipSucBB(BB &curBB, BB &sucBB);
    /* keep switch-table targets consistent after sucBB is bypassed */
    void UpdateSwitchSucc(BB &curBB, BB &sucBB);
};
/* CFG pattern: flips a conditional branch to straighten the hot path. */
class FlipBRPattern : public OptimizationPattern {
public:
    explicit FlipBRPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "Condition Flip";
        dotColor = kCfgoFlipCond;
    }
    virtual ~FlipBRPattern() = default;
    bool Optimize(BB &curBB) override;

protected:
    void RelocateThrowBB(BB &curBB);

private:
    /* target-specific hooks: which operand is the jump target, and the
     * opposite condition code for a given branch operator */
    virtual uint32 GetJumpTargetIdx(const Insn &insn) = 0;
    virtual MOperator FlipConditionOp(MOperator flippedOp) = 0;
};
/* This class represents the scenario that the BB is unreachable. */
class UnreachBBPattern : public OptimizationPattern {
public:
    explicit UnreachBBPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "Unreachable BB";
        dotColor = kCfgoUnreach;
        /* marking happens once, at construction, so Optimize only removes */
        func.GetTheCFG()->FindAndMarkUnreachable(*cgFunc);
    }
    virtual ~UnreachBBPattern() = default;
    bool Optimize(BB &curBB) override;
};
/*
 * This class represents the scenario that a common jump BB can be duplicated
 * to one of its another predecessor.
 */
class DuplicateBBPattern : public OptimizationPattern {
public:
    explicit DuplicateBBPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "Duplicate BB";
        dotColor = kCfgoDup;
    }
    virtual ~DuplicateBBPattern() = default;
    bool Optimize(BB &curBB) override;

private:
    /* upper bound on the size of a BB worth duplicating */
    static constexpr int kThreshold = 10;
};
/*
 * This class represents the scenario that a BB contains nothing.
 */
class EmptyBBPattern : public OptimizationPattern {
public:
    explicit EmptyBBPattern(CGFunc &func) : OptimizationPattern(func)
    {
        patternName = "Empty BB";
        dotColor = kCfgoEmpty;
    }
    virtual ~EmptyBBPattern() = default;
    bool Optimize(BB &curBB) override;
};
/* Driver that runs the CFG optimization patterns above over a function. */
class CFGOptimizer : public Optimizer {
public:
    CFGOptimizer(CGFunc &func, MemPool &memPool) : Optimizer(func, memPool)
    {
        name = "CFGO";
    }
    virtual ~CFGOptimizer() = default;
};
/* phase registrations: CFGO runs both early and post-RA (PostCfgo) */
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgCfgo, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostCfgo, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CFGO_H */

View File

@ -0,0 +1,54 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* Binutils 2.28 */
/* https://sourceware.org/binutils/docs-2.28/as/CFI-directives.html#CFI-directives */
/*
 * X-macro table of supported DWARF CFI directives:
 *   CFI_DEFINE(name, suffix, operandCount, opnd0Kind, opnd1Kind, opnd2Kind)
 * ARM_DIRECTIVES_DEFINE rows use the same layout for ARM-only unwind
 * directives.  Unused operand slots are marked Undef.
 */
CFI_DEFINE( sections, , 1, List, Undef, Undef )
CFI_DEFINE( startproc, , 0, Undef, Undef, Undef )
CFI_DEFINE( startproc, _simple, 1, String, Undef, Undef ) /* "simple" */
CFI_DEFINE( endproc, , 0, Undef, Undef, Undef )
CFI_DEFINE( personality, _default, 1, Immediate, Undef, Undef )
CFI_DEFINE( personality, _symbol, 2, Immediate, String, Undef )
CFI_DEFINE( personality, _constant, 2, Immediate, Immediate, Undef )
CFI_DEFINE( personality_id, , 1, StImmediate, Undef, Undef )
CFI_DEFINE( fde_data, , 1, List, Undef, Undef )
CFI_DEFINE( lsda, _default, 1, Immediate, Undef, Undef )
CFI_DEFINE( lsda, _label, 2, Immediate, BBAddress, Undef )
CFI_DEFINE( lsda, _constant, 2, Immediate, Immediate, Undef )
CFI_DEFINE( inline_lsda, , 0, Undef, Undef, Undef )
CFI_DEFINE( inline_lsda, _align, 1, Immediate, Undef, Undef ) /* power of 2 */
CFI_DEFINE( def_cfa, , 2, Register, Immediate, Undef )
CFI_DEFINE( def_cfa_register, , 1, Register, Undef, Undef )
CFI_DEFINE( def_cfa_offset, , 1, Immediate, Undef, Undef )
CFI_DEFINE( adjust_cfa_offset, , 1, Immediate, Undef, Undef )
CFI_DEFINE( offset, , 2, Register, Immediate, Undef )
CFI_DEFINE( val_offset, , 2, Register, Immediate, Undef )
CFI_DEFINE( rel_offset, , 2, Register, Immediate, Undef )
CFI_DEFINE( register, , 2, Register, Register, Undef )
CFI_DEFINE( restore, , 1, Register, Undef, Undef )
CFI_DEFINE( undefined, , 1, Register, Undef, Undef )
CFI_DEFINE( same_value, , 1, Register, Undef, Undef )
CFI_DEFINE( remember_state, , 0, Undef, Undef, Undef )
CFI_DEFINE( restore_state, , 0, Undef, Undef, Undef )
CFI_DEFINE( return_column, , 1, Register, Undef, Undef )
CFI_DEFINE( signal_frame, , 0, Undef, Undef, Undef )
CFI_DEFINE( window_save, , 0, Undef, Undef, Undef )
CFI_DEFINE( escape, , 2, StImmediate, List /*expression[, ...]*/, Undef )
CFI_DEFINE( val_encoded_addr, , 3, Register, Immediate, StImmediate )
ARM_DIRECTIVES_DEFINE( save, , 1, List, Undef, Undef )
ARM_DIRECTIVES_DEFINE( vsave, , 1, List, Undef, Undef )
ARM_DIRECTIVES_DEFINE( setfp, , 3, Register, Register, Immediate )
ARM_DIRECTIVES_DEFINE( pad, , 1, Immediate, Undef, Undef )

View File

@ -0,0 +1,295 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CFI_H
#define MAPLEBE_INCLUDE_CG_CFI_H
#include "insn.h"
#include "mempool_allocator.h"
#include "mir_symbol.h"
#include "operand.h"
#include "common_utils.h"
/*
* Reference:
* GNU Binutils. AS documentation
* https://sourceware.org/binutils/docs-2.28/as/index.html
*
* CFI blog
* https://www.imperialviolet.org/2017/01/18/cfi.html
*
* System V Application Binary Interface
* AMD64 Architecture Processor Supplement. Draft Version 0.99.7
* https://www.uclibc.org/docs/psABI-x86_64.pdf $ 3.7 Figure 3.36
* (RBP->6, RSP->7)
*
* System V Application Binary Interface
 * Intel386 Architecture Processor Supplement. Version 1.0
* https://www.uclibc.org/docs/psABI-i386.pdf $ 2.5 Table 2.14
* (EBP->5, ESP->4)
*
* DWARF for ARM Architecture (ARM IHI 0040B)
* infocenter.arm.com/help/topic/com.arm.doc.ihi0040b/IHI0040B_aadwarf.pdf
* $ 3.1 Table 1
* (0-15 -> R0-R15)
*/
namespace cfi {
using namespace maple;
/* One opcode per row of cfi.def: OP_CFI_<name><suffix> for CFI directives
 * and OP_ARM_DIRECTIVES_<name><suffix> for ARM unwind directives. */
enum CfiOpcode : uint8 {
#define CFI_DEFINE(k, sub, n, o0, o1, o2) OP_CFI_##k##sub,
#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub,
#include "cfi.def"
#undef CFI_DEFINE
#undef ARM_DIRECTIVES_DEFINE
    kOpCfiLast
};
/*
 * Pseudo-instruction carrying one DWARF CFI directive.  CfiInsns live in the
 * instruction stream but are emitted as assembler directives, never as
 * target machine code.
 */
class CfiInsn : public maplebe::Insn {
public:
    CfiInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {}
    CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {}
    CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1)
        : Insn(memPool, op, opnd0, opnd1)
    {
    }
    CfiInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1,
            maplebe::Operand &opnd2)
        : Insn(memPool, op, opnd0, opnd1, opnd2)
    {
    }
    ~CfiInsn() = default;

    /* CFI directives are assembler metadata, not machine instructions */
    bool IsMachineInstruction() const override
    {
        return false;
    }
    void Dump() const override;
#if DEBUG
    void Check() const override;
#endif
    bool IsCfiInsn() const override
    {
        return true;
    }
    bool IsTargetInsn() const override
    {
        return false;
    }
    /* CFI directives never define registers; querying one is a logic error */
    bool IsRegDefined(maplebe::regno_t regNO) const override
    {
        CHECK_FATAL(false, "cfi do not def regs");
        return false;
    }
    std::set<uint32> GetDefRegs() const override
    {
        CHECK_FATAL(false, "cfi do not def regs");
        return std::set<uint32>();
    }
    uint32 GetBothDefUseOpnd() const override
    {
        return maplebe::kInsnMaxOpnd;
    }

private:
    /* was declared but never defined (pre-C++11 idiom); deleting turns any
     * accidental assignment into a compile-time error instead of a link error */
    CfiInsn &operator=(const CfiInsn &) = delete;
};
/* CFI operand naming a DWARF register number. */
class RegOperand : public maplebe::OperandVisitable<RegOperand> {
public:
    RegOperand(uint32 no, uint32 size) : OperandVisitable(kOpdRegister, size), regNO(no) {}
    ~RegOperand() = default;
    using OperandVisitable<RegOperand>::OperandVisitable;

    uint32 GetRegisterNO() const
    {
        return regNO;
    }
    /* deep copy into memPool, as required by the Operand interface */
    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<RegOperand>(*this);
        return opnd;
    }
    void Dump() const override;
    /* CFI operands impose no ordering */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }

private:
    uint32 regNO;
};
/* CFI operand carrying a literal integer (offset, alignment, id, ...). */
class ImmOperand : public maplebe::OperandVisitable<ImmOperand> {
public:
    ImmOperand(int64 val, uint32 size) : OperandVisitable(kOpdImmediate, size), val(val) {}
    ~ImmOperand() = default;
    using OperandVisitable<ImmOperand>::OperandVisitable;

    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<ImmOperand>(*this);
        return opnd;
    }
    int64 GetValue() const
    {
        return val;
    }
    void Dump() const override;
    /* CFI operands impose no ordering */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }

private:
    int64 val;
};
/* CFI operand referring to a MIR symbol (non-owning pointer). */
class SymbolOperand : public maplebe::OperandVisitable<SymbolOperand> {
public:
    SymbolOperand(maple::MIRSymbol &mirSymbol, uint8 size) : OperandVisitable(kOpdStImmediate, size), symbol(&mirSymbol)
    {
    }
    ~SymbolOperand() = default;
    using OperandVisitable<SymbolOperand>::OperandVisitable;

    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<SymbolOperand>(*this);
        return opnd;
    }
    /* CFI operands impose no ordering */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }
    void Dump() const override
    {
        LogInfo::MapleLogger() << "symbol is : " << symbol->GetName();
    }

private:
    maple::MIRSymbol *symbol;
};
/* CFI operand carrying literal string text, copied into the mempool. */
class StrOperand : public maplebe::OperandVisitable<StrOperand> {
public:
    StrOperand(const std::string &str, MemPool &memPool) : OperandVisitable(kOpdString, 0), str(str, &memPool) {}
    ~StrOperand() = default;
    using OperandVisitable<StrOperand>::OperandVisitable;

    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<StrOperand>(*this);
        return opnd;
    }
    /* CFI operands impose no ordering */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }
    const MapleString &GetStr() const
    {
        return str;
    }
    void Dump() const override;

private:
    const MapleString str;
};
/* CFI operand naming a label, qualified by the function that owns it. */
class LabelOperand : public maplebe::OperandVisitable<LabelOperand> {
public:
    LabelOperand(const std::string &parent, LabelIdx labIdx, MemPool &memPool)
        : OperandVisitable(kOpdBBAddress, 0), parentFunc(parent, &memPool), labelIndex(labIdx)
    {
    }
    ~LabelOperand() = default;
    using OperandVisitable<LabelOperand>::OperandVisitable;

    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<LabelOperand>(*this);
        return opnd;
    }
    /* CFI operands impose no ordering */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }
    void Dump() const override;
    const MapleString &GetParentFunc() const
    {
        return parentFunc;
    }
    /* NOTE(review): name is a typo for GetLabelIdx; kept because callers
     * already use this spelling */
    LabelIdx GetIabelIdx() const
    {
        return labelIndex;
    };

private:
    const MapleString parentFunc;
    LabelIdx labelIndex;
};
/* Visitor that prints each concrete CFI operand kind through the emitter. */
class CFIOpndEmitVisitor
    : public maplebe::OperandVisitorBase,
      public maplebe::OperandVisitors<RegOperand, ImmOperand, SymbolOperand, StrOperand, LabelOperand> {
public:
    explicit CFIOpndEmitVisitor(maplebe::Emitter &asmEmitter) : emitter(asmEmitter) {}
    virtual ~CFIOpndEmitVisitor() = default;

protected:
    maplebe::Emitter &emitter;

private:
    void Visit(RegOperand *v) final;
    void Visit(ImmOperand *v) final;
    void Visit(SymbolOperand *v) final;
    void Visit(StrOperand *v) final;
    void Visit(LabelOperand *v) final;
};
} /* namespace cfi */
#endif /* MAPLEBE_INCLUDE_CG_CFI_H */

View File

@ -0,0 +1,474 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CG_H
#define MAPLEBE_INCLUDE_CG_CG_H
/* C++ headers. */
#include <cstddef>
#include <string>
/* MapleIR headers. */
#include "operand.h"
#include "insn.h"
#include "cgfunc.h"
#include "live.h"
#include "cg_option.h"
#include "opcode_info.h"
#include "global_tables.h"
#include "mir_function.h"
#include "mad.h"
namespace maplebe {
#define ADDTARGETPHASE(PhaseName, condition) \
if (!CGOptions::IsSkipPhase(PhaseName)) { \
pm->AddPhase(PhaseName, condition); \
}
/* subtarget opt phase -- cyclic Dependency, use Forward declaring */
class CGSSAInfo;
class PhiEliminate;
class DomAnalysis;
class CGProp;
class CGDce;
class AlignAnalysis;
class MoveRegArgs;
class MPISel;
class Standardize;
class LiveIntervalAnalysis;
class ValidBitOpt;
class CG;
class LocalOpt;
class CFGOptimizer;
/*
 * Process-wide singleton holding state shared across the whole code
 * generator: the backend commons, the machine description (MAD), the
 * optimization level and the active CG target.
 */
class Globals {
public:
    static Globals *GetInstance()
    {
        static Globals instance;
        return &instance;
    }
    ~Globals() = default;

    void SetBECommon(BECommon &bc)
    {
        beCommon = &bc;
    }
    BECommon *GetBECommon()
    {
        return beCommon;
    }
    const BECommon *GetBECommon() const
    {
        return beCommon;
    }
    void SetMAD(MAD &m)
    {
        mad = &m;
    }
    MAD *GetMAD()
    {
        return mad;
    }
    const MAD *GetMAD() const
    {
        return mad;
    }
    void SetOptimLevel(int32 opLevel)
    {
        optimLevel = opLevel;
    }
    int32 GetOptimLevel() const
    {
        return optimLevel;
    }
    /* defined out of line: CG is only forward-declared here */
    void SetTarget(CG &target);
    const CG *GetTarget() const;

private:
    BECommon *beCommon = nullptr;
    MAD *mad = nullptr;
    int32 optimLevel = 0;
    CG *cg = nullptr;
    Globals() = default; /* singleton: construct only via GetInstance */
};
/*
 * Target-independent code-generator driver.  Owns the module being compiled,
 * the emitter and the CG options; exposes option queries and virtual factory
 * methods through which each target supplies its analyses, optimizations and
 * CGFunc implementation.
 */
class CG {
public:
    using GenerateFlag = uint64;

public:
    CG(MIRModule &mod, const CGOptions &cgOptions)
        : memPool(memPoolCtrler.NewMemPool("maplecg mempool", false /* isLocalPool */)),
          allocator(memPool),
          mirModule(&mod),
          emitter(nullptr),
          labelOrderCnt(0),
          cgOption(cgOptions),
          instrumentationFunction(nullptr),
          fileGP(nullptr)
    {
        /* libcore detection: the module is libcore iff java.lang.Object is
         * present in the global symbol table */
        const std::string &internalNameLiteral = namemangler::GetInternalNameLiteral(namemangler::kJavaLangObjectStr);
        GStrIdx strIdxFromName = GlobalTables::GetStrTable().GetStrIdxFromName(internalNameLiteral);
        isLibcore = (GlobalTables::GetGsymTable().GetSymbolFromStrIdx(strIdxFromName) != nullptr);
        DefineDebugTraceFunctions();
        isLmbc = (mirModule->GetFlavor() == MIRFlavor::kFlavorLmbc);
    }
    virtual ~CG();

    /* enroll all code generator phases for target machine */
    virtual void EnrollTargetPhases(MaplePhaseManager *pm) const = 0;
    void GenExtraTypeMetadata(const std::string &classListFileName, const std::string &outputBaseName);
    void GenPrimordialObjectList(const std::string &outputBaseName);
    const std::string ExtractFuncName(const std::string &str);

    /* target-specific construction hooks */
    virtual Insn &BuildPhiInsn(RegOperand &defOpnd, Operand &listParam) = 0;
    virtual PhiOperand &CreatePhiOperand(MemPool &mp, MapleAllocator &mAllocator) = 0;
    virtual CGFunc *CreateCGFunc(MIRModule &mod, MIRFunction &, BECommon &, MemPool &, StackMemPool &, MapleAllocator &,
                                 uint32) = 0;

    bool IsExclusiveEH() const
    {
        return CGOptions::IsExclusiveEH();
    }
    virtual bool IsExclusiveFunc(MIRFunction &mirFunc) = 0;
    /* NOTE: Consider making be_common a field of CG. */
    virtual void GenerateObjectMaps(BECommon &beCommon) = 0;
    /* Used for GCTIB pattern merging */
    virtual std::string FindGCTIBPatternName(const std::string &name) const = 0;

    /* the query block below simply forwards to cgOption / CGOptions */
    bool GenerateVerboseAsm() const
    {
        return cgOption.GenerateVerboseAsm();
    }
    bool GenerateVerboseCG() const
    {
        return cgOption.GenerateVerboseCG();
    }
    bool DoPrologueEpilogue() const
    {
        return cgOption.DoPrologueEpilogue();
    }
    bool DoTailCall() const
    {
        return cgOption.DoTailCall();
    }
    bool DoCheckSOE() const
    {
        return cgOption.DoCheckSOE();
    }
    bool GenerateDebugFriendlyCode() const
    {
        return cgOption.GenerateDebugFriendlyCode();
    }
    int32 GetOptimizeLevel() const
    {
        return cgOption.GetOptimizeLevel();
    }
    bool UseFastUnwind() const
    {
        return true;
    }
    bool IsStackProtectorStrong() const
    {
        return cgOption.IsStackProtectorStrong();
    }
    bool IsStackProtectorAll() const
    {
        return cgOption.IsStackProtectorAll();
    }
    bool NeedInsertInstrumentationFunction() const
    {
        return cgOption.NeedInsertInstrumentationFunction();
    }
    void SetInstrumentationFunction(const std::string &name);
    const MIRSymbol *GetInstrumentationFunction() const
    {
        return instrumentationFunction;
    }
    bool InstrumentWithDebugTraceCall() const
    {
        return cgOption.InstrumentWithDebugTraceCall();
    }
    bool InstrumentWithProfile() const
    {
        return cgOption.InstrumentWithProfile();
    }
    bool DoPatchLongBranch() const
    {
        return cgOption.DoPatchLongBranch();
    }
    uint8 GetRematLevel() const
    {
        return CGOptions::GetRematLevel();
    }
    bool GenYieldPoint() const
    {
        return cgOption.GenYieldPoint();
    }
    bool GenLocalRC() const
    {
        return cgOption.GenLocalRC();
    }
    bool GenerateExceptionHandlingCode() const
    {
        return cgOption.GenerateExceptionHandlingCode();
    }
    bool DoConstFold() const
    {
        return cgOption.DoConstFold();
    }

    void AddStackGuardvar();
    void DefineDebugTraceFunctions();
    MIRModule *GetMIRModule()
    {
        return mirModule;
    }
    void SetEmitter(Emitter &emitter)
    {
        this->emitter = &emitter;
    }
    Emitter *GetEmitter() const
    {
        return emitter;
    }
    MIRModule *GetMIRModule() const
    {
        return mirModule;
    }
    void IncreaseLabelOrderCnt()
    {
        labelOrderCnt++;
    }
    LabelIDOrder GetLabelOrderCnt() const
    {
        return labelOrderCnt;
    }
    const CGOptions &GetCGOptions() const
    {
        return cgOption;
    }
    void UpdateCGOptions(const CGOptions &newOption)
    {
        cgOption.SetOptionFlag(newOption.GetOptionFlag());
    }
    bool IsLibcore() const
    {
        return isLibcore;
    }
    bool IsLmbc() const
    {
        return isLmbc;
    }
    MIRSymbol *GetDebugTraceEnterFunction()
    {
        return dbgTraceEnter;
    }
    const MIRSymbol *GetDebugTraceEnterFunction() const
    {
        return dbgTraceEnter;
    }
    MIRSymbol *GetProfileFunction()
    {
        return dbgFuncProfile;
    }
    const MIRSymbol *GetProfileFunction() const
    {
        return dbgFuncProfile;
    }
    const MIRSymbol *GetDebugTraceExitFunction() const
    {
        return dbgTraceExit;
    }

    /* Init SubTarget phase: each factory returns nullptr unless the target
     * overrides it to supply an implementation */
    virtual LiveAnalysis *CreateLiveAnalysis(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    };
    virtual ReachingDefinition *CreateReachingDefinition(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    };
    virtual MoveRegArgs *CreateMoveRegArgs(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    };
    virtual AlignAnalysis *CreateAlignAnalysis(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    };
    virtual MPISel *CreateMPIsel(MemPool &mp, MapleAllocator &allocator, CGFunc &f) const
    {
        return nullptr;
    }
    virtual Standardize *CreateStandardize(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    }
    virtual ValidBitOpt *CreateValidBitOpt(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const
    {
        return nullptr;
    }
    /* Init SubTarget optimization */
    virtual CGSSAInfo *CreateCGSSAInfo(MemPool &mp, CGFunc &f, DomAnalysis &da, MemPool &tmp) const
    {
        return nullptr;
    };
    virtual LiveIntervalAnalysis *CreateLLAnalysis(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    };
    virtual PhiEliminate *CreatePhiElimintor(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const
    {
        return nullptr;
    };
    virtual CGProp *CreateCGProp(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo, LiveIntervalAnalysis &ll) const
    {
        return nullptr;
    };
    virtual CGDce *CreateCGDce(MemPool &mp, CGFunc &f, CGSSAInfo &ssaInfo) const
    {
        return nullptr;
    };
    virtual LocalOpt *CreateLocalOpt(MemPool &mp, CGFunc &f, ReachingDefinition &) const
    {
        return nullptr;
    };
    virtual CFGOptimizer *CreateCFGOptimizer(MemPool &mp, CGFunc &f) const
    {
        return nullptr;
    }

    /* Object map generation helper */
    std::vector<int64> GetReferenceOffsets64(const BECommon &beCommon, MIRStructType &structType);

    void SetGP(MIRSymbol *sym)
    {
        fileGP = sym;
    }
    MIRSymbol *GetGP() const
    {
        return fileGP;
    }
    static bool IsInFuncWrapLabels(MIRFunction *func)
    {
        return funcWrapLabels.find(func) != funcWrapLabels.end();
    }
    /* records the (start, end) wrap labels once per function; later calls
     * for the same function are ignored */
    static void SetFuncWrapLabels(MIRFunction *func, const std::pair<LabelIdx, LabelIdx> labels)
    {
        if (!IsInFuncWrapLabels(func)) {
            funcWrapLabels[func] = labels;
        }
    }
    static std::map<MIRFunction *, std::pair<LabelIdx, LabelIdx>> &GetFuncWrapLabels()
    {
        return funcWrapLabels;
    }
    static void SetCurCGFunc(CGFunc &cgFunc)
    {
        currentCGFunction = &cgFunc;
    }
    static const CGFunc *GetCurCGFunc()
    {
        return currentCGFunction;
    }
    static CGFunc *GetCurCGFuncNoConst()
    {
        return currentCGFunction;
    }

    /* target instruction classification hooks */
    virtual const InsnDesc &GetTargetMd(MOperator mOp) const = 0;
    virtual bool IsEffectiveCopy(Insn &insn) const = 0;
    virtual bool IsTargetInsn(MOperator mOp) const = 0;
    virtual bool IsClinitInsn(MOperator mOp) const = 0;
    virtual bool IsPseudoInsn(MOperator mOp) const = 0;
    virtual void DumpTargetOperand(Operand &opnd, const OpndDesc &opndDesc) const = 0;

protected:
    MemPool *memPool;
    MapleAllocator allocator;

private:
    MIRModule *mirModule;
    Emitter *emitter;
    LabelIDOrder labelOrderCnt;
    static CGFunc *currentCGFunction; /* current cg function being compiled */
    CGOptions cgOption;
    MIRSymbol *instrumentationFunction;
    MIRSymbol *dbgTraceEnter = nullptr;
    MIRSymbol *dbgTraceExit = nullptr;
    MIRSymbol *dbgFuncProfile = nullptr;
    MIRSymbol *fileGP; /* for lmbc, one local %GP per file */
    static std::map<MIRFunction *, std::pair<LabelIdx, LabelIdx>> funcWrapLabels;
    bool isLibcore;
    bool isLmbc;
}; /* class CG */
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CG_H */

View File

@ -0,0 +1,133 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CG_CFG_H
#define MAPLEBE_INCLUDE_CG_CG_CFG_H
#include "eh_func.h"
#include "cgbb.h"
namespace maplebe {
/* Target-independent interface for inspecting and rewriting branch
 * instructions; each backend supplies a concrete implementation. */
class InsnVisitor {
public:
    explicit InsnVisitor(CGFunc &func) : cgFunc(&func) {}
    virtual ~InsnVisitor() = default;
    CGFunc *GetCGFunc() const
    {
        return cgFunc;
    }
    /*
     * Precondition:
     * The last instruction in bb is either conditional or unconditional jump.
     *
     * The jump target of bb is modified to the location specified by targetLabel.
     */
    virtual void ModifyJumpTarget(LabelIdx targetLabel, BB &bb) = 0;
    /*
     * Precondition:
     * The last instruction in bb is either conditional or unconditional jump.
     *
     * The jump target of bb is modified to the location specified by targetOperand.
     */
    virtual void ModifyJumpTarget(Operand &targetOperand, BB &bb) = 0;
    /*
     * Precondition:
     * The last instruction in bb is either a conditional or an unconditional jump.
     * The last instruction in newTarget is an unconditional jump.
     *
     * The jump target of bb is modified to newTarget's jump target.
     */
    virtual void ModifyJumpTarget(BB &newTarget, BB &bb) = 0;
    /* Create a copy of originalInsn. */
    virtual Insn *CloneInsn(Insn &originalInsn) = 0;
    /* Create a new virtual register operand which has the same type and size as the given one. */
    virtual RegOperand *CreateVregFromReg(const RegOperand &reg) = 0;
    virtual LabelIdx GetJumpLabel(const Insn &insn) const = 0;
    virtual bool IsCompareInsn(const Insn &insn) const = 0;
    virtual bool IsCompareAndBranchInsn(const Insn &insn) const = 0;
    virtual bool IsAddOrSubInsn(const Insn &insn) const = 0;

private:
    CGFunc *cgFunc;
}; /* class InsnVisitor; */
/* Builds and maintains the control-flow graph of a CGFunc: BB merging and
 * removal, unreachable-code analysis and critical-edge splitting. */
class CGCFG {
public:
    explicit CGCFG(CGFunc &cgFunc) : cgFunc(&cgFunc) {}
    ~CGCFG() = default;

    void BuildCFG();
    void CheckCFG();
    void CheckCFGFreq();
    void InitInsnVisitor(CGFunc &func);
    InsnVisitor *GetInsnModifier() const
    {
        return insnVisitor;
    }
    static bool AreCommentAllPreds(const BB &bb);
    bool CanMerge(const BB &merger, const BB &mergee) const;
    bool BBJudge(const BB &first, const BB &second) const;
    /*
     * Merge all instructions in mergee into merger, each BB's successors and
     * predecessors should be modified accordingly.
     */
    static void MergeBB(BB &merger, BB &mergee, CGFunc &func);
    /*
     * Remove a BB from its position in the CFG.
     * Prev, next, preds and sucs are all modified accordingly.
     */
    void RemoveBB(BB &curBB, bool isGotoIf = false);
    /* Skip the successor of bb, directly jump to bb's successor's successor */
    void RetargetJump(BB &srcBB, BB &targetBB);
    /* Look up if the given label is in the exception tables in LSDA */
    static bool InLSDA(LabelIdx label, const EHFunc &ehFunc);
    static bool InSwitchTable(LabelIdx label, const CGFunc &func);
    RegOperand *CreateVregFromReg(const RegOperand &pReg);
    Insn *CloneInsn(Insn &originalInsn);
    static BB *GetTargetSuc(BB &curBB, bool branchOnly = false, bool isGotoIf = false);
    bool IsCompareAndBranchInsn(const Insn &insn) const;
    bool IsAddOrSubInsn(const Insn &insn) const;
    Insn *FindLastCondBrInsn(BB &bb) const;
    static void FindAndMarkUnreachable(CGFunc &func);
    void FlushUnReachableStatusAndRemoveRelations(BB &bb, const CGFunc &func) const;
    void MarkLabelTakenBB();
    void UnreachCodeAnalysis();
    void FindWillExitBBs(BB *bb, std::set<BB *, BBIdCmp> *visitedBBs);
    void WontExitAnalysis();
    BB *FindLastRetBB();
    void UpdatePredsSuccsAfterSplit(BB &pred, BB &succ, BB &newBB);
    void BreakCriticalEdge(BB &pred, BB &succ);
    /* cgcfgvisitor */
private:
    CGFunc *cgFunc = nullptr;
    /* shared, target-supplied instruction modifier (set via InitInsnVisitor) */
    static InsnVisitor *insnVisitor;
    static void MergeBB(BB &merger, BB &mergee);
}; /* class CGCFG */
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgHandleCFG, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CG_CFG_H */

View File

@ -0,0 +1,41 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H
#define MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H
#include "cgbb.h"
#include "insn.h"
namespace maplebe {
/*
 * Collects the critical edges of a CGFunc's CFG and splits them.
 * Edge endpoints are recorded as (pred, succ) BB pairs in criticalEdges.
 */
class CriticalEdge {
public:
CriticalEdge(CGFunc &func, MemPool &mem) : cgFunc(&func), alloc(&mem), criticalEdges(alloc.Adapter()) {}
~CriticalEdge() = default;
void CollectCriticalEdges();
void SplitCriticalEdges();
private:
CGFunc *cgFunc;
MapleAllocator alloc;
/* (pred, succ) pairs recorded by CollectCriticalEdges, consumed by SplitCriticalEdges */
MapleVector<std::pair<BB *, BB *>> criticalEdges;
};
MAPLE_FUNC_PHASE_DECLARE(CgCriticalEdge, maplebe::CGFunc)
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CRITICAL_EDGE_H */

View File

@ -0,0 +1,62 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DCE_H
#define MAPLEBE_INCLUDE_CG_DCE_H
#include "cgfunc.h"
#include "cg_ssa.h"
namespace maplebe {
/* dead code elimination (SSA-based); target subclasses implement RemoveUnuseDef */
class CGDce {
public:
CGDce(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo) : memPool(&mp), cgFunc(&f), ssaInfo(&sInfo) {}
virtual ~CGDce() = default;
/* Run the elimination over cgFunc. */
void DoDce();
/* provide public use in ssa opt */
virtual bool RemoveUnuseDef(VRegVersion &defVersion) = 0;
CGSSAInfo *GetSSAInfo()
{
return ssaInfo;
}
protected:
MemPool *memPool;
CGFunc *cgFunc;
CGSSAInfo *ssaInfo;
};
/*
 * Operand visitor parameterized by the id of an insn being deleted
 * (deleteInsnId); concrete visit behavior is supplied by subclasses /
 * the registered visitor overloads.  NOTE(review): presumably removes the
 * insn's register-use records from ssaInfo — confirm against the .cpp.
 */
class DeleteRegUseVisitor : public OperandVisitorBase,
public OperandVisitors<RegOperand, ListOperand, MemOperand>,
public OperandVisitor<PhiOperand> {
public:
DeleteRegUseVisitor(CGSSAInfo &cgSSAInfo, uint32 dInsnID) : deleteInsnId(dInsnID), ssaInfo(&cgSSAInfo) {}
virtual ~DeleteRegUseVisitor() = default;
protected:
CGSSAInfo *GetSSAInfo()
{
return ssaInfo;
}
/* id of the insn whose uses are being processed */
uint32 deleteInsnId;
private:
CGSSAInfo *ssaInfo;
};
MAPLE_FUNC_PHASE_DECLARE(CgDce, maplebe::CGFunc)
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_CG_DCE_H */

View File

@ -0,0 +1,323 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DOM_H
#define MAPLEBE_INCLUDE_CG_DOM_H
#include "cg_phase.h"
#include "insn.h"
#include "cgbb.h"
#include "datainfo.h"
#include "maple_phase.h"
namespace maplebe {
/*
 * Common base for the dominator / post-dominator analyses on a CGFunc's CFG.
 * Holds the BB vector, the common entry/exit BBs and two allocators:
 * domAllocator for persistent results, tmpAllocator for scratch data.
 */
class DominanceBase : public AnalysisResult {
public:
DominanceBase(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB *> &bbVec, BB &commonEntryBB,
BB &commonExitBB)
: AnalysisResult(&memPool),
domAllocator(&memPool),
tmpAllocator(&tmpPool),
bbVec(bbVec),
cgFunc(func),
commonEntryBB(commonEntryBB),
commonExitBB(commonExitBB)
{
}
~DominanceBase() override = default;
BB &GetCommonEntryBB() const
{
return commonEntryBB;
}
BB &GetCommonExitBB() const
{
return commonExitBB;
}
protected:
bool CommonEntryBBIsPred(const BB &bb) const;
MapleAllocator domAllocator; // stores the analysis results
MapleAllocator tmpAllocator; // can be freed after dominator computation
MapleVector<BB *> &bbVec;
CGFunc &cgFunc;
BB &commonEntryBB;
BB &commonExitBB;
};
/*
 * Dominator analysis: computes immediate dominators (doms), dominance
 * frontiers, dominator-tree children and a preorder numbering of the
 * dominator tree (dtPreOrder / dtDfn / dtDfnOut), plus iterated dominance
 * frontiers.  All per-BB vectors are indexed by BB id and sized
 * bbVec.size() + 1.
 */
class DomAnalysis : public DominanceBase {
public:
DomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB *> &bbVec, BB &commonEntryBB,
BB &commonExitBB)
: DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB),
postOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()),
reversePostOrder(tmpAllocator.Adapter()),
doms(bbVec.size() + 1, nullptr, domAllocator.Adapter()),
domFrontier(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
domChildren(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
iterDomFrontier(bbVec.size() + 1, MapleSet<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
dtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()),
dtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()),
dtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter())
{
}
~DomAnalysis() override = default;
/* Driver: runs the individual Compute* steps below. */
void Compute();
void Dump();
void GenPostOrderID();
void ComputeDominance();
void ComputeDomFrontiers();
void ComputeDomChildren();
void GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker, std::vector<bool> &visitedMap);
void ComputeIterDomFrontiers();
uint32 ComputeDtPreorder(const BB &bb, uint32 &num);
bool Dominate(const BB &bb1, const BB &bb2); // true if bb1 dominates bb2
MapleVector<BB *> &GetReversePostOrder()
{
return reversePostOrder;
}
MapleVector<uint32> &GetDtPreOrder()
{
return dtPreOrder;
}
uint32 GetDtPreOrderItem(size_t idx) const
{
return dtPreOrder[idx];
}
size_t GetDtPreOrderSize() const
{
return dtPreOrder.size();
}
uint32 GetDtDfnItem(size_t idx) const
{
return dtDfn[idx];
}
size_t GetDtDfnSize() const
{
return dtDfn.size();
}
BB *GetDom(uint32 id)
{
DEBUG_ASSERT(id < doms.size(), "bbid out of range");
return doms[id];
}
void SetDom(uint32 id, BB *bb)
{
DEBUG_ASSERT(id < doms.size(), "bbid out of range");
doms[id] = bb;
}
size_t GetDomsSize() const
{
return doms.size();
}
/* NOTE: unlike GetDom/SetDom, the frontier accessors below do no bounds check. */
auto &GetDomFrontier(size_t idx)
{
return domFrontier[idx];
}
bool HasDomFrontier(uint32 id, uint32 frontier) const
{
return std::find(domFrontier[id].begin(), domFrontier[id].end(), frontier) != domFrontier[id].end();
}
size_t GetDomFrontierSize() const
{
return domFrontier.size();
}
auto &GetDomChildren()
{
return domChildren;
}
auto &GetDomChildren(size_t idx)
{
return domChildren[idx];
}
auto &GetIdomFrontier(uint32 idx)
{
return iterDomFrontier[idx];
}
size_t GetDomChildrenSize() const
{
return domChildren.size();
}
private:
void PostOrderWalk(const BB &bb, int32 &pid, MapleVector<bool> &visitedMap);
BB *Intersect(BB &bb1, const BB &bb2);
MapleVector<int32> postOrderIDVec; // index is bb id
MapleVector<BB *> reversePostOrder; // an ordering of the BB in reverse postorder
MapleVector<BB *> doms; // index is bb id; immediate dominator for each BB
MapleVector<MapleVector<uint32>> domFrontier; // index is bb id
MapleVector<MapleVector<uint32>> domChildren; // index is bb id; for dom tree
MapleVector<MapleSet<uint32>> iterDomFrontier;
MapleVector<uint32> dtPreOrder; // ordering of the BBs in a preorder traversal of the dominator tree
MapleVector<uint32> dtDfn; // gives position of each BB in dt_preorder
MapleVector<uint32> dtDfnOut; // max position of all nodes in the sub tree of each BB in dt_preorder
};
/*
 * Post-dominator analysis; mirrors DomAnalysis on the reversed CFG:
 * immediate post-dominators (pdoms), post-dominance frontiers, pdom-tree
 * children and a preorder numbering of the post-dominator tree.
 * All per-BB vectors are indexed by BB id and sized bbVec.size() + 1.
 */
class PostDomAnalysis : public DominanceBase {
public:
PostDomAnalysis(CGFunc &func, MemPool &memPool, MemPool &tmpPool, MapleVector<BB *> &bbVec, BB &commonEntryBB,
BB &commonExitBB)
: DominanceBase(func, memPool, tmpPool, bbVec, commonEntryBB, commonExitBB),
pdomPostOrderIDVec(bbVec.size() + 1, -1, tmpAllocator.Adapter()),
pdomReversePostOrder(tmpAllocator.Adapter()),
pdoms(bbVec.size() + 1, nullptr, domAllocator.Adapter()),
pdomFrontier(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
pdomChildren(bbVec.size() + 1, MapleVector<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
iterPdomFrontier(bbVec.size() + 1, MapleSet<uint32>(domAllocator.Adapter()), domAllocator.Adapter()),
pdtPreOrder(bbVec.size() + 1, 0, domAllocator.Adapter()),
pdtDfn(bbVec.size() + 1, -1, domAllocator.Adapter()),
pdtDfnOut(bbVec.size() + 1, -1, domAllocator.Adapter())
{
}
~PostDomAnalysis() override = default;
/* Driver: runs the individual Compute* steps below. */
void Compute();
void PdomGenPostOrderID();
void ComputePostDominance();
void ComputePdomFrontiers();
void ComputePdomChildren();
void GetIterPdomFrontier(const BB *bb, MapleSet<uint32> *dfset, uint32 bbidMarker, std::vector<bool> &visitedMap);
void ComputeIterPdomFrontiers();
uint32 ComputePdtPreorder(const BB &bb, uint32 &num);
bool PostDominate(const BB &bb1, const BB &bb2); // true if bb1 postdominates bb2
void Dump();
auto &GetPdomFrontierItem(size_t idx)
{
return pdomFrontier[idx];
}
size_t GetPdomFrontierSize() const
{
return pdomFrontier.size();
}
auto &GetIpdomFrontier(uint32 idx)
{
return iterPdomFrontier[idx];
}
auto &GetPdomChildrenItem(size_t idx)
{
return pdomChildren[idx];
}
void ResizePdtPreOrder(size_t n)
{
pdtPreOrder.resize(n);
}
uint32 GetPdtPreOrderItem(size_t idx) const
{
return pdtPreOrder[idx];
}
size_t GetPdtPreOrderSize() const
{
return pdtPreOrder.size();
}
uint32 GetPdtDfnItem(size_t idx) const
{
return pdtDfn[idx];
}
int32 GetPdomPostOrderIDVec(size_t idx) const
{
return pdomPostOrderIDVec[idx];
}
BB *GetPdomReversePostOrder(size_t idx)
{
return pdomReversePostOrder[idx];
}
MapleVector<BB *> &GetPdomReversePostOrder()
{
return pdomReversePostOrder;
}
size_t GetPdomReversePostOrderSize() const
{
return pdomReversePostOrder.size();
}
bool HasPdomFrontier(uint32 id, uint32 frontier) const
{
return std::find(pdomFrontier[id].begin(), pdomFrontier[id].end(), frontier) != pdomFrontier[id].end();
}
BB *GetPdom(uint32 id)
{
DEBUG_ASSERT(id < pdoms.size(), "bbid out of range");
return pdoms[id];
}
void SetPdom(uint32 id, BB *bb)
{
DEBUG_ASSERT(id < pdoms.size(), "bbid out of range");
pdoms[id] = bb;
}
private:
void PdomPostOrderWalk(const BB &bb, int32 &pid, MapleVector<bool> &visitedMap);
BB *PdomIntersect(BB &bb1, const BB &bb2);
MapleVector<int32> pdomPostOrderIDVec; // index is bb id
MapleVector<BB *> pdomReversePostOrder; // an ordering of the BB in reverse postorder
MapleVector<BB *> pdoms; // index is bb id; immediate dominator for each BB
MapleVector<MapleVector<uint32>> pdomFrontier; // index is bb id
MapleVector<MapleVector<uint32>> pdomChildren; // index is bb id; for pdom tree
MapleVector<MapleSet<uint32>> iterPdomFrontier;
MapleVector<uint32> pdtPreOrder; // ordering of the BBs in a preorder traversal of the post-dominator tree
MapleVector<uint32> pdtDfn; // gives position of each BB in pdt_preorder
MapleVector<uint32> pdtDfnOut; // max position of all nodes in the sub tree of each BB in pdt_preorder
};
/* Phase wrapper exposing the DomAnalysis result to the phase manager. */
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgDomAnalysis, maplebe::CGFunc);
DomAnalysis *GetResult()
{
return domAnalysis;
}
DomAnalysis *domAnalysis = nullptr;
MAPLE_FUNC_PHASE_DECLARE_END
/* Phase wrapper exposing the PostDomAnalysis result to the phase manager. */
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgPostDomAnalysis, maplebe::CGFunc);
PostDomAnalysis *GetResult()
{
return pdomAnalysis;
}
PostDomAnalysis *pdomAnalysis = nullptr;
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_DOM_H */

View File

@ -0,0 +1,96 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_IRBUILDER_H
#define MAPLEBE_INCLUDE_CG_IRBUILDER_H
#include "insn.h"
#include "operand.h"
namespace maplebe {
/*
 * Factory for Insn objects.  Holds the mempool used for creation and counts
 * how many insns have been built (createdInsnNum).
 */
class InsnBuilder {
public:
explicit InsnBuilder(MemPool &memPool) : mp(&memPool) {}
virtual ~InsnBuilder()
{
mp = nullptr;
}
/* Build an insn of opCode, looking up its descriptor in the target's kMd table. */
template <class Target>
Insn &BuildInsn(MOperator opCode)
{
return BuildInsn(opCode, Target::kMd[opCode]);
}
Insn &BuildInsn(MOperator opCode, const InsnDesc &idesc);
/* Convenience overloads taking 1..5 fixed operands or an operand vector. */
Insn &BuildInsn(MOperator opCode, Operand &o0);
Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1);
Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2);
Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3);
Insn &BuildInsn(MOperator opCode, Operand &o0, Operand &o1, Operand &o2, Operand &o3, Operand &o4);
Insn &BuildInsn(MOperator opCode, std::vector<Operand *> &opnds);
Insn &BuildCfiInsn(MOperator opCode);
Insn &BuildDbgInsn(MOperator opCode);
VectorInsn &BuildVectorInsn(MOperator opCode, const InsnDesc &idesc);
uint32 GetCreatedInsnNum() const
{
return createdInsnNum;
}
protected:
MemPool *mp;
private:
void IncreaseInsnNum()
{
createdInsnNum++;
}
uint32 createdInsnNum = 0;
};
constexpr uint32 baseVirtualRegNO = 200; /* avoid conflicts between virtual and physical */
/*
 * Factory for operands (immediates, memory, registers, lists, labels,
 * comments).  Allocates from the builder's allocator unless a per-call
 * mempool mp is supplied; tracks the virtual-register count.
 */
class OperandBuilder {
public:
explicit OperandBuilder(MemPool &mp, uint32 mirPregNum = 0) : alloc(&mp), virtualRegNum(mirPregNum) {}
/* create an operand in cgfunc when no mempool is supplied */
ImmOperand &CreateImm(uint32 size, int64 value, MemPool *mp = nullptr);
ImmOperand &CreateImm(const MIRSymbol &symbol, int64 offset, int32 relocs, MemPool *mp = nullptr);
MemOperand &CreateMem(uint32 size, MemPool *mp = nullptr);
MemOperand &CreateMem(RegOperand &baseOpnd, int64 offset, uint32 size);
RegOperand &CreateVReg(uint32 size, RegType type, MemPool *mp = nullptr);
RegOperand &CreateVReg(regno_t vRegNO, uint32 size, RegType type, MemPool *mp = nullptr);
RegOperand &CreatePReg(regno_t pRegNO, uint32 size, RegType type, MemPool *mp = nullptr);
ListOperand &CreateList(MemPool *mp = nullptr);
FuncNameOperand &CreateFuncNameOpnd(MIRSymbol &symbol, MemPool *mp = nullptr);
LabelOperand &CreateLabel(const char *parent, LabelIdx idx, MemPool *mp = nullptr);
CommentOperand &CreateComment(const std::string &s, MemPool *mp = nullptr);
CommentOperand &CreateComment(const MapleString &s, MemPool *mp = nullptr);
uint32 GetCurrentVRegNum() const
{
return virtualRegNum;
}
protected:
MapleAllocator alloc;
private:
uint32 virtualRegNum = 0;
/* reg bank for multiple use */
};
} // namespace maplebe
#endif // MAPLEBE_INCLUDE_CG_IRBUILDER_H

View File

@ -0,0 +1,568 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CGOCCUR_H
#define MAPLEBE_CG_INCLUDE_CGOCCUR_H
#include "cg_dominance.h"
// the data structures that represent occurrences and work candidates for PRE
namespace maplebe {
/* Kinds of PRE occurrence nodes; stored in CgOccur::occTy. */
enum OccType {
kOccUndef,
kOccReal,
kOccDef,
kOccStore,
kOccPhiocc,
kOccPhiopnd,
kOccExit,
kOccUse, // for use appearances when candidate is dassign
kOccMembar, // for representing occurrence of memory barriers (use CgRealOcc)
};
/*
 * Base class for all PRE occurrence kinds (see OccType).  An occurrence
 * records where a candidate appears (BB / insn / operand), its class id,
 * and the defining occurrence of its class (def).  Subclasses override
 * the prev-version hooks; the base versions abort via CHECK_FATAL.
 */
class CgOccur {
public:
CgOccur(OccType ty, BB *bb, Insn *insn, Operand *opnd) : occTy(ty), cgBB(bb), insn(insn), opnd(opnd) {}
CgOccur(OccType ty, int cId, BB &bb, CgOccur *df) : occTy(ty), classID(cId), cgBB(&bb), def(df) {}
virtual ~CgOccur() = default;
bool IsDominate(DomAnalysis &dom, CgOccur &occ);
const BB *GetBB() const
{
return cgBB;
}
BB *GetBB()
{
return cgBB;
}
void SetBB(BB &bb)
{
cgBB = &bb;
}
OccType GetOccType() const
{
return occTy;
}
int GetClassID() const
{
return classID;
}
void SetClassID(int id)
{
classID = id;
}
const CgOccur *GetDef() const
{
return def;
}
CgOccur *GetDef()
{
return def;
}
void SetDef(CgOccur *define)
{
def = define;
}
const Insn *GetInsn() const
{
return insn;
}
Insn *GetInsn()
{
return insn;
}
const Operand *GetOperand() const
{
return opnd;
}
Operand *GetOperand()
{
return opnd;
}
bool Processed() const
{
return processed;
}
void SetProcessed(bool val)
{
processed = val;
}
virtual CgOccur *GetPrevVersionOccur()
{
CHECK_FATAL(false, "has no prev version occur");
}
virtual void SetPrevVersionOccur(CgOccur *)
{
CHECK_FATAL(false, "has no prev version occur");
}
/* Base Dump only reports exit occurrences; subclasses print their own state. */
virtual void Dump() const
{
if (occTy == kOccExit) {
LogInfo::MapleLogger() << "ExitOcc at bb " << GetBB()->GetId() << std::endl;
}
}
private:
OccType occTy = kOccUndef; // kinds of occ
int classID = 0; // class id
BB *cgBB = nullptr; // the BB it occurs in
Insn *insn = nullptr;
Operand *opnd = nullptr;
CgOccur *def = nullptr;
bool processed = false;
};
/* Occurrence of a use of the candidate; needReload marks whether a reload
   is required at this point (printed by Dump as "need-reload"). */
class CgUseOcc : public CgOccur {
public:
CgUseOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccUse, bb, insn, opnd), needReload(false) {}
~CgUseOcc() = default;
bool Reload() const
{
return needReload;
}
void SetReload(bool val)
{
needReload = val;
}
CgOccur *GetPrevVersionOccur() override
{
return prevVersion;
}
void SetPrevVersionOccur(CgOccur *val) override
{
prevVersion = val;
}
void Dump() const override
{
LogInfo::MapleLogger() << "UseOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": "
<< (needReload ? "need-reload, " : "not need-reload, ") << "\n";
}
private:
bool needReload = false;
CgOccur *prevVersion = nullptr;
};
/* Occurrence of a store of the candidate; carries the same reload flag and
   prev-version link as CgUseOcc. */
class CgStoreOcc : public CgOccur {
public:
CgStoreOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccStore, bb, insn, opnd) {}
~CgStoreOcc() = default;
bool Reload() const
{
return needReload;
}
void SetReload(bool val)
{
needReload = val;
}
CgOccur *GetPrevVersionOccur() override
{
return prevVersion;
}
void SetPrevVersionOccur(CgOccur *val) override
{
prevVersion = val;
}
void Dump() const override
{
LogInfo::MapleLogger() << "StoreOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": "
<< (needReload ? "reload, " : "not reload, ") << "\n";
}
private:
bool needReload = false;
CgOccur *prevVersion = nullptr;
};
/* Occurrence of a definition of the candidate.
   NOTE(review): the flag is needStore but its accessors are named
   Loaded/SetLoaded — the names don't match; Dump prints "store". */
class CgDefOcc : public CgOccur {
public:
CgDefOcc(BB *bb, Insn *insn, Operand *opnd) : CgOccur(kOccDef, bb, insn, opnd) {}
~CgDefOcc() = default;
bool Loaded() const
{
return needStore;
}
void SetLoaded(bool val)
{
needStore = val;
}
CgOccur *GetPrevVersionOccur() override
{
return prevVersion;
}
void SetPrevVersionOccur(CgOccur *val) override
{
prevVersion = val;
}
void Dump() const override
{
LogInfo::MapleLogger() << "DefOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": "
<< (needStore ? "store" : "not store") << "\n";
}
private:
bool needStore = false;
CgOccur *prevVersion = nullptr;
};
class CgPhiOpndOcc;
/* Availability of a phi occurrence's value. */
enum AvailState { kFullyAvailable, kPartialAvailable, kNotAvailable };
/*
 * Phi occurrence at a join BB.  Holds its operand occurrences (one per
 * incoming edge), a down-safety flag (initialized false for catch BBs)
 * and an availability state.
 */
class CgPhiOcc : public CgOccur {
public:
CgPhiOcc(BB &bb, Operand *opnd, MapleAllocator &alloc)
: CgOccur(kOccPhiocc, 0, bb, nullptr), regOpnd(opnd), isDownSafe(!bb.IsCatch()), phiOpnds(alloc.Adapter())
{
}
virtual ~CgPhiOcc() = default;
bool IsDownSafe() const
{
return isDownSafe;
}
void SetIsDownSafe(bool downSafe)
{
isDownSafe = downSafe;
}
const MapleVector<CgPhiOpndOcc *> &GetPhiOpnds() const
{
return phiOpnds;
}
MapleVector<CgPhiOpndOcc *> &GetPhiOpnds()
{
return phiOpnds;
}
Operand *GetOpnd()
{
return regOpnd;
}
CgPhiOpndOcc *GetPhiOpnd(size_t idx)
{
DEBUG_ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd");
return phiOpnds.at(idx);
}
const CgPhiOpndOcc *GetPhiOpnd(size_t idx) const
{
DEBUG_ASSERT(idx < phiOpnds.size(), "out of range in CgPhiOcc::GetPhiOpnd");
return phiOpnds.at(idx);
}
void AddPhiOpnd(CgPhiOpndOcc &opnd)
{
phiOpnds.push_back(&opnd);
}
CgOccur *GetPrevVersionOccur() override
{
return prevVersion;
}
void SetPrevVersionOccur(CgOccur *val) override
{
prevVersion = val;
}
bool IsFullyAvailable() const
{
return availState == kFullyAvailable;
}
bool IsPartialAvailable() const
{
return availState == kPartialAvailable;
}
bool IsNotAvailable() const
{
return availState == kNotAvailable;
}
void SetAvailability(AvailState val)
{
availState = val;
}
void Dump() const override
{
LogInfo::MapleLogger() << "PhiOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": "
<< (isDownSafe ? "downsafe, " : "not downsafe, ")
<< (availState == kNotAvailable
? "not avail"
: (availState == kPartialAvailable ? "part avail" : "fully avail"))
<< "\n";
}
private:
Operand *regOpnd;
bool isDownSafe = true; // default is true
AvailState availState = kFullyAvailable;
MapleVector<CgPhiOpndOcc *> phiOpnds;
CgOccur *prevVersion = nullptr;
};
/* One incoming-edge operand of a phi occurrence; points back to the phi
   (phiOcc) that defines it. */
class CgPhiOpndOcc : public CgOccur {
public:
CgPhiOpndOcc(BB *bb, Operand *opnd, CgPhiOcc *defPhi)
: CgOccur(kOccPhiopnd, bb, nullptr, opnd), hasRealUse(false), phiOcc(defPhi)
{
}
~CgPhiOpndOcc() = default;
bool HasRealUse() const
{
return hasRealUse;
}
void SetHasRealUse(bool realUse)
{
hasRealUse = realUse;
}
const CgPhiOcc *GetPhiOcc() const
{
return phiOcc;
}
CgPhiOcc *GetPhiOcc()
{
return phiOcc;
}
void SetPhiOcc(CgPhiOcc &occ)
{
phiOcc = &occ;
}
bool Reload() const
{
return reload;
}
void SetReload(bool val)
{
reload = val;
}
void Dump() const override
{
LogInfo::MapleLogger() << "PhiOpndOcc " << GetClassID() << " at bb " << GetBB()->GetId() << ": "
<< (hasRealUse ? "hasRealUse, " : "not hasRealUse, ")
<< (reload ? "reload" : "not reload") << std::endl;
}
private:
bool hasRealUse;
bool reload = false;
CgPhiOcc *phiOcc = nullptr; // its lhs
};
// each singly linked list represents each bucket in workCandHashTable
/*
 * A PRE work candidate: one expression (theOperand) together with all of
 * its occurrences.  Candidates hashing to the same bucket of
 * PreWorkCandHashTable are chained through next.
 */
class PreWorkCand {
public:
PreWorkCand(MapleAllocator &alloc, Operand *curOpnd, PUIdx pIdx)
: next(nullptr),
allOccs(alloc.Adapter()),
realOccs(alloc.Adapter()),
phiOccs(alloc.Adapter()),
theOperand(curOpnd),
puIdx(pIdx),
redo2HandleCritEdges(false)
{
DEBUG_ASSERT(pIdx != 0, "PreWorkCand: initial puIdx cannot be 0");
}
virtual ~PreWorkCand() = default;
/* Append occ to realOccs; clears puIdx to 0 when occurrences span PUs. */
void AddRealOccAsLast(CgOccur &occ, PUIdx pIdx)
{
realOccs.push_back(&occ); // add as last
DEBUG_ASSERT(pIdx != 0, "puIdx of realocc cannot be 0");
if (pIdx != puIdx) {
puIdx = 0;
}
}
const PreWorkCand *GetNext() const
{
return next;
}
PreWorkCand *GetNext()
{
return next;
}
void SetNext(PreWorkCand &workCand)
{
next = &workCand;
}
int32 GetIndex() const
{
return index;
}
void SetIndex(int idx)
{
index = idx;
}
const MapleVector<CgOccur *> &GetRealOccs() const
{
return realOccs;
}
MapleVector<CgOccur *> &GetRealOccs()
{
return realOccs;
}
const CgOccur *GetRealOcc(size_t idx) const
{
DEBUG_ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt");
return realOccs.at(idx);
}
CgOccur *GetRealOcc(size_t idx)
{
DEBUG_ASSERT(idx < realOccs.size(), "out of range in PreWorkCand::GetRealOccAt");
return realOccs.at(idx);
}
const MapleVector<CgPhiOcc *> &PhiOccs() const
{
return phiOccs;
}
MapleVector<CgPhiOcc *> &PhiOccs()
{
return phiOccs;
}
const Operand *GetTheOperand() const
{
return theOperand;
}
Operand *GetTheOperand()
{
return theOperand;
}
void SetTheOperand(Operand &expr)
{
theOperand = &expr;
}
PUIdx GetPUIdx() const
{
return puIdx;
}
void SetPUIdx(PUIdx idx)
{
puIdx = idx;
}
bool Redo2HandleCritEdges() const
{
return redo2HandleCritEdges;
}
void SetRedo2HandleCritEdges(bool redo)
{
redo2HandleCritEdges = redo;
}
private:
PreWorkCand *next;
int32 index = 0;
MapleVector<CgOccur *> allOccs;
MapleVector<CgOccur *> realOccs; // maintained in order of dt_preorder
MapleVector<CgPhiOcc *> phiOccs;
Operand *theOperand; // the expression of this workcand
PUIdx puIdx; // if 0, its occ span multiple PUs; initial value must
// puIdx cannot be 0 if hasLocalOpnd is true
bool redo2HandleCritEdges : 1; // redo to make critical edges affect canbevail
};
/*
 * Fixed-size hash table of PRE work candidates.  Each slot heads a singly
 * linked bucket chained through PreWorkCand::next; the slot index is
 * computed from an operand or insn by the static hash functions.
 */
class PreWorkCandHashTable {
public:
static const uint32 workCandHashLength = 229;
static uint32 ComputeWorkCandHashIndex(const Operand &opnd);
static uint32 ComputeStmtWorkCandHashIndex(const Insn &insn);
PreWorkCandHashTable() = default;
~PreWorkCandHashTable() = default;
std::array<PreWorkCand *, workCandHashLength> &GetWorkcandHashTable()
{
return workCandHashTable;
}
PreWorkCand *GetWorkcandFromIndex(size_t idx)
{
return workCandHashTable[idx];
}
void SetWorkCandAt(size_t idx, PreWorkCand &workCand)
{
workCandHashTable[idx] = &workCand;
}
private:
/* Value-initialized ({}): every bucket starts as nullptr.  Without the
   initializer std::array leaves the pointers indeterminate, so
   GetWorkcandFromIndex could return garbage for never-set slots. */
std::array<PreWorkCand *, workCandHashLength> workCandHashTable {};
};
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_CGOCCUR_H

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,137 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLE_BE_INCLUDE_CG_OPTIONS_H
#define MAPLE_BE_INCLUDE_CG_OPTIONS_H
#include "cl_option.h"
#include "cl_parser.h"
#include <stdint.h>
#include <string>
/* Command-line option declarations for the CG backend; the definitions
   (flag names, defaults, help text) live in the corresponding .cpp. */
namespace opts::cg {
/* code generation / linking */
extern maplecl::Option<bool> pie;
extern maplecl::Option<bool> fpic;
extern maplecl::Option<bool> verboseAsm;
extern maplecl::Option<bool> verboseCg;
extern maplecl::Option<bool> maplelinker;
extern maplecl::Option<bool> quiet;
extern maplecl::Option<bool> cg;
extern maplecl::Option<bool> replaceAsm;
extern maplecl::Option<bool> generalRegOnly;
extern maplecl::Option<bool> lazyBinding;
extern maplecl::Option<bool> hotFix;
/* optimization pass switches */
extern maplecl::Option<bool> ebo;
extern maplecl::Option<bool> cfgo;
extern maplecl::Option<bool> ico;
extern maplecl::Option<bool> storeloadopt;
extern maplecl::Option<bool> globalopt;
extern maplecl::Option<bool> hotcoldsplit;
extern maplecl::Option<bool> prelsra;
extern maplecl::Option<bool> lsraLvarspill;
extern maplecl::Option<bool> lsraOptcallee;
extern maplecl::Option<bool> calleeregsPlacement;
extern maplecl::Option<bool> ssapreSave;
extern maplecl::Option<bool> ssupreRestore;
extern maplecl::Option<bool> prepeep;
extern maplecl::Option<bool> peep;
extern maplecl::Option<bool> preschedule;
extern maplecl::Option<bool> schedule;
extern maplecl::Option<bool> retMerge;
extern maplecl::Option<bool> vregRename;
extern maplecl::Option<bool> fullcolor;
extern maplecl::Option<bool> writefieldopt;
extern maplecl::Option<bool> dumpOlog;
extern maplecl::Option<bool> nativeopt;
extern maplecl::Option<bool> objmap;
extern maplecl::Option<bool> yieldpoint;
extern maplecl::Option<bool> proepilogue;
extern maplecl::Option<bool> localRc;
/* instrumentation / code-gen aids */
extern maplecl::Option<std::string> insertCall;
extern maplecl::Option<bool> addDebugTrace;
extern maplecl::Option<bool> addFuncProfile;
extern maplecl::Option<std::string> classListFile;
extern maplecl::Option<bool> genCMacroDef;
extern maplecl::Option<bool> genGctibFile;
extern maplecl::Option<bool> stackProtectorStrong;
extern maplecl::Option<bool> stackProtectorAll;
/* debug info */
extern maplecl::Option<bool> debug;
extern maplecl::Option<bool> gdwarf;
extern maplecl::Option<bool> gsrc;
extern maplecl::Option<bool> gmixedsrc;
extern maplecl::Option<bool> gmixedasm;
extern maplecl::Option<bool> profile;
extern maplecl::Option<bool> withRaLinearScan;
extern maplecl::Option<bool> withRaGraphColor;
extern maplecl::Option<bool> patchLongBranch;
extern maplecl::Option<bool> constFold;
extern maplecl::Option<std::string> ehExclusiveList;
/* optimization levels */
extern maplecl::Option<bool> o0;
extern maplecl::Option<bool> o1;
extern maplecl::Option<bool> o2;
extern maplecl::Option<bool> os;
extern maplecl::Option<bool> olitecg;
/* register-allocation tuning */
extern maplecl::Option<uint64_t> lsraBb;
extern maplecl::Option<uint64_t> lsraInsn;
extern maplecl::Option<uint64_t> lsraOverlap;
extern maplecl::Option<uint8_t> remat;
/* dump / phase control */
extern maplecl::Option<bool> suppressFileinfo;
extern maplecl::Option<bool> dumpCfg;
extern maplecl::Option<std::string> target;
extern maplecl::Option<std::string> dumpPhases;
extern maplecl::Option<std::string> skipPhases;
extern maplecl::Option<std::string> skipFrom;
extern maplecl::Option<std::string> skipAfter;
extern maplecl::Option<std::string> dumpFunc;
extern maplecl::Option<bool> timePhases;
extern maplecl::Option<bool> useBarriersForVolatile;
extern maplecl::Option<std::string> range;
extern maplecl::Option<uint8_t> fastAlloc;
extern maplecl::Option<std::string> spillRange;
extern maplecl::Option<bool> dupBb;
extern maplecl::Option<bool> calleeCfi;
extern maplecl::Option<bool> printFunc;
extern maplecl::Option<std::string> cyclePatternList;
extern maplecl::Option<std::string> duplicateAsmList;
extern maplecl::Option<std::string> duplicateAsmList2;
extern maplecl::Option<std::string> blockMarker;
extern maplecl::Option<bool> soeCheck;
extern maplecl::Option<bool> checkArraystore;
extern maplecl::Option<bool> debugSchedule;
extern maplecl::Option<bool> bruteforceSchedule;
extern maplecl::Option<bool> simulateSchedule;
extern maplecl::Option<bool> crossLoc;
extern maplecl::Option<std::string> floatAbi;
extern maplecl::Option<std::string> filetype;
extern maplecl::Option<bool> longCalls;
extern maplecl::Option<bool> functionSections;
extern maplecl::Option<bool> omitFramePointer;
extern maplecl::Option<bool> fastMath;
extern maplecl::Option<bool> tailcall;
/* alignment options */
extern maplecl::Option<bool> alignAnalysis;
extern maplecl::Option<bool> cgSsa;
extern maplecl::Option<bool> common;
extern maplecl::Option<bool> condbrAlign;
extern maplecl::Option<uint32_t> alignMinBbSize;
extern maplecl::Option<uint32_t> alignMaxBbSize;
extern maplecl::Option<uint32_t> loopAlignPow;
extern maplecl::Option<uint32_t> jumpAlignPow;
extern maplecl::Option<uint32_t> funcAlignPow;
} // namespace opts::cg
#endif /* MAPLE_BE_INCLUDE_CG_OPTIONS_H */

View File

@ -0,0 +1,26 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CG_PHASE_H
#define MAPLEBE_INCLUDE_CG_CG_PHASE_H
namespace maple {
}
namespace maplebe {
using namespace maple;
class CGFunc;
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CG_PHASE_H */

View File

@ -0,0 +1,90 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H
#define MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H
#include <vector>
#include <string>
#include <sys/stat.h>
#include "mempool.h"
#include "mempool_allocator.h"
#include "mir_module.h"
#include "mir_lower.h"
#include "lower.h"
#include "constantfold.h"
#include "cgfunc.h"
#include "cg_phase.h"
#include "cg_option.h"
namespace maplebe {
using cgFuncOptTy = MapleFunctionPhase<CGFunc>;
/* =================== new phase manager =================== */
/* Module-level phase manager for code generation: owns the lowering helpers
 * and the CG/BECommon context used while running CG phases over a MIRModule. */
class CgFuncPM : public FunctionPM {
public:
explicit CgFuncPM(MemPool *mp) : FunctionPM(mp, &id) {}
PHASECONSTRUCTOR(CgFuncPM);
std::string PhaseName() const override;
/* Clears the borrowed pointers (they are not owned/freed here) and, when
 * phase timing is enabled, dumps the collected per-phase times. */
~CgFuncPM() override
{
cgOptions = nullptr;
cg = nullptr;
beCommon = nullptr;
if (CGOptions::IsEnableTimePhases()) {
DumpPhaseTime();
}
}
/* Entry point of this phase manager; defined out-of-line. */
bool PhaseRun(MIRModule &m) override;
/* Stores a pointer to caller-owned options; no copy is made. */
void SetCGOptions(CGOptions *curCGOptions)
{
cgOptions = curCGOptions;
}
CG *GetCG()
{
return cg;
}
BECommon *GetBECommon()
{
return beCommon;
}
private:
/* Runs the per-function CG phases on one CGFunc. */
bool FuncLevelRun(CGFunc &cgFunc, AnalysisDataManager &serialADM);
void GenerateOutPutFile(MIRModule &m);
void CreateCGAndBeCommon(MIRModule &m);
void PrepareLower(MIRModule &m);
void PostOutPut(MIRModule &m);
void DoFuncCGLower(const MIRModule &m, MIRFunction &mirFunc);
/* Tool functions */
void DumpFuncCGIR(const CGFunc &f, const std::string &phaseName) const;
/* For Emit */
void InitProfile(MIRModule &m) const;
void EmitGlobalInfo(MIRModule &m) const;
void EmitDuplicatedAsmFunc(MIRModule &m) const;
void EmitDebugInfo(const MIRModule &m) const;
void EmitFastFuncs(const MIRModule &m) const;
bool IsFramework(MIRModule &m) const;
void SweepUnusedStaticSymbol(MIRModule &m);
/* All pointers below are borrowed; see the destructor. */
CG *cg = nullptr;
BECommon *beCommon = nullptr;
MIRLower *mirLower = nullptr;
CGLowerer *cgLower = nullptr;
/* module options */
CGOptions *cgOptions = nullptr;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_CG_PHASEMANAGER_H */

View File

@ -0,0 +1,79 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H
#define MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H
#include "cgfunc.h"
#include "cg_ssa.h"
namespace maplebe {
/* Base class for removing phi instructions after SSA-based optimization:
 * TranslateTSSAToCSSA() rewrites phi nodes into copies (target-specific
 * pieces are the pure-virtual hooks below). */
class PhiEliminate {
public:
PhiEliminate(CGFunc &f, CGSSAInfo &ssaAnalysisResult, MemPool &mp)
: cgFunc(&f),
ssaInfo(&ssaAnalysisResult),
phiEliAlloc(&mp),
eliminatedBB(phiEliAlloc.Adapter()),
replaceVreg(phiEliAlloc.Adapter()),
remateInfoAfterSSA(phiEliAlloc.Adapter())
{
/* Fresh temp regs start just past the range used by SSA operands. */
tempRegNO = static_cast<uint32_t>(GetSSAInfo()->GetAllSSAOperands().size()) + CGSSAInfo::SSARegNObase;
}
virtual ~PhiEliminate() = default;
CGSSAInfo *GetSSAInfo()
{
return ssaInfo;
}
void TranslateTSSAToCSSA();
/* move ssaRegOperand from ssaInfo to cgfunc */
virtual void ReCreateRegOperand(Insn &insn) = 0;
protected:
/* Target hook: build a move from fromOpnd to destOpnd. */
virtual Insn &CreateMov(RegOperand &destOpnd, RegOperand &fromOpnd) = 0;
virtual void MaintainRematInfo(RegOperand &destOpnd, RegOperand &fromOpnd, bool isCopy) = 0;
virtual void AppendMovAfterLastVregDef(BB &bb, Insn &movInsn) const = 0;
void UpdateRematInfo();
regno_t GetAndIncreaseTempRegNO();
RegOperand *MakeRoomForNoDefVreg(RegOperand &conflictReg);
void RecordRematInfo(regno_t vRegNO, PregIdx pIdx);
/* Returns the recorded preg index for vRegNO, or -1 when none is known. */
PregIdx FindRematInfo(regno_t vRegNO)
{
return remateInfoAfterSSA.count(vRegNO) ? remateInfoAfterSSA[vRegNO] : -1;
}
CGFunc *cgFunc;
CGSSAInfo *ssaInfo;
MapleAllocator phiEliAlloc;
private:
void PlaceMovInPredBB(uint32 predBBId, Insn &movInsn);
virtual RegOperand &CreateTempRegForCSSA(RegOperand &oriOpnd) = 0;
/* Ids of BBs whose phis have already been processed. */
MapleSet<uint32> eliminatedBB;
/*
* noDef Vregs occupy the vregno_t which is used for ssa re_creating
* first : conflicting VReg with noDef VReg second : new_Vreg opnd to replace occupied Vreg
*/
MapleUnorderedMap<regno_t, RegOperand *> replaceVreg;
regno_t tempRegNO = 0; /* used to create mov insns for phi elimination */
MapleMap<regno_t, PregIdx> remateInfoAfterSSA;
};
class OperandPhiElmVisitor : public OperandVisitorBase, public OperandVisitors<RegOperand, ListOperand, MemOperand> {
};
MAPLE_FUNC_PHASE_DECLARE(CgPhiElimination, maplebe::CGFunc)
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_CG_PHI_ELIMINATE_H

View File

@ -0,0 +1,118 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CGPRE_H
#define MAPLEBE_CG_INCLUDE_CGPRE_H
#include "cg_occur.h"
#include "cg_dominance.h"
#include "cgfunc.h"
namespace maplebe {
enum PreKind { kExprPre, kStmtPre, kLoadPre, kAddrPre };
class CGPre {
public:
CGPre(DomAnalysis &currDom, MemPool &memPool, MemPool &mp2, PreKind kind, uint32 limit)
: dom(&currDom),
ssaPreMemPool(&memPool),
ssaPreAllocator(&memPool),
perCandMemPool(&mp2),
perCandAllocator(&mp2),
workList(ssaPreAllocator.Adapter()),
preKind(kind),
allOccs(ssaPreAllocator.Adapter()),
phiOccs(ssaPreAllocator.Adapter()),
exitOccs(ssaPreAllocator.Adapter()),
preLimit(limit),
dfPhiDfns(std::less<uint32>(), ssaPreAllocator.Adapter()),
varPhiDfns(std::less<uint32>(), ssaPreAllocator.Adapter()),
temp2LocalRefVarMap(ssaPreAllocator.Adapter())
{
preWorkCandHashTable.GetWorkcandHashTable().fill(nullptr);
}
virtual ~CGPre() = default;
const MapleVector<CgOccur *> &GetRealOccList() const
{
return workCand->GetRealOccs();
}
virtual BB *GetBB(uint32 id) const = 0;
virtual PUIdx GetPUIdx() const = 0;
virtual void SetCurFunction(PUIdx) const {}
void GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset) const
{
for (uint32 bbid : dom->GetIdomFrontier(bb->GetId())) {
(void)dfset->insert(dom->GetDtDfnItem(bbid));
}
}
PreWorkCand *GetWorkCand() const
{
return workCand;
}
// compute downsafety for each PHI
static void ResetDS(CgPhiOcc *phiOcc);
void ComputeDS();
protected:
virtual void ComputeVarAndDfPhis() = 0;
virtual void CreateSortedOccs();
CgOccur *CreateRealOcc(Insn &insn, Operand &opnd, OccType occType);
virtual void BuildWorkList() = 0;
/* for stmt pre only */
void CreateExitOcc(BB &bb)
{
CgOccur *exitOcc = ssaPreMemPool->New<CgOccur>(kOccExit, 0, bb, nullptr);
exitOccs.push_back(exitOcc);
}
DomAnalysis *dom;
MemPool *ssaPreMemPool;
MapleAllocator ssaPreAllocator;
MemPool *perCandMemPool;
MapleAllocator perCandAllocator;
MapleList<PreWorkCand *> workList;
PreWorkCand *workCand = nullptr; // the current PreWorkCand
PreKind preKind;
// PRE work candidates; incremented by 2 for each tree;
// purpose is to avoid processing a node the third time
// inside a tree (which is a DAG)
// the following 3 lists are all maintained in order of dt_preorder
MapleVector<CgOccur *> allOccs; // cleared at start of each workcand
MapleVector<CgPhiOcc *> phiOccs; // cleared at start of each workcand
MapleVector<CgOccur *> exitOccs; // this is shared by all workcands
uint32 preLimit; // set by command-line option to limit the number of candidates optimized (for debugging purpose)
// step 1 phi insertion data structures
// following are set of BBs in terms of their dfn's; index into
// dominance->pdt_preorder to get their bbid's
MapleSet<uint32> dfPhiDfns; // phis inserted due to dominance frontiers
MapleSet<uint32> varPhiDfns; // phis inserted due to the var operands
// step 2 renaming data structures
uint32 classCount = 0; // count class created during renaming
// is index into workCand->realOccs
// step 6 codemotion data structures
MapleMap<Operand *, Operand *> temp2LocalRefVarMap;
int32 reBuiltOccIndex = -1; // stores the size of worklist every time when try to add new worklist, update before
// each code motion
uint32 strIdxCount =
0; // ssapre will create a lot of temp variables if using var to store redundances, start from 0
PreWorkCandHashTable preWorkCandHashTable;
};
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_CGPRE_H

View File

@ -0,0 +1,128 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_PROP_H
#define MAPLEBE_INCLUDE_CG_PROP_H
#include "cgfunc.h"
#include "cg_ssa.h"
#include "cg_dce.h"
#include "cg.h"
#include "reg_coalesce.h"
namespace maplebe {
/* Base class for SSA-based propagation on CG IR; owns a dead-code-elimination
 * helper (created via the target CG factory in the constructor). Concrete
 * targets implement CopyProp/TargetProp/PropPatternOpt. */
class CGProp {
public:
CGProp(MemPool &mp, CGFunc &f, CGSSAInfo &sInfo, LiveIntervalAnalysis &ll)
: memPool(&mp), cgFunc(&f), propAlloc(&mp), ssaInfo(&sInfo), regll(&ll)
{
/* DCE helper allocated from the same mempool; used after propagation. */
cgDce = f.GetCG()->CreateCGDce(mp, f, sInfo);
}
virtual ~CGProp() = default;
/* Drivers; defined out-of-line. */
void DoCopyProp();
void DoTargetProp();
protected:
MemPool *memPool;
CGFunc *cgFunc;
MapleAllocator propAlloc;
CGSSAInfo *GetSSAInfo()
{
return ssaInfo;
}
CGDce *GetDce()
{
return cgDce;
}
LiveIntervalAnalysis *GetRegll()
{
return regll;
}
private:
/* Target hooks implemented by backend-specific subclasses. */
virtual void CopyProp() = 0;
virtual void TargetProp(Insn &insn) = 0;
virtual void PropPatternOpt() = 0;
CGSSAInfo *ssaInfo;
CGDce *cgDce = nullptr;
LiveIntervalAnalysis *regll;
};
/* Stateless driver: instantiates a pattern class (template parameter) and
 * invokes its Run() once. */
class PropOptimizeManager {
public:
~PropOptimizeManager() = default;
/* Variant for patterns that take live-interval analysis. */
template <typename PropOptimizePattern>
void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll) const
{
PropOptimizePattern optPattern(cgFunc, cgssaInfo, ll);
optPattern.Run();
}
/* Variant for patterns that only need SSA info. */
template <typename PropOptimizePattern>
void Optimize(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) const
{
PropOptimizePattern optPattern(cgFunc, cgssaInfo);
optPattern.Run();
}
};
/* Base class for the propagation peephole patterns driven by
 * PropOptimizeManager; subclasses provide match/rewrite/driver logic. */
class PropOptimizePattern {
public:
PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo, LiveIntervalAnalysis *ll)
: cgFunc(cgFunc), optSsaInfo(cgssaInfo), regll(ll)
{
}
/* Overload for patterns that do not use live-interval analysis. */
PropOptimizePattern(CGFunc &cgFunc, CGSSAInfo *cgssaInfo) : cgFunc(cgFunc), optSsaInfo(cgssaInfo) {}
virtual ~PropOptimizePattern() = default;
/* Returns true when insn matches this pattern; callers are expected to
 * check this before Optimize (convention, not enforced here). */
virtual bool CheckCondition(Insn &insn) = 0;
virtual void Optimize(Insn &insn) = 0;
virtual void Run() = 0;
protected:
std::string PhaseName() const
{
return "propopt";
}
/* One-time setup hook for subclasses. */
virtual void Init() = 0;
/* Defined out-of-line; presumably resolves useVersion to its def insn —
 * confirm null-return contract against the implementation. */
Insn *FindDefInsn(const VRegVersion *useVersion);
CGFunc &cgFunc;
CGSSAInfo *optSsaInfo = nullptr;
LiveIntervalAnalysis *regll = nullptr;
};
/* Operand visitor carrying the context for replacing one register inside
 * operand `idx` of `insn` (oldReg -> newReg); the actual visit/rewrite
 * logic lives in subclasses not visible here. */
class ReplaceRegOpndVisitor : public OperandVisitorBase,
public OperandVisitors<RegOperand, ListOperand, MemOperand>,
public OperandVisitor<PhiOperand> {
public:
ReplaceRegOpndVisitor(CGFunc &f, Insn &cInsn, uint32 cIdx, RegOperand &oldR, RegOperand &newR)
: cgFunc(&f), insn(&cInsn), idx(cIdx), oldReg(&oldR), newReg(&newR)
{
}
virtual ~ReplaceRegOpndVisitor() = default;
protected:
CGFunc *cgFunc;
Insn *insn;
/* Operand index within insn that is being rewritten. */
uint32 idx;
RegOperand *oldReg;
RegOperand *newReg;
};
MAPLE_FUNC_PHASE_DECLARE(CgCopyProp, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE(CgTargetProp, maplebe::CGFunc)
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_CG_PROP_H */

View File

@ -0,0 +1,338 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CG_SSA_H
#define MAPLEBE_CG_INCLUDE_CG_SSA_H
#include "cgfunc.h"
#include "cg_dominance.h"
#include "live.h"
#include "operand.h"
#include "visitor_common.h"
namespace maplebe {
class CGSSAInfo;
enum SSAOpndDefBy { kDefByNo, kDefByInsn, kDefByPhi };
/* Precise def/use info in a machine instruction: maps each operand index of
 * `insn` to the number of def/use references recorded at that index. */
class DUInsnInfo {
public:
    /* Registers the first reference at operand index cIdx. */
    DUInsnInfo(Insn *cInsn, uint32 cIdx, MapleAllocator &alloc) : insn(cInsn), DUInfo(alloc.Adapter())
    {
        IncreaseDU(cIdx);
    }
    /* Adds one def/use reference at operand index idx (count starts at 0). */
    void IncreaseDU(uint32 idx)
    {
        if (!DUInfo.count(idx)) {
            DUInfo[idx] = 0;
        }
        DUInfo[idx]++;
    }
    /* Removes one reference; the count at idx must be positive. */
    void DecreaseDU(uint32 idx)
    {
        DEBUG_ASSERT(DUInfo[idx] > 0, "no def/use any more");
        DUInfo[idx]--;
    }
    /* Resets the count at idx to zero; idx must already be present. */
    void ClearDU(uint32 idx)
    {
        DEBUG_ASSERT(DUInfo.count(idx), "no def/use find");
        DUInfo[idx] = 0;
    }
    /* True when every recorded operand index has a zero count.
     * Declared const (it only reads DUInfo) and iterates by const reference
     * so map entries are not copied per iteration. */
    bool HasNoDU() const
    {
        for (const auto &it : DUInfo) {
            if (it.second != 0) {
                return false;
            }
        }
        return true;
    }
    Insn *GetInsn()
    {
        return insn;
    }
    MapleMap<uint32, uint32> &GetOperands()
    {
        return DUInfo;
    }
private:
    Insn *insn;
    /* operand idx --- count */
    MapleMap<uint32, uint32> DUInfo;
};
/* One SSA version of a virtual register: links the versioned operand to its
 * original regNO, its defining insn/phi, and all use sites. */
class VRegVersion {
public:
VRegVersion(const MapleAllocator &alloc, RegOperand &vReg, uint32 vIdx, regno_t vregNO)
: versionAlloc(alloc),
ssaRegOpnd(&vReg),
versionIdx(vIdx),
originalRegNO(vregNO),
useInsnInfos(versionAlloc.Adapter())
{
}
/* Records the defining insn info and whether the def comes from an insn
 * or a phi (SSAOpndDefBy). */
void SetDefInsn(DUInsnInfo *duInfo, SSAOpndDefBy defTy)
{
defInsnInfo = duInfo;
defType = defTy;
}
DUInsnInfo *GetDefInsnInfo() const
{
return defInsnInfo;
}
SSAOpndDefBy GetDefType() const
{
return defType;
}
/* Returns the def-side ssa operand by default; with isDef == false returns
 * the implicitly-converted use-side operand (may be null). */
RegOperand *GetSSAvRegOpnd(bool isDef = true)
{
if (!isDef) {
return implicitCvtedRegOpnd;
}
return ssaRegOpnd;
}
uint32 GetVersionIdx() const
{
return versionIdx;
}
regno_t GetOriginalRegNO() const
{
return originalRegNO;
}
void AddUseInsn(CGSSAInfo &ssaInfo, Insn &useInsn, uint32 idx);
/* eliminate dead use */
void CheckDeadUse(const Insn &useInsn);
void RemoveUseInsn(const Insn &useInsn, uint32 idx);
MapleUnorderedMap<uint32, DUInsnInfo *> &GetAllUseInsns()
{
return useInsnInfos;
}
void MarkDeleted()
{
deleted = true;
}
void MarkRecovery()
{
deleted = false;
}
bool IsDeleted() const
{
return deleted;
}
void SetImplicitCvt()
{
hasImplicitCvt = true;
}
bool HasImplicitCvt() const
{
return hasImplicitCvt;
}
private:
MapleAllocator versionAlloc;
/* if this version has implicit conversion, it refers to def reg */
RegOperand *ssaRegOpnd;
RegOperand *implicitCvtedRegOpnd = nullptr;
uint32 versionIdx;
regno_t originalRegNO;
DUInsnInfo *defInsnInfo = nullptr;
SSAOpndDefBy defType = kDefByNo;
/* insn ID -> insn* & operand Idx */
// --> vector?
MapleUnorderedMap<uint32, DUInsnInfo *> useInsnInfos;
bool deleted = false;
/*
* def reg (size:64) or def reg (size:32) -->
* all use reg (size:32) all use reg (size:64)
* do not support single use which has implicit conversion yet
* support single use in DUInfo in future
*/
bool hasImplicitCvt = false;
};
class CGSSAInfo {
public:
CGSSAInfo(CGFunc &f, DomAnalysis &da, MemPool &mp, MemPool &tmp)
: cgFunc(&f),
memPool(&mp),
tempMp(&tmp),
ssaAlloc(&mp),
domInfo(&da),
renamedBBs(ssaAlloc.Adapter()),
vRegDefCount(ssaAlloc.Adapter()),
vRegStk(ssaAlloc.Adapter()),
allSSAOperands(ssaAlloc.Adapter()),
noDefVRegs(ssaAlloc.Adapter()),
reversePostOrder(ssaAlloc.Adapter()),
safePropInsns(ssaAlloc.Adapter())
{
}
virtual ~CGSSAInfo() = default;
void ConstructSSA();
VRegVersion *FindSSAVersion(regno_t ssaRegNO); /* Get specific ssa info */
/* replace insn & update ssaInfo */
virtual void ReplaceInsn(Insn &oriInsn, Insn &newInsn) = 0;
virtual void ReplaceAllUse(VRegVersion *toBeReplaced, VRegVersion *newVersion) = 0;
virtual void CreateNewInsnSSAInfo(Insn &newInsn) = 0;
PhiOperand &CreatePhiOperand();
DUInsnInfo *CreateDUInsnInfo(Insn *cInsn, uint32 idx)
{
return memPool->New<DUInsnInfo>(cInsn, idx, ssaAlloc);
}
const MapleUnorderedMap<regno_t, VRegVersion *> &GetAllSSAOperands() const
{
return allSSAOperands;
}
bool IsNoDefVReg(regno_t vRegNO) const
{
return noDefVRegs.find(vRegNO) != noDefVRegs.end();
}
uint32 GetVersionNOOfOriginalVreg(regno_t vRegNO)
{
if (vRegDefCount.count(vRegNO)) {
return vRegDefCount[vRegNO];
}
DEBUG_ASSERT(false, " original vreg is not existed");
return 0;
}
MapleVector<uint32> &GetReversePostOrder()
{
return reversePostOrder;
}
void InsertSafePropInsn(uint32 insnId)
{
(void)safePropInsns.emplace_back(insnId);
}
MapleVector<uint32> &GetSafePropInsns()
{
return safePropInsns;
}
void DumpFuncCGIRinSSAForm() const;
virtual void DumpInsnInSSAForm(const Insn &insn) const = 0;
static uint32 SSARegNObase;
protected:
VRegVersion *CreateNewVersion(RegOperand &virtualOpnd, Insn &defInsn, uint32 idx, bool isDefByPhi = false);
virtual RegOperand *CreateSSAOperand(RegOperand &virtualOpnd) = 0;
bool IncreaseSSAOperand(regno_t vRegNO, VRegVersion *vst);
uint32 IncreaseVregCount(regno_t vRegNO);
VRegVersion *GetVersion(const RegOperand &virtualOpnd);
MapleUnorderedMap<regno_t, VRegVersion *> &GetPrivateAllSSAOperands()
{
return allSSAOperands;
}
void AddNoDefVReg(regno_t noDefVregNO)
{
DEBUG_ASSERT(!noDefVRegs.count(noDefVregNO), "duplicate no def Reg, please check");
noDefVRegs.emplace(noDefVregNO);
}
void MarkInsnsInSSA(Insn &insn);
CGFunc *cgFunc = nullptr;
MemPool *memPool = nullptr;
MemPool *tempMp = nullptr;
MapleAllocator ssaAlloc;
private:
void InsertPhiInsn();
void RenameVariablesForBB(uint32 bbID);
void RenameBB(BB &bb);
void RenamePhi(BB &bb);
virtual void RenameInsn(Insn &insn) = 0;
/* build ssa on virtual register only */
virtual RegOperand *GetRenamedOperand(RegOperand &vRegOpnd, bool isDef, Insn &curInsn, uint32 idx) = 0;
void RenameSuccPhiUse(const BB &bb);
void PrunedPhiInsertion(const BB &bb, RegOperand &virtualOpnd);
void AddRenamedBB(uint32 bbID)
{
DEBUG_ASSERT(!renamedBBs.count(bbID), "cgbb has been renamed already");
renamedBBs.emplace(bbID);
}
bool IsBBRenamed(uint32 bbID) const
{
return renamedBBs.count(bbID);
}
void SetReversePostOrder();
DomAnalysis *domInfo = nullptr;
MapleSet<uint32> renamedBBs;
/* original regNO - number of definitions (start from 0) */
MapleMap<regno_t, uint32> vRegDefCount;
/* original regNO - ssa version stk */
MapleMap<regno_t, MapleStack<VRegVersion *>> vRegStk;
/* ssa regNO - ssa virtual operand version */
MapleUnorderedMap<regno_t, VRegVersion *> allSSAOperands;
/* For virtual registers which do not have definition */
MapleSet<regno_t> noDefVRegs;
/* only save bb_id to reduce space */
MapleVector<uint32> reversePostOrder;
/* destSize < srcSize but can be propagated */
MapleVector<uint32> safePropInsns;
int32 insnCount = 0;
};
/* Base visitor used while walking insn operands during SSA construction;
 * carries the current insn, the operand's descriptor and its index. */
class SSAOperandVisitor : public OperandVisitorBase, public OperandVisitors<RegOperand, ListOperand, MemOperand> {
public:
SSAOperandVisitor(Insn &cInsn, const OpndDesc &cDes, uint32 idx) : insn(&cInsn), opndDes(&cDes), idx(idx) {}
SSAOperandVisitor() = default;
virtual ~SSAOperandVisitor() = default;
/* Re-targets this visitor at another insn/operand without reconstructing. */
void SetInsnOpndInfo(Insn &cInsn, const OpndDesc &cDes, uint32 index)
{
insn = &cInsn;
opndDes = &cDes;
this->idx = index;
}
protected:
Insn *insn = nullptr;
const OpndDesc *opndDes = nullptr;
/* Index of the operand currently being visited within insn. */
uint32 idx = 0;
};
/* Visitor base for dumping operands in SSA form; holds a reference to the
 * ssa-regNO -> version map so dumps can show version information. */
class SSAOperandDumpVisitor : public OperandVisitorBase,
public OperandVisitors<RegOperand, ListOperand, MemOperand>,
public OperandVisitor<PhiOperand> {
public:
explicit SSAOperandDumpVisitor(const MapleUnorderedMap<regno_t, VRegVersion *> &allssa) : allSSAOperands(allssa) {}
virtual ~SSAOperandDumpVisitor() = default;
void SetHasDumped()
{
hasDumped = true;
}
bool HasDumped() const
{
return hasDumped;
}
/* NOTE(review): public data member despite the accessors above; looks like
 * it was meant to be private — confirm before moving it. */
bool hasDumped = false;
protected:
const MapleUnorderedMap<regno_t, VRegVersion *> &allSSAOperands;
};
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgSSAConstruct, maplebe::CGFunc);
CGSSAInfo *GetResult()
{
return ssaInfo;
}
CGSSAInfo *ssaInfo = nullptr;
private:
void GetAnalysisDependence(maple::AnalysisDep &aDep) const override;
MAPLE_FUNC_PHASE_DECLARE_END
} // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_CG_SSA_H

View File

@ -0,0 +1,218 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H
#define MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H
#include <vector>
#include "mempool.h"
#include "mempool_allocator.h"
#include "cg_dominance.h"
// Use SSAPRE to determine where to insert saves for callee-saved registers.
// The external interface is DoSavePlacementOpt(). Class SsaPreWorkCand is used
// as input/output interface.
namespace maplebe {
using BBId = uint32;
// This must have been constructed by the caller of DoSavePlacementOpt() and
// passed to it as parameter. The caller of DoSavePlacementOpt() describes
// the problem via occBBs. DoSavePlacementOpt()'s outputs are returned to the
// caller by setting saveAtEntryBBs.
class SsaPreWorkCand {
public:
explicit SsaPreWorkCand(MapleAllocator *alloc) : occBBs(alloc->Adapter()), saveAtEntryBBs(alloc->Adapter()) {}
// inputs
MapleSet<BBId> occBBs; // Id's of BBs with appearances of the callee-saved reg
// outputs
MapleSet<BBId> saveAtEntryBBs; // Id's of BBs to insert saves of the register at BB entry
bool saveAtProlog = false; // if true, no shrinkwrapping can be done and
// the other outputs can be ignored
};
extern void DoSavePlacementOpt(CGFunc *f, DomAnalysis *dom, SsaPreWorkCand *workCand);
enum AOccType {
kAOccUndef,
kAOccReal,
kAOccPhi,
kAOccPhiOpnd,
kAOccExit,
};
class Occ {
public:
Occ(AOccType ty, BB *bb) : occTy(ty), cgbb(bb) {}
virtual ~Occ() = default;
virtual void Dump() const = 0;
bool IsDominate(DomAnalysis *dom, const Occ *occ) const
{
return dom->Dominate(*cgbb, *occ->cgbb);
}
AOccType occTy;
uint32 classId = 0;
BB *cgbb; // the BB it occurs in
Occ *def = nullptr; // points to its single def
};
class RealOcc : public Occ {
public:
explicit RealOcc(BB *bb) : Occ(kAOccReal, bb) {}
virtual ~RealOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId();
LogInfo::MapleLogger() << " classId" << classId;
}
bool redundant = true;
};
class PhiOcc;
class PhiOpndOcc : public Occ {
public:
explicit PhiOpndOcc(BB *bb) : Occ(kAOccPhiOpnd, bb) {}
virtual ~PhiOpndOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "PhiOpndOcc at bb" << cgbb->GetId() << " classId" << classId;
}
PhiOcc *defPhiOcc = nullptr; // its lhs definition
bool hasRealUse = false;
bool insertHere = false;
};
class PhiOcc : public Occ {
public:
PhiOcc(BB *bb, MapleAllocator &alloc) : Occ(kAOccPhi, bb), phiOpnds(alloc.Adapter()) {}
virtual ~PhiOcc() = default;
bool WillBeAvail() const
{
return isCanBeAvail && !isLater;
}
void Dump() const override
{
LogInfo::MapleLogger() << "PhiOcc at bb" << cgbb->GetId() << " classId" << classId << " Phi[";
for (size_t i = 0; i < phiOpnds.size(); i++) {
phiOpnds[i]->Dump();
if (i != phiOpnds.size() - 1) {
LogInfo::MapleLogger() << ", ";
}
}
LogInfo::MapleLogger() << "]";
}
bool isDownsafe = true;
bool speculativeDownsafe = false; // true if set to downsafe via speculation
bool isCanBeAvail = true;
bool isLater = true;
MapleVector<PhiOpndOcc *> phiOpnds;
};
class ExitOcc : public Occ {
public:
explicit ExitOcc(BB *bb) : Occ(kAOccExit, bb) {}
virtual ~ExitOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "ExitOcc at bb" << cgbb->GetId();
}
};
class SSAPre {
public:
SSAPre(CGFunc *cgfunc, DomAnalysis *dm, MemPool *memPool, SsaPreWorkCand *wkcand, bool aeap, bool enDebug)
: cgFunc(cgfunc),
dom(dm),
preMp(memPool),
preAllocator(memPool),
workCand(wkcand),
fullyAntBBs(cgfunc->GetAllBBs().size(), true, preAllocator.Adapter()),
phiDfns(std::less<uint32>(), preAllocator.Adapter()),
classCount(0),
realOccs(preAllocator.Adapter()),
allOccs(preAllocator.Adapter()),
phiOccs(preAllocator.Adapter()),
exitOccs(preAllocator.Adapter()),
asEarlyAsPossible(aeap),
enabledDebug(enDebug)
{
}
~SSAPre() = default;
void ApplySSAPre();
private:
// step 6 methods
void CodeMotion();
// step 5 methods
void Finalize();
// step 4 methods
void ResetCanBeAvail(PhiOcc *phi) const;
void ComputeCanBeAvail() const;
void ResetLater(PhiOcc *phi) const;
void ComputeLater() const;
// step 3 methods
void ResetDownsafe(const PhiOpndOcc *phiOpnd) const;
void ComputeDownsafe() const;
// step 2 methods
void Rename();
// step 1 methods
void GetIterDomFrontier(const BB *bb, MapleSet<uint32> *dfset) const
{
for (BBId bbid : dom->GetIdomFrontier(bb->GetId())) {
(void)dfset->insert(dom->GetDtDfnItem(bbid));
}
}
void FormPhis();
void CreateSortedOccs();
// step 0 methods
void PropagateNotAnt(BB *bb, std::set<BB *, BBIdCmp> *visitedBBs);
void FormRealsNExits();
CGFunc *cgFunc;
DomAnalysis *dom;
MemPool *preMp;
MapleAllocator preAllocator;
SsaPreWorkCand *workCand;
// step 0
MapleVector<bool> fullyAntBBs; // index is BBid; true if occ is fully anticipated at BB entry
// step 1 phi insertion data structures:
MapleSet<uint32> phiDfns; // set by FormPhis(); set of BBs in terms of their
// dfn's; index into dominance->dt_preorder to get
// their bbid's
// step 2 renaming
uint32 classCount; // for assigning new class id
// the following 4 lists are all maintained in order of dt_preorder
MapleVector<Occ *> realOccs;
MapleVector<Occ *> allOccs;
MapleVector<PhiOcc *> phiOccs;
MapleVector<ExitOcc *> exitOccs;
bool asEarlyAsPossible;
bool enabledDebug;
};
}; // namespace maplebe
#endif  // MAPLEBE_CG_INCLUDE_CG_SSU_PRE_H

View File

@ -0,0 +1,242 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_CG_INCLUDE_CGSSUPRE_H
#define MAPLEBE_CG_INCLUDE_CGSSUPRE_H
#include <vector>
#include "mempool.h"
#include "mempool_allocator.h"
#include "cg_dominance.h"
#include "cg_ssa_pre.h"
// Use SSUPRE to determine where to insert restores for callee-saved registers.
// The external interface is DoRestorePlacementOpt(). Class SPreWorkCand is used
// as input/output interface.
namespace maplebe {
// This must have been constructed by the caller of DoRestorePlacementOpt() and
// passed to it as parameter. The caller of DoRestorePlacementOpt() describes
// the problem via occBBs and saveBBs. DoRestorePlacementOpt()'s outputs are
// returned to the caller by setting restoreAtEntryBBs and restoreAtExitBBs.
class SPreWorkCand {
public:
explicit SPreWorkCand(MapleAllocator *alloc)
: occBBs(alloc->Adapter()),
saveBBs(alloc->Adapter()),
restoreAtEntryBBs(alloc->Adapter()),
restoreAtExitBBs(alloc->Adapter())
{
}
// inputs
MapleSet<BBId> occBBs; // Id's of BBs with appearances of the callee-saved reg
MapleSet<BBId> saveBBs; // Id's of BBs with saves of the callee-saved reg
// outputs
MapleSet<BBId> restoreAtEntryBBs; // Id's of BBs to insert restores of the register at BB entry
MapleSet<BBId> restoreAtExitBBs; // Id's of BBs to insert restores of the register at BB exit
bool restoreAtEpilog = false; // if true, no shrinkwrapping can be done and
// the other outputs can be ignored
};
extern void DoRestorePlacementOpt(CGFunc *f, PostDomAnalysis *pdom, SPreWorkCand *workCand);
enum SOccType {
kSOccUndef,
kSOccReal,
kSOccLambda,
kSOccLambdaRes,
kSOccEntry,
kSOccKill,
};
class SOcc {
public:
SOcc(SOccType ty, BB *bb) : occTy(ty), cgbb(bb) {}
virtual ~SOcc() = default;
virtual void Dump() const = 0;
bool IsPostDominate(PostDomAnalysis *pdom, const SOcc *occ) const
{
return pdom->PostDominate(*cgbb, *occ->cgbb);
}
SOccType occTy;
uint32 classId = 0;
BB *cgbb; // the BB it occurs in
SOcc *use = nullptr; // points to its single use
};
class SRealOcc : public SOcc {
public:
explicit SRealOcc(BB *bb) : SOcc(kSOccReal, bb) {}
virtual ~SRealOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "RealOcc at bb" << cgbb->GetId();
LogInfo::MapleLogger() << " classId" << classId;
}
bool redundant = true;
};
class SLambdaOcc;
class SLambdaResOcc : public SOcc {
public:
explicit SLambdaResOcc(BB *bb) : SOcc(kSOccLambdaRes, bb) {}
virtual ~SLambdaResOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "LambdaResOcc at bb" << cgbb->GetId() << " classId" << classId;
}
SLambdaOcc *useLambdaOcc = nullptr; // its rhs use
bool hasRealUse = false;
bool insertHere = false;
};
class SLambdaOcc : public SOcc {
public:
SLambdaOcc(BB *bb, MapleAllocator &alloc) : SOcc(kSOccLambda, bb), lambdaRes(alloc.Adapter()) {}
virtual ~SLambdaOcc() = default;
bool WillBeAnt() const
{
return isCanBeAnt && !isEarlier;
}
void Dump() const override
{
LogInfo::MapleLogger() << "LambdaOcc at bb" << cgbb->GetId() << " classId" << classId << " Lambda[";
for (size_t i = 0; i < lambdaRes.size(); i++) {
lambdaRes[i]->Dump();
if (i != lambdaRes.size() - 1) {
LogInfo::MapleLogger() << ", ";
}
}
LogInfo::MapleLogger() << "]";
}
bool isUpsafe = true;
bool isCanBeAnt = true;
bool isEarlier = true;
MapleVector<SLambdaResOcc *> lambdaRes;
};
class SEntryOcc : public SOcc {
public:
explicit SEntryOcc(BB *bb) : SOcc(kSOccEntry, bb) {}
virtual ~SEntryOcc() = default;
void Dump() const
{
LogInfo::MapleLogger() << "EntryOcc at bb" << cgbb->GetId();
}
};
class SKillOcc : public SOcc {
public:
explicit SKillOcc(BB *bb) : SOcc(kSOccKill, bb) {}
virtual ~SKillOcc() = default;
void Dump() const override
{
LogInfo::MapleLogger() << "KillOcc at bb" << cgbb->GetId();
}
};
/*
 * SSUPre: driver for partial redundancy elimination on SSU (static single use)
 * form.  Works over the post-dominator tree of cgFunc and processes a single
 * work candidate per instantiation; ApplySSUPre() runs steps 0-6 in order
 * (occurrence creation, lambda insertion, renaming, up-safety, anticipability,
 * finalize, code motion).
 */
class SSUPre {
public:
    /*
     * cgfunc:  function being optimized
     * pd:      post-dominance analysis results
     * memPool: pool backing all SSU-PRE allocations
     * wkcand:  the single work candidate to process
     * alap:    place insertions as late as possible when true
     * enDebug: enable debug dumping
     */
    SSUPre(CGFunc *cgfunc, PostDomAnalysis *pd, MemPool *memPool, SPreWorkCand *wkcand, bool alap, bool enDebug)
        : cgFunc(cgfunc),
          pdom(pd),
          spreMp(memPool),
          spreAllocator(memPool),
          workCand(wkcand),
          fullyAvailBBs(cgfunc->GetAllBBs().size(), true, spreAllocator.Adapter()),
          lambdaDfns(std::less<uint32>(), spreAllocator.Adapter()),
          classCount(0),
          realOccs(spreAllocator.Adapter()),
          allOccs(spreAllocator.Adapter()),
          lambdaOccs(spreAllocator.Adapter()),
          entryOccs(spreAllocator.Adapter()),
          asLateAsPossible(alap),
          enabledDebug(enDebug)
    {
        /* step 0 starts with an entry occurrence at the first BB */
        CreateEntryOcc(cgfunc->GetFirstBB());
    }
    ~SSUPre() = default;
    /* Run all phases of SSU-PRE for the work candidate. */
    void ApplySSUPre();

private:
    // step 6 methods
    void CodeMotion();
    // step 5 methods
    void Finalize();
    // step 4 methods
    void ResetCanBeAnt(SLambdaOcc *lambda) const;
    void ComputeCanBeAnt() const;
    void ResetEarlier(SLambdaOcc *lambda) const;
    void ComputeEarlier() const;
    // step 3 methods
    void ResetUpsafe(const SLambdaResOcc *lambdaRes) const;
    void ComputeUpsafe() const;
    // step 2 methods
    void Rename();
    // step 1 methods
    /* Collect bb's iterated post-dominance frontier as pdt-preorder dfn's into pdfset. */
    void GetIterPdomFrontier(const BB *bb, MapleSet<uint32> *pdfset) const
    {
        for (BBId bbid : pdom->GetIpdomFrontier(bb->GetId())) {
            (void)pdfset->insert(pdom->GetPdtDfnItem(bbid));
        }
    }
    void FormLambdas();
    void CreateSortedOccs();
    // step 0 methods
    /* Record an entry occurrence at bb (allocated from spreMp). */
    void CreateEntryOcc(BB *bb)
    {
        SEntryOcc *entryOcc = spreMp->New<SEntryOcc>(bb);
        entryOccs.push_back(entryOcc);
    }
    void PropagateNotAvail(BB *bb, std::set<BB *, BBIdCmp> *visitedBBs);
    void FormReals();

    CGFunc *cgFunc;
    PostDomAnalysis *pdom;
    MemPool *spreMp;
    MapleAllocator spreAllocator;
    SPreWorkCand *workCand;  // the single candidate this instance operates on
    // step 0
    MapleVector<bool> fullyAvailBBs;  // index is BBid; true if occ is fully available at BB exit
    // step 1 lambda insertion data structures:
    MapleSet<uint32> lambdaDfns;  // set by FormLambdas(); set of BBs in terms of
                                  // their dfn's; index into
                                  // dominance->pdt_preorder to get their bbid's
    // step 2 renaming
    uint32 classCount;  // for assigning new class id
    // the following 4 lists are all maintained in order of pdt_preorder
    MapleVector<SOcc *> realOccs;  // both real and kill occurrences
    MapleVector<SOcc *> allOccs;
    MapleVector<SLambdaOcc *> lambdaOccs;
    MapleVector<SEntryOcc *> entryOccs;
    bool asLateAsPossible;
    bool enabledDebug;
};
}; // namespace maplebe
#endif // MAPLEBE_CG_INCLUDE_CGSSUPRE_H

View File

@ -0,0 +1,101 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H
#define MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H
#include "cg.h"
#include "cgfunc.h"
#include "bb.h"
#include "insn.h"
#include "cg_ssa.h"
namespace maplebe {
#define CG_VALIDBIT_OPT_DUMP CG_DEBUG_FUNC(*cgFunc)
/*
 * Base class for one valid-bit optimization pattern.  A concrete pattern
 * implements CheckCondition (match) and Run (rewrite) for a single insn shape,
 * using the function's SSA info to look up defs and uses.
 */
class ValidBitPattern {
public:
    ValidBitPattern(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {}
    virtual ~ValidBitPattern()
    {
        /* non-owning pointers; cleared only to aid use-after-destroy detection */
        cgFunc = nullptr;
        ssaInfo = nullptr;
    }
    std::string PhaseName() const
    {
        return "cgvalidbitopt";
    }
    virtual std::string GetPatternName() = 0;
    /* Returns true when insn matches this pattern and Run may be applied. */
    virtual bool CheckCondition(Insn &insn) = 0;
    virtual void Run(BB &bb, Insn &insn) = 0;
    /* Def/use lookups via ssaInfo; defined out of line. */
    Insn *GetDefInsn(const RegOperand &useReg);
    InsnSet GetAllUseInsn(const RegOperand &defReg);
    void DumpAfterPattern(std::vector<Insn *> &prevInsns, const Insn *replacedInsn, const Insn *newInsn);

protected:
    CGFunc *cgFunc;
    CGSSAInfo *ssaInfo;
};
/*
 * Driver base class for the valid-bit optimization phase.  Holds the target
 * function and its SSA info; target-specific subclasses supply DoOpt and the
 * valid-bit maintenance hooks.
 */
class ValidBitOpt {
public:
    ValidBitOpt(CGFunc &f, CGSSAInfo &info) : cgFunc(&f), ssaInfo(&info) {}
    virtual ~ValidBitOpt()
    {
        /* non-owning; reset defensively */
        cgFunc = nullptr;
        ssaInfo = nullptr;
    }
    void Run();

    /*
     * Number of significant bits of an immediate: a negative value occupies the
     * full width `size`, zero needs a single bit, and a positive value needs
     * (index of its highest set bit + 1) bits.
     */
    static uint32 GetImmValidBit(int64 value, uint32 size)
    {
        if (value < 0) {
            return size;
        }
        if (value == 0) {
            return k1BitSize;
        }
        uint32 width = 0;
        for (auto bits = static_cast<uint64>(value); bits != 0; bits >>= 1) {
            ++width;
        }
        return width;
    }

    /* log2(val) when val is an exact power of two, otherwise -1. */
    static int64 GetLogValueAtBase2(int64 val)
    {
        if (__builtin_popcountll(static_cast<uint64>(val)) != 1) {
            return -1;
        }
        return __builtin_ffsll(val) - 1;
    }

    /* Instantiate the pattern VBOpt and apply it to a single insn. */
    template <typename VBOpt>
    void Optimize(BB &bb, Insn &insn)
    {
        VBOpt pattern(*cgFunc, *ssaInfo);
        pattern.Run(bb, insn);
    }
    virtual void DoOpt(BB &bb, Insn &insn) = 0;
    void RectifyValidBitNum();
    void RecoverValidBitNum();
    virtual void SetValidBits(Insn &insn) = 0;
    virtual bool SetPhiValidBits(Insn &insn) = 0;

protected:
    CGFunc *cgFunc;
    CGSSAInfo *ssaInfo;
};
MAPLE_FUNC_PHASE_DECLARE(CgValidBitOpt, maplebe::CGFunc)
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_VALIDBIT_OPT_H */

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,225 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DATAINFO_H
#define MAPLEBE_INCLUDE_CG_DATAINFO_H
#include "maple_string.h"
#include "common_utils.h"
#include "mempool.h"
#include "mempool_allocator.h"
namespace maplebe {
class DataInfo {
public:
DataInfo(uint32 bitNum, MapleAllocator &alloc) : info(alloc.Adapter())
{
info.resize(bitNum / kWordSize + 1, 0);
}
DataInfo(const DataInfo &other, MapleAllocator &alloc) : info(other.info, alloc.Adapter()) {}
DataInfo &Clone(MapleAllocator &alloc)
{
auto *dataInfo = alloc.New<DataInfo>(*this, alloc);
return *dataInfo;
}
~DataInfo() = default;
void SetBit(int64 bitNO)
{
DEBUG_ASSERT(bitNO < info.size() * kWordSize, "Out of Range");
info[static_cast<size_t>(bitNO / kWordSize)] |= (1ULL << static_cast<uint64>((bitNO % kWordSize)));
}
void ResetBit(uint32 bitNO)
{
info[bitNO / kWordSize] &= (~(1ULL << (bitNO % kWordSize)));
}
bool TestBit(uint32 bitNO) const
{
return (info[bitNO / kWordSize] & (1ULL << (bitNO % kWordSize))) != 0ULL;
}
const uint64 &GetElem(uint32 index) const
{
DEBUG_ASSERT(index < info.size(), "out of range");
return info[index];
}
void SetElem(uint32 index, uint64 val)
{
DEBUG_ASSERT(index < info.size(), "out of range");
info[index] = val;
}
bool NoneBit() const
{
for (auto &data : info) {
if (data != 0ULL) {
return false;
}
}
return true;
}
size_t Size() const
{
return info.size() * kWordSize;
}
const MapleVector<uint64> &GetInfo() const
{
return info;
}
bool IsEqual(const DataInfo &secondInfo) const
{
auto infoSize = static_cast<const uint32>(info.size());
DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different");
for (uint32 i = 0; i != infoSize; ++i) {
if (info[i] != secondInfo.GetElem(i)) {
return false;
}
}
return true;
}
bool IsEqual(const MapleVector<uint64> &LiveInfoBak) const
{
size_t infoSize = info.size();
DEBUG_ASSERT(infoSize == LiveInfoBak.size(), "two dataInfo's size different");
for (size_t i = 0; i != infoSize; ++i) {
if (info[i] != LiveInfoBak[i]) {
return false;
}
}
return true;
}
void AndBits(const DataInfo &secondInfo)
{
auto infoSize = static_cast<const uint32>(info.size());
DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different");
for (uint32 i = 0; i != infoSize; ++i) {
info[i] &= secondInfo.GetElem(i);
}
}
void OrBits(const DataInfo &secondInfo)
{
auto infoSize = static_cast<const uint32>(info.size());
DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different");
for (uint32 i = 0; i != infoSize; i++) {
info[i] |= secondInfo.GetElem(i);
}
}
void OrDesignateBits(const DataInfo &secondInfo, uint32 infoIndex)
{
DEBUG_ASSERT(infoIndex < secondInfo.GetInfo().size(), "out of secondInfo's range");
DEBUG_ASSERT(infoIndex < info.size(), "out of secondInfo's range");
info[infoIndex] |= secondInfo.GetElem(infoIndex);
}
void EorBits(const DataInfo &secondInfo)
{
auto infoSize = static_cast<const uint32>(info.size());
DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different");
for (uint32 i = 0; i != infoSize; i++) {
info[i] ^= secondInfo.GetElem(i);
}
}
/* if bit in secondElem is 1, bit in current DataInfo is set 0 */
void Difference(const DataInfo &secondInfo)
{
auto infoSize = static_cast<const uint32>(info.size());
DEBUG_ASSERT(infoSize == secondInfo.GetInfo().size(), "two dataInfo's size different");
for (uint32 i = 0; i != infoSize; i++) {
info[i] &= (~(secondInfo.GetElem(i)));
}
}
void ResetAllBit()
{
for (auto &data : info) {
data = 0ULL;
}
}
void EnlargeCapacityToAdaptSize(uint32 bitNO)
{
/* add one more size for each enlarge action */
info.resize(bitNO / kWordSize + 1, 0);
}
void GetNonZeroElemsIndex(std::set<uint32> &index)
{
auto infoSize = static_cast<const int32>(info.size());
for (int32 i = 0; i < infoSize; i++) {
if (info[i] != 0ULL) {
(void)index.insert(i);
}
}
}
template <typename T>
void GetBitsOfInfo(T &wordRes) const
{
wordRes.clear();
for (size_t i = 0; i != info.size(); ++i) {
uint32 result = 0;
uint64 word = info[i];
uint32 offset = 0;
uint32 baseWord = 0;
bool firstTime = true;
while (word) {
int32 index = __builtin_ffsll(static_cast<int64>(word));
if (index == 0) {
continue;
}
if (index == k64BitSize) {
/* when the highest bit is 1, the shift operation will cause error, need special treatment. */
result = i * kWordSize + (index - 1);
(void)wordRes.insert(result);
break;
}
if (firstTime) {
offset = static_cast<uint32>(index - 1);
baseWord = i * kWordSize;
firstTime = false;
} else {
offset = static_cast<uint32>(index);
baseWord = 0;
}
result += baseWord + offset;
(void)wordRes.insert(result);
word = word >> static_cast<uint64>(index);
}
}
}
void ClearDataInfo()
{
info.clear();
}
private:
/* long type has 8 bytes, 64 bits */
static constexpr int32 kWordSize = 64;
MapleVector<uint64> info;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_DATAINFO_H */

View File

@ -0,0 +1,17 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* .loc fileNum lineNum */
DBG_DEFINE(loc, , 2, Immediate, Immediate, Undef)

View File

@ -0,0 +1,146 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DBG_H
#define MAPLEBE_INCLUDE_CG_DBG_H
#include "insn.h"
#include "mempool_allocator.h"
#include "mir_symbol.h"
#include "debug_info.h"
namespace mpldbg {
using namespace maple;
/* https://sourceware.org/binutils/docs-2.28/as/Loc.html */
/* Optional arguments accepted by the assembler .loc directive (see URL above). */
enum LocOpt { kBB, kProEnd, kEpiBeg, kIsStmt, kIsa, kDisc };
/* Debug pseudo-insn opcodes, generated from dbg.def via X-macro expansion. */
enum DbgOpcode : uint8 {
#define DBG_DEFINE(k, sub, n, o0, o1, o2) OP_DBG_##k##sub,
#define ARM_DIRECTIVES_DEFINE(k, sub, n, o0, o1, o2) OP_ARM_DIRECTIVES_##k##sub,
#include "dbg.def"
#undef DBG_DEFINE
#undef ARM_DIRECTIVES_DEFINE
    kOpDbgLast
};
/*
 * DbgInsn: a debug pseudo-instruction (e.g. .loc).  It participates in the
 * insn stream but is not a machine instruction and defines no registers.
 */
class DbgInsn : public maplebe::Insn {
public:
    DbgInsn(MemPool &memPool, maplebe::MOperator op) : Insn(memPool, op) {}
    DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0) : Insn(memPool, op, opnd0) {}
    DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1)
        : Insn(memPool, op, opnd0, opnd1)
    {
    }
    DbgInsn(MemPool &memPool, maplebe::MOperator op, maplebe::Operand &opnd0, maplebe::Operand &opnd1,
            maplebe::Operand &opnd2)
        : Insn(memPool, op, opnd0, opnd1, opnd2)
    {
    }
    ~DbgInsn() = default;
    bool IsMachineInstruction() const override
    {
        return false;
    }
    void Dump() const override;
#if DEBUG
    void Check() const override;
#endif
    bool IsTargetInsn() const override
    {
        return false;
    }
    bool IsDbgInsn() const override
    {
        return true;
    }
    /* Debug insns never define registers; querying is a logic error. */
    bool IsRegDefined(maplebe::regno_t regNO) const override
    {
        CHECK_FATAL(false, "dbg insn do not def regs");
        return false;
    }
    std::set<uint32> GetDefRegs() const override
    {
        CHECK_FATAL(false, "dbg insn do not def regs");
        return std::set<uint32>();
    }
    uint32 GetBothDefUseOpnd() const override
    {
        return maplebe::kInsnMaxOpnd;
    }
    /* Line number of a .loc insn; defined out of line. */
    uint32 GetLoc() const;

private:
    /* copy assignment intentionally declared-but-undefined to forbid copying */
    DbgInsn &operator=(const DbgInsn &);
};
/* Immediate operand of a debug pseudo-insn (e.g. the file/line numbers of .loc). */
class ImmOperand : public maplebe::OperandVisitable<ImmOperand> {
public:
    /* NOTE(review): 32 appears to be the operand bit width passed to the base —
     * confirm against OperandVisitable's constructor. */
    explicit ImmOperand(int64 val) : OperandVisitable(kOpdImmediate, 32), val(val) {}
    ~ImmOperand() = default;
    using OperandVisitable<ImmOperand>::OperandVisitable;

    Operand *Clone(MemPool &memPool) const override
    {
        Operand *opnd = memPool.Clone<ImmOperand>(*this);
        return opnd;
    }
    void Dump() const override;
    /* Debug immediates are unordered; always reports "not less". */
    bool Less(const Operand &right) const override
    {
        (void)right;
        return false;
    }
    int64 GetVal() const
    {
        return val;
    }

private:
    int64 val;
};
/* Visitor that emits debug-insn operands (currently only ImmOperand) through the given Emitter. */
class DBGOpndEmitVisitor : public maplebe::OperandVisitorBase, public maplebe::OperandVisitor<ImmOperand> {
public:
    explicit DBGOpndEmitVisitor(maplebe::Emitter &asmEmitter) : emitter(asmEmitter) {}
    virtual ~DBGOpndEmitVisitor() = default;

protected:
    maplebe::Emitter &emitter;

private:
    void Visit(ImmOperand *v) final;
};
} /* namespace mpldbg */
#endif /* MAPLEBE_INCLUDE_CG_DBG_H */

View File

@ -0,0 +1,88 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DEPENDENCE_H
#define MAPLEBE_INCLUDE_CG_DEPENDENCE_H
#include "deps.h"
#include "cgbb.h"
namespace maplebe {
using namespace maple;
namespace {
constexpr maple::uint32 kMaxDependenceNum = 200;
};
/*
 * DepAnalysis: abstract interface for building the data-dependence graph of a
 * BB's insns (DepNodes linked by DepLinks) ahead of instruction scheduling.
 * Target backends provide the concrete implementation.
 */
class DepAnalysis {
public:
    DepAnalysis(CGFunc &func, MemPool &memPool, MAD &mad, bool beforeRA)
        : cgFunc(func), memPool(memPool), alloc(&memPool), beforeRA(beforeRA), mad(mad), lastComments(alloc.Adapter())
    {
    }
    virtual ~DepAnalysis() = default;
    /* Build dependence nodes/edges for all insns of bb into nodes. */
    virtual void Run(BB &bb, MapleVector<DepNode *> &nodes) = 0;
    const MapleVector<Insn *> &GetLastComments() const
    {
        return lastComments;
    }
    virtual void CombineClinit(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator) = 0;
    virtual void CombineDependence(DepNode &firstNode, DepNode &secondNode, bool isAcrossSeparator,
                                   bool isMemCombine = false) = 0;
    virtual void CombineMemoryAccessPair(DepNode &firstNode, DepNode &secondNode, bool useFirstOffset) = 0;
    virtual const std::string &GetDepTypeName(DepType depType) const = 0;
    virtual void DumpDepNode(DepNode &node) const = 0;
    virtual void DumpDepLink(DepLink &link, const DepNode *node) const = 0;

protected:
    CGFunc &cgFunc;
    MemPool &memPool;
    MapleAllocator alloc;
    bool beforeRA;  // true when run before register allocation
    MAD &mad;       // micro-architecture description used for latencies
    MapleVector<Insn *> lastComments;
    virtual void Init(BB &bb, MapleVector<DepNode *> &nodes) = 0;
    virtual void ClearAllDepData() = 0;
    virtual void AnalysisAmbiInsns(BB &bb) = 0;
    virtual void AppendRegUseList(Insn &insn, regno_t regNO) = 0;
    virtual void AddDependence(DepNode &fromNode, DepNode &toNode, DepType depType) = 0;
    virtual void RemoveSelfDeps(Insn &insn) = 0;
    virtual void BuildDepsUseReg(Insn &insn, regno_t regNO) = 0;
    virtual void BuildDepsDefReg(Insn &insn, regno_t regNO) = 0;
    virtual void BuildDepsAmbiInsn(Insn &insn) = 0;
    virtual void BuildDepsMayThrowInsn(Insn &insn) = 0;
    virtual void BuildDepsUseMem(Insn &insn, MemOperand &memOpnd) = 0;
    virtual void BuildDepsDefMem(Insn &insn, MemOperand &memOpnd) = 0;
    virtual void BuildDepsMemBar(Insn &insn) = 0;
    virtual void BuildDepsSeparator(DepNode &newSepNode, MapleVector<DepNode *> &nodes) = 0;
    virtual void BuildDepsControlAll(DepNode &depNode, const MapleVector<DepNode *> &nodes) = 0;
    virtual void BuildDepsAccessStImmMem(Insn &insn, bool isDest) = 0;
    virtual void BuildCallerSavedDeps(Insn &insn) = 0;
    virtual void BuildDepsBetweenControlRegAndCall(Insn &insn, bool isDest) = 0;
    virtual void BuildStackPassArgsDeps(Insn &insn) = 0;
    virtual void BuildDepsDirtyStack(Insn &insn) = 0;
    virtual void BuildDepsUseStack(Insn &insn) = 0;
    virtual void BuildDepsDirtyHeap(Insn &insn) = 0;
    virtual DepNode *BuildSeparatorNode() = 0;
    virtual bool IfInAmbiRegs(regno_t regNO) const = 0;
    virtual bool IsFrameReg(const RegOperand &) const = 0;
};
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_CG_DEPENDENCE_H */

View File

@ -0,0 +1,587 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_DEPS_H
#define MAPLEBE_INCLUDE_CG_DEPS_H
#include "mad.h"
#include "pressure.h"
#include <array>
namespace maplebe {
#define PRINT_STR_VAL(STR, VAL) LogInfo::MapleLogger() << std::left << std::setw(12) << STR << VAL << " | ";
#define PRINT_VAL(VAL) LogInfo::MapleLogger() << std::left << std::setw(12) << VAL << " | ";
/* Kinds of dependence edges between DepNodes; order must stay in sync with kDepTypeName below. */
enum DepType : uint8 {
    kDependenceTypeTrue,
    kDependenceTypeOutput,
    kDependenceTypeAnti,
    kDependenceTypeControl,
    kDependenceTypeMembar,
    kDependenceTypeThrow,
    kDependenceTypeSeparator,
    kDependenceTypeNone
};
/* Printable names, indexed by DepType. */
inline const std::array<std::string, kDependenceTypeNone + 1> kDepTypeName = {
    "true-dep", "output-dep", "anti-dep", "control-dep", "membar-dep", "throw-dep", "separator-dep", "none-dep",
};
/* Node categories: normal insn, separator pseudo node, or empty placeholder. */
enum NodeType : uint8 { kNodeTypeNormal, kNodeTypeSeparator, kNodeTypeEmpty };
/* Scheduling state of a DepNode during list scheduling. */
enum ScheduleState : uint8 {
    kNormal,
    kReady,
    kScheduled,
};
class DepNode;
class DepLink {
public:
DepLink(DepNode &fromNode, DepNode &toNode, DepType typ) : from(fromNode), to(toNode), depType(typ), latency(0) {}
virtual ~DepLink() = default;
DepNode &GetFrom() const
{
return from;
}
DepNode &GetTo() const
{
return to;
}
void SetDepType(DepType dType)
{
depType = dType;
}
DepType GetDepType() const
{
return depType;
}
void SetLatency(uint32 lat)
{
latency = lat;
}
uint32 GetLatency() const
{
return latency;
}
private:
DepNode &from;
DepNode &to;
DepType depType;
uint32 latency;
};
/*
 * DepNode: one node of the dependence graph — an insn plus its scheduling
 * bookkeeping (pipeline units, earliest/latest start, state, dependence
 * edges) and optional register-pressure data.  Most accessors are trivial;
 * regPressure-delegating methods assume SetRegPressure was called first.
 */
class DepNode {
public:
    bool CanBeScheduled() const;
    void OccupyUnits();
    uint32 GetUnitKind() const;

    DepNode(Insn &insn, MapleAllocator &alloc)
        : insn(&insn),
          units(nullptr),
          reservation(nullptr),
          unitNum(0),
          eStart(0),
          lStart(0),
          visit(0),
          type(kNodeTypeNormal),
          state(kNormal),
          index(0),
          simulateCycle(0),
          schedCycle(0),
          bruteForceSchedCycle(0),
          validPredsSize(0),
          validSuccsSize(0),
          preds(alloc.Adapter()),
          succs(alloc.Adapter()),
          comments(alloc.Adapter()),
          cfiInsns(alloc.Adapter()),
          clinitInsns(alloc.Adapter()),
          locInsn(nullptr),
          useRegnos(alloc.Adapter()),
          defRegnos(alloc.Adapter()),
          regPressure(nullptr)
    {
    }
    DepNode(Insn &insn, MapleAllocator &alloc, Unit *const *unit, uint32 num, Reservation &rev)
        : insn(&insn),
          units(unit),
          reservation(&rev),
          unitNum(num),
          eStart(0),
          lStart(0),
          visit(0),
          type(kNodeTypeNormal),
          state(kNormal),
          index(0),
          simulateCycle(0),
          schedCycle(0),
          bruteForceSchedCycle(0),
          validPredsSize(0),
          validSuccsSize(0),
          preds(alloc.Adapter()),
          succs(alloc.Adapter()),
          comments(alloc.Adapter()),
          cfiInsns(alloc.Adapter()),
          clinitInsns(alloc.Adapter()),
          locInsn(nullptr),
          useRegnos(alloc.Adapter()),
          defRegnos(alloc.Adapter()),
          regPressure(nullptr)
    {
    }
    virtual ~DepNode() = default;

    Insn *GetInsn() const
    {
        return insn;
    }
    void SetInsn(Insn &rvInsn)
    {
        insn = &rvInsn;
    }
    void SetUnits(Unit *const *unit)
    {
        units = unit;
    }
    const Unit *GetUnitByIndex(uint32 idx) const
    {
        /* fix: the assert previously tested the member `index` (this node's
         * scheduling index) instead of the parameter `idx` that subscripts
         * the units array, so out-of-range accesses were never caught */
        DEBUG_ASSERT(idx < unitNum, "out of units");
        return units[idx];
    }
    Reservation *GetReservation() const
    {
        return reservation;
    }
    void SetReservation(Reservation &rev)
    {
        reservation = &rev;
    }
    uint32 GetUnitNum() const
    {
        return unitNum;
    }
    void SetUnitNum(uint32 num)
    {
        unitNum = num;
    }
    uint32 GetEStart() const
    {
        return eStart;
    }
    void SetEStart(uint32 start)
    {
        eStart = start;
    }
    uint32 GetLStart() const
    {
        return lStart;
    }
    void SetLStart(uint32 start)
    {
        lStart = start;
    }
    uint32 GetVisit() const
    {
        return visit;
    }
    void SetVisit(uint32 visitVal)
    {
        visit = visitVal;
    }
    void IncreaseVisit()
    {
        ++visit;
    }
    NodeType GetType() const
    {
        return type;
    }
    void SetType(NodeType nodeType)
    {
        type = nodeType;
    }
    ScheduleState GetState() const
    {
        return state;
    }
    void SetState(ScheduleState scheduleState)
    {
        state = scheduleState;
    }
    uint32 GetIndex() const
    {
        return index;
    }
    void SetIndex(uint32 idx)
    {
        index = idx;
    }
    void SetSchedCycle(uint32 cycle)
    {
        schedCycle = cycle;
    }
    uint32 GetSchedCycle() const
    {
        return schedCycle;
    }
    void SetSimulateCycle(uint32 cycle)
    {
        simulateCycle = cycle;
    }
    uint32 GetSimulateCycle() const
    {
        return simulateCycle;
    }
    void SetBruteForceSchedCycle(uint32 cycle)
    {
        bruteForceSchedCycle = cycle;
    }
    uint32 GetBruteForceSchedCycle() const
    {
        return bruteForceSchedCycle;
    }
    void SetValidPredsSize(uint32 validSize)
    {
        validPredsSize = validSize;
    }
    uint32 GetValidPredsSize() const
    {
        return validPredsSize;
    }
    void DescreaseValidPredsSize()
    {
        --validPredsSize;
    }
    void IncreaseValidPredsSize()
    {
        ++validPredsSize;
    }
    uint32 GetValidSuccsSize() const
    {
        return validSuccsSize;
    }
    void SetValidSuccsSize(uint32 size)
    {
        validSuccsSize = size;
    }

    /* dependence edge management */
    const MapleVector<DepLink *> &GetPreds() const
    {
        return preds;
    }
    void ReservePreds(size_t size)
    {
        preds.reserve(size);
    }
    void AddPred(DepLink &depLink)
    {
        preds.emplace_back(&depLink);
    }
    void RemovePred()
    {
        preds.pop_back();
    }
    const MapleVector<DepLink *> &GetSuccs() const
    {
        return succs;
    }
    void ReserveSuccs(size_t size)
    {
        succs.reserve(size);
    }
    void AddSucc(DepLink &depLink)
    {
        succs.emplace_back(&depLink);
    }
    void RemoveSucc()
    {
        succs.pop_back();
    }

    /* attached non-machine insns (comments, cfi, clinit) */
    const MapleVector<Insn *> &GetComments() const
    {
        return comments;
    }
    void SetComments(MapleVector<Insn *> com)
    {
        comments = com;
    }
    void AddComments(Insn &insn)
    {
        comments.emplace_back(&insn);
    }
    void ClearComments()
    {
        comments.clear();
    }
    const MapleVector<Insn *> &GetCfiInsns() const
    {
        return cfiInsns;
    }
    void SetCfiInsns(MapleVector<Insn *> insns)
    {
        cfiInsns = insns;
    }
    void AddCfiInsn(Insn &insn)
    {
        cfiInsns.emplace_back(&insn);
    }
    void ClearCfiInsns()
    {
        cfiInsns.clear();
    }
    const MapleVector<Insn *> &GetClinitInsns() const
    {
        return clinitInsns;
    }
    void SetClinitInsns(MapleVector<Insn *> insns)
    {
        clinitInsns = insns;
    }
    void AddClinitInsn(Insn &insn)
    {
        clinitInsns.emplace_back(&insn);
    }

    /* register pressure: all delegating accessors below (except
     * DumpRegPressure) assume regPressure is non-null */
    const RegPressure *GetRegPressure() const
    {
        return regPressure;
    }
    void SetRegPressure(RegPressure &pressure)
    {
        regPressure = &pressure;
    }
    void DumpRegPressure() const
    {
        if (regPressure) {
            regPressure->DumpRegPressure();
        }
    }
    void InitPressure() const
    {
        regPressure->InitPressure();
    }
    const MapleVector<int32> &GetPressure() const
    {
        return regPressure->GetPressure();
    }
    void IncPressureByIndex(int32 idx) const
    {
        regPressure->IncPressureByIndex(static_cast<uint32>(idx));
    }
    void DecPressureByIndex(int32 idx) const
    {
        regPressure->DecPressureByIndex(static_cast<uint32>(idx));
    }
    const MapleVector<int32> &GetDeadDefNum() const
    {
        return regPressure->GetDeadDefNum();
    }
    void IncDeadDefByIndex(int32 idx) const
    {
        regPressure->IncDeadDefByIndex(static_cast<uint32>(idx));
    }
    void SetRegUses(RegList &regList) const
    {
        regPressure->SetRegUses(&regList);
    }
    void SetRegDefs(size_t idx, RegList *regList) const
    {
        regPressure->SetRegDefs(idx, regList);
    }
    int32 GetIncPressure() const
    {
        return regPressure->GetIncPressure();
    }
    void SetIncPressure(bool value) const
    {
        regPressure->SetIncPressure(value);
    }
    int32 GetMaxDepth() const
    {
        return regPressure->GetMaxDepth();
    }
    void SetMaxDepth(int32 value) const
    {
        regPressure->SetMaxDepth(value);
    }
    int32 GetNear() const
    {
        return regPressure->GetNear();
    }
    void SetNear(int32 value) const
    {
        regPressure->SetNear(value);
    }
    int32 GetPriority() const
    {
        return regPressure->GetPriority();
    }
    void SetPriority(int32 value) const
    {
        regPressure->SetPriority(value);
    }
    RegList *GetRegUses(size_t idx) const
    {
        return regPressure->GetRegUses(idx);
    }
    void InitRegUsesSize(size_t size) const
    {
        regPressure->InitRegUsesSize(size);
    }
    RegList *GetRegDefs(size_t idx) const
    {
        return regPressure->GetRegDefs(idx);
    }
    void InitRegDefsSize(size_t size) const
    {
        regPressure->InitRegDefsSize(size);
    }
    void SetNumCall(int32 value) const
    {
        regPressure->SetNumCall(value);
    }
    int32 GetNumCall() const
    {
        return regPressure->GetNumCall();
    }
    void SetHasNativeCallRegister(bool value) const
    {
        regPressure->SetHasNativeCallRegister(value);
    }
    bool GetHasNativeCallRegister() const
    {
        return regPressure->GetHasNativeCallRegister();
    }
    const Insn *GetLocInsn() const
    {
        return locInsn;
    }
    void SetLocInsn(const Insn &insn)
    {
        locInsn = &insn;
    }

    /* Print this dep-node's scheduling information. */
    void DumpSchedInfo() const
    {
        PRINT_STR_VAL("estart: ", eStart);
        PRINT_STR_VAL("lstart: ", lStart);
        PRINT_STR_VAL("visit: ", visit);
        PRINT_STR_VAL("state: ", state);
        PRINT_STR_VAL("index: ", index);
        PRINT_STR_VAL("validPredsSize: ", validPredsSize);
        PRINT_STR_VAL("validSuccsSize: ", validSuccsSize);
        LogInfo::MapleLogger() << '\n';
        constexpr int32 width = 12;
        LogInfo::MapleLogger() << std::left << std::setw(width) << "usereg: ";
        for (const auto &useReg : useRegnos) {
            LogInfo::MapleLogger() << "R" << useReg << " ";
        }
        LogInfo::MapleLogger() << "\n";
        LogInfo::MapleLogger() << std::left << std::setw(width) << "defreg: ";
        for (const auto &defReg : defRegnos) {
            LogInfo::MapleLogger() << "R" << defReg << " ";
        }
        LogInfo::MapleLogger() << "\n";
    }

    void SetHasPreg(bool value) const
    {
        regPressure->SetHasPreg(value);
    }
    bool GetHasPreg() const
    {
        return regPressure->GetHasPreg();
    }
    void AddUseReg(regno_t reg)
    {
        useRegnos.emplace_back(reg);
    }
    const MapleVector<regno_t> &GetUseRegnos() const
    {
        return useRegnos;
    }
    void AddDefReg(regno_t reg)
    {
        defRegnos.emplace_back(reg);
    }
    const MapleVector<regno_t> &GetDefRegnos() const
    {
        return defRegnos;
    }

private:
    Insn *insn;
    Unit *const *units;        /* pipeline units this insn occupies; size unitNum */
    Reservation *reservation;
    uint32 unitNum;
    uint32 eStart;             /* earliest start cycle */
    uint32 lStart;             /* latest start cycle */
    uint32 visit;
    NodeType type;
    ScheduleState state;
    uint32 index;              /* position of this node in its BB's node list */
    uint32 simulateCycle;
    uint32 schedCycle;
    uint32 bruteForceSchedCycle;
    /* For scheduling, denotes unscheduled preds/succs number. */
    uint32 validPredsSize;
    uint32 validSuccsSize;
    /* Dependence links. */
    MapleVector<DepLink *> preds;
    MapleVector<DepLink *> succs;
    /* Non-machine instructions prior to insn, such as comments. */
    MapleVector<Insn *> comments;
    /* Non-machine instructions which follows insn, such as cfi instructions. */
    MapleVector<Insn *> cfiInsns;
    /* Special instructions which follows insn, such as clinit instructions. */
    MapleVector<Insn *> clinitInsns;
    /* loc insn which indicate insn location in source file */
    const Insn *locInsn;
    MapleVector<regno_t> useRegnos;
    MapleVector<regno_t> defRegnos;
    /* For register pressure analysis */
    RegPressure *regPressure;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_DEPS_H */

View File

@ -0,0 +1,272 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_EBO_H
#define MAPLEBE_INCLUDE_CG_EBO_H
#include "cg_phase.h"
#include "cgbb.h"
#include "live.h"
#include "loop.h"
namespace maplebe {
namespace {
constexpr uint32 kEboDefaultMemHash = 0;
constexpr uint32 kEboNoAliasMemHash = 1;
constexpr uint32 kEboSpillMemHash = 2;
constexpr uint32 kEboCopyInsnHash = 3;
constexpr uint32 kEboReservedInsnHash = 4;
constexpr uint32 kEboMaxExpInsnHash = 1024;
constexpr uint32 kEboMaxOpndHash = 521;
constexpr uint32 kEboMaxInsnHash = kEboReservedInsnHash + kEboMaxExpInsnHash;
}; // namespace
#define EBO_EXP_INSN_HASH(val) ((kEboMaxExpInsnHash - 1ULL) & (static_cast<uint64>(val) >> 6))
/* forward decls */
class InsnInfo;
/* Per-operand bookkeeping for EBO: records where an operand is defined,
 * whether it is later redefined, and chains it into the hash/def lists. */
struct OpndInfo {
    explicit OpndInfo(Operand &opnd) : opnd(&opnd) {}
    virtual ~OpndInfo() = default;
    int32 hashVal = 0;                  /* Mem operand is placed in hash table, this is the hashVal of it, and otherwise -1. */
    Operand *opnd;                      /* Operand */
    Operand *replacementOpnd = nullptr; /* Rename opnd with this new name. */
    OpndInfo *replacementInfo = nullptr; /* Rename opnd with this info. */
    BB *bb = nullptr;                   /* The defining bb. */
    Insn *insn = nullptr;               /* The defining insn. */
    InsnInfo *insnInfo = nullptr;
    bool redefinedInBB = false;         /* A following definition exists in bb. */
    bool redefined = false;             /* A following definition exists. */
    Insn *redefinedInsn = nullptr;      /* Next defined insn if redefinedInBB is true */
#if TARGARM32
    bool mayReDef = false;
#endif
    OpndInfo *same = nullptr;           /* Other definitions of the same operand. */
    OpndInfo *prev = nullptr;
    OpndInfo *next = nullptr;
    OpndInfo *hashNext = nullptr;
    int32 refCount = 0;                 /* Number of references to the operand. */
};
/* OpndInfo specialization for memory operands: additionally tracks the
 * OpndInfo of the base register and of the offset. */
struct MemOpndInfo : public OpndInfo {
    explicit MemOpndInfo(Operand &opnd) : OpndInfo(opnd) {}
    ~MemOpndInfo() override = default;
    OpndInfo *GetBaseInfo() const
    {
        return base;
    }
    OpndInfo *GetOffsetInfo() const
    {
        return offset;
    }
    void SetBaseInfo(OpndInfo &baseInfo)
    {
        base = &baseInfo;
    }
    void SetOffsetInfo(OpndInfo &offInfo)
    {
        offset = &offInfo;
    }

private:
    OpndInfo *base = nullptr;   /* info of the base register operand */
    OpndInfo *offset = nullptr; /* info of the offset operand */
};
/* Per-insn bookkeeping for EBO: the defining insn, its result/operand infos,
 * and chaining into the insn hash table (same/prev/next). */
class InsnInfo {
public:
    InsnInfo(MemPool &memPool, Insn &insn)
        : alloc(&memPool),
          bb(insn.GetBB()),
          insn(&insn),
          result(alloc.Adapter()),
          origOpnd(alloc.Adapter()),
          optimalOpnd(alloc.Adapter())
    {
    }
    virtual ~InsnInfo() = default;
    MapleAllocator alloc;
    uint32 hashIndex = 0;
    bool mustNotBeRemoved = false; /* Some condition requires this insn. */
    BB *bb;                        /* The defining bb. */
    Insn *insn;                    /* The defining insn. */
    InsnInfo *same = nullptr;      /* Other insns with the same hash value. */
    InsnInfo *prev = nullptr;
    InsnInfo *next = nullptr;
    MapleVector<OpndInfo *> result; /* Result array. */
    MapleVector<OpndInfo *> origOpnd;
    MapleVector<OpndInfo *> optimalOpnd;
};
/* Ebo: target-independent driver for extended-basic-block optimization.
 * It hashes operands and instructions, forward-propagates operand values and
 * removes redundant instructions; all target-specific queries are supplied via
 * the pure virtual hooks in the protected section.
 * Fix: dropped the stray semicolons that followed the IsNotVisited/SetBBVisited
 * member-function bodies (flagged by -Wextra-semi). */
class Ebo {
public:
    /* live may be null; `before` selects the before-register-allocation mode. */
    Ebo(CGFunc &func, MemPool &memPool, LiveAnalysis *live, bool before, const std::string &phase)
        : cgFunc(&func),
          beforeRegAlloc(before),
          phaseName(phase),
          live(live),
          eboMp(&memPool),
          eboAllocator(&memPool),
          visitedBBs(eboAllocator.Adapter()),
          vRegInfo(eboAllocator.Adapter()),
          exprInfoTable(eboAllocator.Adapter()),
          insnInfoTable(eboAllocator.Adapter())
    {
    }
    virtual ~Ebo() = default;
    MemOpndInfo *GetMemInfo(InsnInfo &insnInfo);
    /* Record insn info at its hash slot; hashVal must be inside the table. */
    void SetInsnInfo(uint32 hashVal, InsnInfo &info)
    {
        DEBUG_ASSERT(hashVal < insnInfoTable.size(), "hashVal out of insnInfoTable range");
        insnInfoTable.at(hashVal) = &info;
    }
    /* Reference counting on operand infos (const: mutates the argument only). */
    void IncRef(OpndInfo &info) const
    {
        ++info.refCount;
    }
    void DecRef(OpndInfo &info) const
    {
        --info.refCount;
    }
    void EnlargeSpaceForLA(Insn &csetInsn);
    bool IsSaveReg(const Operand &opnd);
    bool IsFrameReg(Operand &opnd) const;
    bool OperandEqual(const Operand &op1, const Operand &op2) const;
    Operand *GetZeroOpnd(uint32 size) const;
    bool IsPhysicalReg(const Operand &opnd) const;
    bool HasAssignedReg(const Operand &opnd) const;
    bool IsOfSameClass(const Operand &op0, const Operand &op1) const;
    bool OpndAvailableInBB(const BB &bb, OpndInfo *info);
    bool ForwardPropCheck(const Operand *opndReplace, const OpndInfo &opndInfo, const Operand &opnd, Insn &insn);
    bool RegForwardCheck(Insn &insn, const Operand &opnd, const Operand *opndReplace, Operand &oldOpnd,
                         const OpndInfo *tmpInfo);
    /* Per-BB visited bookkeeping for the extended-block walk. */
    bool IsNotVisited(const BB &bb)
    {
        return !visitedBBs.at(bb.GetId());
    }
    void SetBBVisited(const BB &bb)
    {
        visitedBBs.at(bb.GetId()) = true;
    }
    void UpdateOpndInfo(const Operand &opnd, OpndInfo &opndInfo, OpndInfo *newInfo, int32 hashVal);
    void SetOpndInfo(const Operand &opnd, OpndInfo *opndInfo, int32 hashVal);
    bool RegistersIdentical(const Operand &op0, const Operand &op1) const;
    OpndInfo *GetOpndInfo(const Operand &opnd, int32 hashVal) const;
    OpndInfo *GetNewOpndInfo(BB &bb, Insn *insn, Operand &opnd, int32 hashVal);
    OpndInfo *OperandInfoUse(BB &currentBB, Operand &localOpnd);
    InsnInfo *GetNewInsnInfo(Insn &insn);
    int32 ComputeOpndHash(const Operand &opnd) const;
    uint32 ComputeHashVal(Insn &insn, const MapleVector<OpndInfo *> &opndInfos) const;
    void MarkOpndLiveIntoBB(const Operand &opnd, BB &into, BB &def) const;
    void RemoveInsn(InsnInfo &info);
    void RemoveUses(uint32 opndNum, const MapleVector<OpndInfo *> &origInfo);
    void HashInsn(Insn &insn, const MapleVector<OpndInfo *> &origInfo, const MapleVector<OpndInfo *> &opndInfos);
    void BuildAllInfo(BB &bb);
    InsnInfo *LocateInsnInfo(const OpndInfo &info);
    void RemoveUnusedInsns(BB &bb, bool normal);
    void UpdateNextInfo(const OpndInfo &opndInfo);
    void BackupOpndInfoList(OpndInfo *saveLast);
    void BackupInsnInfoList(InsnInfo *saveLast);
    void AddBB2EB(BB &bb);
    void EboInit();
    void EboProcessSingleBB();
    void EboProcess();
    /* Pass entry point. */
    void Run();
    std::string PhaseName() const
    {
        return phaseName;
    }

protected:
    CGFunc *cgFunc;
    bool beforeRegAlloc; /* True if perform Ebo before register allocation. */
    /* Target-specific hooks implemented by the AArch64/X64/... subclasses. */
    virtual OpndInfo *OperandInfoDef(BB &currentBB, Insn &currentInsn, Operand &localOpnd) = 0;
    virtual const RegOperand &GetRegOperand(const Operand &opnd) const = 0;
    virtual bool IsGlobalNeeded(Insn &insn) const = 0;
    virtual bool IsDecoupleStaticOp(Insn &insn) const = 0;
    virtual bool IsFmov(const Insn &insn) const = 0;
    virtual bool SpecialSequence(Insn &insn, const MapleVector<OpndInfo *> &origInfos) = 0;
    virtual bool DoConstProp(Insn &insn, uint32 i, Operand &opnd) = 0;
    virtual bool Csel2Cset(Insn &insn, const MapleVector<Operand *> &opnds) = 0;
    virtual bool SimplifyConstOperand(Insn &insn, const MapleVector<Operand *> &opnds,
                                      const MapleVector<OpndInfo *> &opndInfo) = 0;
    virtual int32 GetOffsetVal(const MemOperand &mem) const = 0;
    virtual bool OperandEqSpecial(const Operand &op1, const Operand &op2) const = 0;
    virtual void BuildCallerSaveRegisters() = 0;
    virtual void DefineAsmRegisters(InsnInfo &insnInfo) = 0;
    virtual void DefineCallerSaveRegisters(InsnInfo &insnInfo) = 0;
    virtual void DefineReturnUseRegister(Insn &insn) = 0;
    virtual void DefineCallUseSpecialRegister(Insn &insn) = 0;
    virtual void DefineClinitSpecialRegisters(InsnInfo &insnInfo) = 0;
    virtual bool IsMovToSIMDVmov(Insn &insn, const Insn &replaceInsn) const = 0;
    virtual bool IsPseudoRet(Insn &insn) const = 0;
    virtual bool ChangeLdrMop(Insn &insn, const Operand &opnd) const = 0;
    virtual bool IsAdd(const Insn &insn) const = 0;
    virtual bool IsClinitCheck(const Insn &insn) const = 0;
    virtual bool IsLastAndBranch(BB &bb, Insn &insn) const = 0;
    virtual bool IsSameRedefine(BB &bb, Insn &insn, OpndInfo &opndInfo) const = 0;
    virtual bool ResIsNotDefAndUse(Insn &insn) const = 0;
    virtual bool LiveOutOfBB(const Operand &opnd, const BB &bb) const = 0;
    virtual bool IsInvalidReg(const RegOperand &opnd) const = 0;
    virtual bool IsZeroRegister(const Operand &opnd) const = 0;
    virtual bool IsConstantImmOrReg(const Operand &opnd) const = 0;
    OpndInfo *BuildMemOpndInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex);
    OpndInfo *BuildOperandInfo(BB &bb, Insn &insn, Operand &opnd, uint32 opndIndex, MapleVector<OpndInfo *> &origInfos);
    bool ForwardPropagateOpnd(Insn &insn, Operand *&opnd, uint32 opndIndex, OpndInfo *&opndInfo,
                              MapleVector<OpndInfo *> &origInfos);
    void SimplifyInsn(Insn &insn, bool &insnReplaced, bool opndsConstant, const MapleVector<Operand *> &opnds,
                      const MapleVector<OpndInfo *> &opndInfos, const MapleVector<OpndInfo *> &origInfos);
    void FindRedundantInsns(BB &bb, Insn *&insn, const Insn *prev, bool insnReplaced, MapleVector<Operand *> &opnds,
                            MapleVector<OpndInfo *> &opndInfos, const MapleVector<OpndInfo *> &origInfos);
    void PreProcessSpecialInsn(Insn &insn);
    std::string phaseName;
    LiveAnalysis *live;
    uint32 bbNum = 0; /* bb numbers for an extend block. */
    MemPool *eboMp;
    MapleAllocator eboAllocator;
    MapleVector<bool> visitedBBs;
    /* Intrusive lists of operand/insn infos created during the walk. */
    OpndInfo *firstOpndInfo = nullptr;
    OpndInfo *lastOpndInfo = nullptr;
    InsnInfo *firstInsnInfo = nullptr;
    InsnInfo *lastInsnInfo = nullptr;
    MapleUnorderedMap<uint32, OpndInfo *> vRegInfo;
    MapleVector<OpndInfo *> exprInfoTable;
    MapleVector<InsnInfo *> insnInfoTable;
    bool optSuccess = false;
};
MAPLE_FUNC_PHASE_DECLARE(CgEbo0, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE(CgEbo1, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE(CgPostEbo, maplebe::CGFunc)
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_EBO_H */

View File

@ -0,0 +1,226 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_EH_EH_FUNC_H
#define MAPLEBE_INCLUDE_EH_EH_FUNC_H
#include "mir_parser.h"
#include "mir_function.h"
#include "lsda.h"
#include "cg_phase.h"
#include "maple_phase.h"
namespace maplebe {
/* EHTry models one MIR try statement: the try/endtry node pair, the catch
 * blocks attached to it and the LSDA call-site entry emitted for it. */
class EHTry {
public:
    EHTry(MapleAllocator &alloc, TryNode &tryNode) : tryNode(&tryNode), catchVec(alloc.Adapter()) {}
    ~EHTry() = default;
    TryNode *GetTryNode() const
    {
        return tryNode;
    }
    void SetEndtryNode(StmtNode &endtryNode)
    {
        this->endTryNode = &endtryNode;
    }
    StmtNode *GetEndtryNode()
    {
        return endTryNode;
    }
    /* fallthruGoto may be null: used only when the try block cannot throw. */
    void SetFallthruGoto(StmtNode *fallthruGoto)
    {
        this->fallThroughGoto = fallthruGoto;
    }
    StmtNode *GetFallthruGoto()
    {
        return fallThroughGoto;
    }
    size_t GetCatchVecSize() const
    {
        return catchVec.size();
    }
    void PushBackCatchVec(CatchNode &catchNode)
    {
        catchVec.emplace_back(&catchNode);
    }
    /* Aborts (CHECK_FATAL) when pos is out of range. */
    CatchNode *GetCatchNodeAt(size_t pos) const
    {
        CHECK_FATAL(pos < GetCatchVecSize(), "pos is out of range.");
        return catchVec.at(pos);
    }
    void SetLSDACallSite(LSDACallSite &lsdaCallSite)
    {
        this->lsdaCallSite = &lsdaCallSite;
    }
    /* NOTE(review): dereferences lsdaCallSite without a null check — callers
     * must have called SetLSDACallSite first; confirm against call sites. */
    void SetCSAction(uint32 action) const
    {
        lsdaCallSite->csAction = action;
    }
    void DumpEHTry(const MIRModule &mirModule);

private:
    TryNode *tryNode;
    StmtNode *endTryNode = nullptr;
    StmtNode *fallThroughGoto = nullptr; /* no throw in the try block, the goto stmt to the fall through */
    MapleVector<CatchNode *> catchVec;
    LSDACallSite *lsdaCallSite = nullptr; /* one try has a callsite */
};
/* EHThrow wraps one throw/rethrow statement and tracks the try region (if any)
 * it sits inside plus the label range used for its LSDA entry. */
class EHThrow {
public:
    explicit EHThrow(UnaryStmtNode &rtNode) : rethrow(&rtNode) {}
    ~EHThrow() = default;
    /* True when this throw is lexically inside a try region. */
    bool IsUnderTry() const
    {
        return javaTry != nullptr;
    }
    /* True once Lower() has materialized the start/end labels for the LSDA. */
    bool HasLSDA() const
    {
        return startLabel != nullptr;
    }
    const UnaryStmtNode *GetRethrow() const
    {
        return rethrow;
    }
    void SetJavaTry(EHTry *javaTry)
    {
        this->javaTry = javaTry;
    }
    LabelNode *GetStartLabel()
    {
        return startLabel;
    }
    LabelNode *GetEndLabel()
    {
        return endLabel;
    }
    void Lower(CGFunc &cgFunc);
    void ConvertThrowToRethrow(CGFunc &cgFunc);
    void ConvertThrowToRuntime(CGFunc &cgFunc, BaseNode &arg);

private:
    UnaryStmtNode *rethrow;          /* must be a throw stmt */
    EHTry *javaTry = nullptr;        /* the try statement wrapping this throw */
    LabelNode *startLabel = nullptr; /* the label that "MCC_RethrowException" or "MCC_ThrowException" begin */
    LabelNode *endLabel = nullptr;   /* the label that "MCC_RethrowException" or "MCC_ThrowException" end */
};
/* EHFunc aggregates all exception-handling information for one CGFunc:
 * try regions, the EH type table and the LSDA tables emitted for unwinding. */
class EHFunc {
public:
    static constexpr uint8 kTypeEncoding = 0x9b; /* same thing as LSDAHeader.kTypeEncoding */
    explicit EHFunc(CGFunc &func);
    ~EHFunc() = default;
    void CollectEHInformation(std::vector<std::pair<LabelIdx, CatchNode *>> &catchVec);
    void InsertEHSwitchTable();
    void CreateLSDA();
    bool NeedFullLSDA() const;
    bool NeedFastLSDA() const;
    void InsertCxaAfterEachCatch(const std::vector<std::pair<LabelIdx, CatchNode *>> &catchVec);
    void GenerateCleanupLabel();
    void MergeCatchToTry(const std::vector<std::pair<LabelIdx, CatchNode *>> &catchVec);
    void BuildEHTypeTable(const std::vector<std::pair<LabelIdx, CatchNode *>> &catchVec);
    void LowerThrow(); /* for non-personality function */
    void CreateTypeInfoSt();
    void DumpEHFunc() const;
    /* True when the function contains at least one throw/rethrow. */
    bool HasThrow() const
    {
        return !rethrowVec.empty();
    }
    void AddTry(EHTry &ehTry)
    {
        tryVec.emplace_back(&ehTry);
    }
    size_t GetEHTyTableSize() const
    {
        return ehTyTable.size();
    }
    /* Aborts (CHECK_FATAL) when index is out of range; negative values wrap
     * through the size_t cast and are rejected by the same check. */
    TyIdx &GetEHTyTableMember(int32 index)
    {
        CHECK_FATAL(static_cast<size_t>(index) < ehTyTable.size(), "out of ehTyTable");
        return ehTyTable[index];
    }
    LSDAHeader *GetLSDAHeader()
    {
        return lsdaHeader;
    }
    LSDACallSiteTable *GetLSDACallSiteTable()
    {
        return lsdaCallSiteTable;
    }
    const LSDACallSiteTable *GetLSDACallSiteTable() const
    {
        return lsdaCallSiteTable;
    }
    const LSDAActionTable *GetLSDAActionTable() const
    {
        return lsdaActionTable;
    }
    void AddRethrow(EHThrow &rethrow)
    {
        rethrowVec.emplace_back(&rethrow);
    }

private:
    void CreateLSDAAction();
    void InsertDefaultLabelAndAbortFunc(BlockNode &blkNode, SwitchNode &switchNode, const StmtNode &beforeEndLabel);
    void FillSwitchTable(SwitchNode &switchNode, const EHTry &ehTry);
    void CreateLSDAHeader();
    void FillLSDACallSiteTable();
    LabelIdx CreateLabel(const std::string &cstr);
    bool HasTry() const;
    CGFunc *cgFunc;
    LabelIdx labelIdx = 0;
    MapleVector<EHTry *> tryVec;           /* try stmt node */
    MapleVector<TyIdx> ehTyTable;          /* the type that would emit in LSDA */
    MapleMap<TyIdx, uint32> ty2IndexTable; /* use the TyIdx to get the index of ehTyTable; */
    LSDAHeader *lsdaHeader = nullptr;
    LSDACallSiteTable *lsdaCallSiteTable = nullptr;
    LSDAActionTable *lsdaActionTable = nullptr;
    MapleVector<EHThrow *> rethrowVec; /* EHRethrow */
};
MAPLE_FUNC_PHASE_DECLARE_BEGIN(CgBuildEHFunc, maplebe::CGFunc)
MAPLE_FUNC_PHASE_DECLARE_END
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_EH_EH_FUNC_H */

View File

@ -0,0 +1,145 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_IFILE_TYPE_H
#define MAPLEBE_INCLUDE_CG_IFILE_TYPE_H
#include <cstdint>
namespace maplebe {
// The following definitions are excerpted from the elf.h
// (kept as macros, with the standard ELF names, so values match the System V
// ABI object-file format; only the subset used by the iFile emitter is here).
#define EI_NIDENT (16)
#define EI_MAG0 0       /* File identification byte 0 index */
#define EI_MAG1 1       /* File identification byte 1 index */
#define EI_MAG2 2       /* File identification byte 2 index */
#define EI_MAG3 3       /* File identification byte 3 index */
#define EI_CLASS 4      /* File class byte index */
#define EI_DATA 5       /* Data encoding byte index */
#define EI_VERSION 6    /* File version byte index */
#define EI_OSABI 7      /* OS ABI identification */
#define EI_ABIVERSION 8 /* ABI version */
#define EI_PAD 9        /* Byte index of padding bytes */
#define ELFCLASS64 2    /* 64-bit objects */
#define ELFDATA2LSB 1   /* 2's complement, little endian */
#define ELFMAG0 0x7f    /* Magic number byte 0 */
#define ELFOSABI_LINUX 3 /* Compatibility alias. */
#define ELFOSABI_NONE 0  /* UNIX System V ABI */
#define ELFMAG1 'E'      /* Magic number byte 1 */
#define ELFMAG2 'L'      /* Magic number byte 2 */
#define ELFMAG3 'F'      /* Magic number byte 3 */
#define EM_AARCH64 183   /* ARM AARCH64 */
#define EM_X86_64 62     /* AMD x86-64 architecture */
#define ET_REL 1         /* Relocatable file */
#define EV_CURRENT 1     /* Current version */
#define R_AARCH64_ADR_PREL_PG_HI21 275 /* Page-rel. ADRP imm. from 32:12. */
#define R_AARCH64_ADD_ABS_LO12_NC 277  /* Dir. ADD imm. from bits 11:0. */
#define R_AARCH64_CALL26 283           /* Likewise for CALL. */
#define R_X86_64_32 10                 /* Direct 32 bit zero extended */
#define R_X86_64_64 1                  /* Direct 64 bit */
#define R_X86_64_NONE 0                /* No reloc */
#define R_X86_64_PC32 2                /* PC relative 32 bit signed */
#define R_X86_64_PC64 24               /* PC relative 64 bit */
#define R_X86_64_PLT32 4               /* 32 bit PLT address */
#define SHF_ALLOC (1 << 1)     /* Occupies memory during execution */
#define SHF_EXECINSTR (1 << 2) /* Executable */
#define SHF_INFO_LINK (1 << 6) /* `sh_info' contains SHT index */
#define SHF_WRITE (1 << 0)     /* Writable */
#define SHF_MASKPROC 0xf0000000 /* Processor-specific */
#define SHN_COMMON 0xfff2       /* Associated symbol is common */
#define SHT_NULL 0              /* Section header table entry unused */
#define SHT_PROGBITS 1          /* Program data */
#define SHT_SYMTAB 2            /* Symbol table */
#define SHT_STRTAB 3            /* String table */
#define SHT_RELA 4              /* Relocation entries with addends */
#define SHT_NOBITS 8            /* Program space with no data (bss) */
#define STB_LOCAL 0             /* Local symbol */
#define STB_GLOBAL 1            /* Global symbol */
#define STB_WEAK 2              /* Weak symbol */
#define STT_NOTYPE 0            /* Symbol type is unspecified */
#define STT_OBJECT 1            /* Symbol is a data object */
#define STT_FUNC 2              /* Symbol is a code object */
#define STT_SECTION 3           /* Symbol associated with a section */
/* ELF-64 primitive type aliases (match Elf64_Addr/Off/Word/Xword/Section/Sxword). */
using Address = uint64_t;
using Offset = uint64_t;
using Word = uint32_t;
using Xword = uint64_t;
using SectionIndex = uint16_t;
using Sxword = int64_t;
/* ELF-64 file header (layout of Elf64_Ehdr). Declared as a named struct:
 * the C-style `typedef struct { ... } Name;` idiom is redundant in C++. */
struct FileHeader {
    unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */
    uint16_t e_type;                  /* Object file type (e.g. ET_REL) */
    uint16_t e_machine;               /* Architecture (EM_AARCH64 / EM_X86_64) */
    uint32_t e_version;               /* Object file version (EV_CURRENT) */
    uint64_t e_entry;                 /* Entry point virtual address */
    uint64_t e_phoff;                 /* Program header table file offset */
    uint64_t e_shoff;                 /* Section header table file offset */
    uint32_t e_flags;                 /* Processor-specific flags */
    uint16_t e_ehsize;                /* ELF header size in bytes */
    uint16_t e_phentsize;             /* Program header table entry size */
    uint16_t e_phnum;                 /* Program header table entry count */
    uint16_t e_shentsize;             /* Section header table entry size */
    uint16_t e_shnum;                 /* Section header table entry count */
    uint16_t e_shstrndx;              /* Section name string table index */
};
/* ELF-64 section header (layout of Elf64_Shdr). Declared as a named struct:
 * the C-style `typedef struct { ... } Name;` idiom is redundant in C++. */
struct SectionHeader {
    uint32_t sh_name;      /* Section name, index in string tbl */
    uint32_t sh_type;      /* Type of section (SHT_*) */
    uint64_t sh_flags;     /* Miscellaneous section attributes (SHF_*) */
    uint64_t sh_addr;      /* Section virtual addr at execution */
    uint64_t sh_offset;    /* Section file offset */
    uint64_t sh_size;      /* Size of section in bytes */
    uint32_t sh_link;      /* Index of another section */
    uint32_t sh_info;      /* Additional section information */
    uint64_t sh_addralign; /* Section alignment */
    uint64_t sh_entsize;   /* Entry size if section holds table */
};
/* ELF-64 symbol table entry (layout of Elf64_Sym). Declared as a named struct:
 * the C-style `typedef struct { ... } Name;` idiom is redundant in C++. */
struct Symbol {
    uint32_t st_name;      /* Symbol name, index in string tbl */
    unsigned char st_info; /* Type and binding attributes (STB_* << 4 | STT_*) */
    unsigned char st_other; /* No defined meaning, 0 */
    uint16_t st_shndx;     /* Associated section index */
    uint64_t st_value;     /* Value of the symbol */
    uint64_t st_size;      /* Associated symbol size */
};
/* ELF-64 program (segment) header (layout of Elf64_Phdr). Declared as a named
 * struct: the C-style `typedef struct { ... } Name;` idiom is redundant in C++. */
struct SegmentHeader {
    uint32_t p_type;   /* Segment type */
    uint32_t p_flags;  /* Segment flags */
    uint64_t p_offset; /* Segment file offset */
    uint64_t p_vaddr;  /* Segment virtual address */
    uint64_t p_paddr;  /* Segment physical address */
    uint64_t p_filesz; /* Segment size in file */
    uint64_t p_memsz;  /* Segment size in memory */
    uint64_t p_align;  /* Segment alignment */
};
/* ELF-64 dynamic section entry (layout of Elf64_Dyn). Declared as a named
 * struct: the C-style `typedef struct { ... } Name;` idiom is redundant in C++. */
struct DynSectionEntry {
    int64_t d_tag; /* entry tag value */
    union {
        uint64_t d_val; /* integer value interpretation */
        uint64_t d_ptr; /* address value interpretation */
    } d_un;
};
/* ELF-64 relocation entry with addend (layout of Elf64_Rela). Declared as a
 * named struct: the C-style `typedef struct { ... } Name;` idiom is redundant
 * in C++. r_info packs (symbol index << 32) | relocation type. */
struct Rela {
    uint64_t r_offset; /* Location at which to apply the action */
    uint64_t r_info;   /* index and type of relocation */
    int64_t r_addend;  /* Constant addend used to compute value */
};
} // namespace maplebe
#endif /* MAPLEBE_INCLUDE_CG_IFILE_TYPE_H */

View File

@ -0,0 +1,442 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MAPLEBE_INCLUDE_CG_EMIT_H
#define MAPLEBE_INCLUDE_CG_EMIT_H
/* C++ headers */
#include <fstream>
#include <functional>
#include <map>
#include <array>
#include "isa.h"
#include "lsda.h"
#include "asm_info.h"
#include "cg.h"
/* Maple IR headers */
#include "mir_module.h"
#include "mir_const.h"
#include "mempool_allocator.h"
#include "muid_replacement.h"
#include "namemangler.h"
#include "debug_info.h"
#include "alignment.h"
namespace maple {
const char *GetDwTagName(unsigned n);
const char *GetDwFormName(unsigned n);
const char *GetDwAtName(unsigned n);
} /* namespace maple */
/* Assembler comment leader: RISC-V asm uses '#', ARM/other targets use '//'. */
#if TARGRISCV64
#define CMNT "\t# "
#else
#define CMNT "\t// "
#endif
/* Well-known label names used to bracket the emitted text and debug sections. */
#define TEXT_BEGIN text0
#define TEXT_END etext0
#define DEBUG_INFO_0 debug_info0
#define DEBUG_ABBREV_0 debug_abbrev0
#define DEBUG_LINE_0 debug_line0
#define DEBUG_STR_LABEL ASF
namespace maplebe {
constexpr int32 kSizeOfDecoupleStaticStruct = 4;
/* Threshold at which a shared object must be split into multiple "huge so" parts. */
constexpr uint32 kHugeSoInsnCountThreshold = 0x1f00000; /* 124M (4bytes per Insn), leave 4M rooms for 128M */
constexpr char kHugeSoPostFix[] = "$$hugeso_";
constexpr char kDebugMapleThis[] = "_this";
/* DWARF version emitted for debug info sections. */
constexpr uint32 kDwarfVersion = 4;
constexpr uint32 kSizeOfPTR = 8;
/* StructEmitInfo: running state while emitting a struct constant — tracks the
 * next field offset, the bit-field bits accumulated so far (width + value) and
 * the total number of bytes already emitted.
 * Fix: the SetCombineBitFieldWidth parameter was misleadingly named `offset`;
 * it sets a bit width, so it is now named `width`. */
class StructEmitInfo {
public:
    /* default ctor */
    StructEmitInfo() = default;
    ~StructEmitInfo() = default;
    uint16 GetNextFieldOffset() const
    {
        return nextFieldOffset;
    }
    void SetNextFieldOffset(uint16 offset)
    {
        nextFieldOffset = offset;
    }
    void IncreaseNextFieldOffset(uint16 value)
    {
        nextFieldOffset += value;
    }
    uint8 GetCombineBitFieldWidth() const
    {
        return combineBitFieldWidth;
    }
    void SetCombineBitFieldWidth(uint8 width)
    {
        combineBitFieldWidth = width;
    }
    void IncreaseCombineBitFieldWidth(uint8 value)
    {
        combineBitFieldWidth += value;
    }
    void DecreaseCombineBitFieldWidth(uint8 value)
    {
        combineBitFieldWidth -= value;
    }
    uint64 GetCombineBitFieldValue() const
    {
        return combineBitFieldValue;
    }
    void SetCombineBitFieldValue(uint64 value)
    {
        combineBitFieldValue = value;
    }
    uint64 GetTotalSize() const
    {
        return totalSize;
    }
    void SetTotalSize(uint64 value)
    {
        totalSize = value;
    }
    void IncreaseTotalSize(uint64 value)
    {
        totalSize += value;
    }

private:
    /* Next field offset in struct. */
    uint16 nextFieldOffset = 0;
    /* Bit width and value of adjacent bit-fields merged into one emit unit. */
    uint8 combineBitFieldWidth = 0;
    uint64 combineBitFieldValue = 0;
    /* Total size emitted in current struct. */
    uint64 totalSize = 0;
};
/* FuncEmitInfo: base holder binding per-function emission state to its CGFunc.
 * Constructible only by subclasses (protected ctor/dtor). */
class FuncEmitInfo {
public:
    CGFunc &GetCGFunc()
    {
        return cgFunc;
    }
    const CGFunc &GetCGFunc() const
    {
        return cgFunc;
    }

protected:
    explicit FuncEmitInfo(CGFunc &func) : cgFunc(func) {}
    ~FuncEmitInfo() = default;

private:
    CGFunc &cgFunc;
};
/* Emitter: writes the compiled module out as assembly text — constants, global
 * variables, metadata tables and DWARF debug info. Target emitters derive from
 * this class; construction is restricted to them (protected ctor).
 * Fix: the EMIT_INSN_COUNT-guarded code in IncreaseJavaInsnCount contained
 * `Emit(javaInsnCount *);` — a syntax error whenever that macro is defined;
 * corrected to `Emit(javaInsnCount);`. */
class Emitter {
public:
    virtual void Finish() {}
    /* Closes the output stream and drops all per-module caches. */
    virtual void CloseOutput()
    {
        if (fileStream.is_open()) {
            fileStream.close();
        }
        rangeIdx2PrefixStr.clear();
        hugeSoTargets.clear();
        labdie2labidxTable.clear();
        fileMap.clear();
    }
    MOperator GetCurrentMOP() const
    {
        return currentMop;
    }
    void SetCurrentMOP(const MOperator &mOp)
    {
        currentMop = mOp;
    }
    void EmitAsmLabel(AsmLabel label);
    void EmitAsmLabel(const MIRSymbol &mirSymbol, AsmLabel label);
    void EmitFileInfo(const std::string &fileName);
    /* a symbol start/end a block */
    void EmitBlockMarker(const std::string &markerName, const std::string &sectionName, bool withAddr,
                         const std::string &addrName = "");
    void EmitNullConstant(uint64 size);
    void EmitCombineBfldValue(StructEmitInfo &structEmitInfo);
    void EmitBitFieldConstant(StructEmitInfo &structEmitInfo, MIRConst &mirConst, const MIRType *nextType,
                              uint64 fieldOffset);
    void EmitScalarConstant(MIRConst &mirConst, bool newLine = true, bool flag32 = false, bool isIndirect = false);
    void EmitStr(const std::string &mplStr, bool emitAscii = false, bool emitNewline = false);
    void EmitStrConstant(const MIRStrConst &mirStrConst, bool isIndirect = false);
    void EmitStr16Constant(const MIRStr16Const &mirStr16Const);
    void EmitIntConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst, uint32 itabConflictIndex,
                      const std::map<GStrIdx, MIRType *> &strIdx2Type, size_t idx);
    void EmitAddrofFuncConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx);
    void EmitAddrofSymbolConst(const MIRSymbol &mirSymbol, MIRConst &elemConst, size_t idx);
    void EmitConstantTable(const MIRSymbol &mirSymbol, MIRConst &mirConst,
                           const std::map<GStrIdx, MIRType *> &strIdx2Type);
    void EmitClassInfoSequential(const MIRSymbol &mirSymbol, const std::map<GStrIdx, MIRType *> &strIdx2Type,
                                 const std::string &sectionName);
    void EmitMethodFieldSequential(const MIRSymbol &mirSymbol, const std::map<GStrIdx, MIRType *> &strIdx2Type,
                                   const std::string &sectionName);
    void EmitLiterals(std::vector<std::pair<MIRSymbol *, bool>> &literals,
                      const std::map<GStrIdx, MIRType *> &strIdx2Type);
    void EmitFuncLayoutInfo(const MIRSymbol &layout);
    void EmitGlobalVars(std::vector<std::pair<MIRSymbol *, bool>> &globalVars);
    void EmitGlobalVar(const MIRSymbol &globalVar);
    void EmitStaticFields(const std::vector<MIRSymbol *> &fields);
    void EmitLiteral(const MIRSymbol &literal, const std::map<GStrIdx, MIRType *> &strIdx2Type);
    void EmitStringPointers();
    void GetHotAndColdMetaSymbolInfo(const std::vector<MIRSymbol *> &mirSymbolVec,
                                     std::vector<MIRSymbol *> &hotFieldInfoSymbolVec,
                                     std::vector<MIRSymbol *> &coldFieldInfoSymbolVec, const std::string &prefixStr,
                                     bool forceCold = false);
    void EmitMetaDataSymbolWithMarkFlag(const std::vector<MIRSymbol *> &mirSymbolVec,
                                        const std::map<GStrIdx, MIRType *> &strIdx2Type, const std::string &prefixStr,
                                        const std::string &sectionName, bool isHotFlag);
    void EmitMethodDeclaringClass(const MIRSymbol &mirSymbol, const std::string &sectionName);
    void MarkVtabOrItabEndFlag(const std::vector<MIRSymbol *> &mirSymbolVec);
    void EmitArrayConstant(MIRConst &mirConst);
    void EmitStructConstant(MIRConst &mirConst);
    void EmitStructConstant(MIRConst &mirConst, uint32 &subStructFieldCounts);
    void EmitVectorConstant(MIRConst &mirConst);
    void EmitLocalVariable(const CGFunc &cgFunc);
    void EmitUninitializedSymbolsWithPrefixSection(const MIRSymbol &symbol, const std::string &sectionName);
    void EmitGlobalVariable();
    void EmitGlobalRootList(const MIRSymbol &mirSymbol);
    void EmitMuidTable(const std::vector<MIRSymbol *> &vec, const std::map<GStrIdx, MIRType *> &strIdx2Type,
                       const std::string &sectionName);
    MIRAddroffuncConst *GetAddroffuncConst(const MIRSymbol &mirSymbol, MIRAggConst &aggConst);
    int64 GetFieldOffsetValue(const std::string &className, const MIRIntConst &intConst,
                              const std::map<GStrIdx, MIRType *> &strIdx2Type);
    /* Streaming-style helpers: append a value to the output and return *this. */
    Emitter &Emit(int64 val)
    {
        fileStream << val;
        return *this;
    }
    Emitter &Emit(const IntVal &val)
    {
        fileStream << val.GetExtValue();
        return *this;
    }
    Emitter &Emit(const MapleString &str)
    {
        DEBUG_ASSERT(str.c_str() != nullptr, "nullptr check");
        fileStream << str;
        return *this;
    }
    Emitter &Emit(const std::string &str)
    {
        fileStream << str;
        return *this;
    }
    /* Raw binary write (used when emitting object-file style output). */
    Emitter &Emit(const void *data, size_t size)
    {
        fileStream.write(reinterpret_cast<const char *>(data), size);
        return *this;
    }
    void SetFileOffset(uint64 offset)
    {
        fileStream.seekp(offset);
    }
    void EmitLabelRef(LabelIdx labIdx);
    void EmitStmtLabel(LabelIdx labIdx);
    void EmitLabelPair(const LabelPair &pairLabel);
    void EmitLabelForFunc(const MIRFunction *func, LabelIdx labIdx);
    /* Emit signed/unsigned integer literals in decimal or hexadecimal */
    void EmitDecSigned(int64 num);
    void EmitDecUnsigned(uint64 num);
    void EmitHexUnsigned(uint64 num);
    /* Dwarf debug info */
    void FillInClassByteSize(DBGDie *die, DBGDieAttr *byteSizeAttr);
    void SetupDBGInfo(DebugInfo *mirdi);
    void ApplyInPrefixOrder(DBGDie *die, const std::function<void(DBGDie *)> &func);
    void AddLabelDieToLabelIdxMapping(DBGDie *lblDie, LabelIdx lblIdx);
    LabelIdx GetLabelIdxForLabelDie(DBGDie *lblDie);
    void EmitDIHeader();
    void EmitDIFooter();
    void EmitDIHeaderFileInfo();
    void EmitDIDebugInfoSection(DebugInfo *mirdi);
    void EmitDIDebugAbbrevSection(DebugInfo *mirdi);
    void EmitDIDebugARangesSection();
    void EmitDIDebugRangesSection();
    void EmitDIDebugLineSection();
    void EmitDIDebugStrSection();
    void EmitDIAttrValue(DBGDie *die, DBGDieAttr *attr, DwAt attrName, DwTag tagName, DebugInfo *di);
    void EmitDIFormSpecification(unsigned int dwform);
    void EmitDIFormSpecification(const DBGDieAttr *attr)
    {
        EmitDIFormSpecification(attr->GetDwForm());
    }
#if 1 /* REQUIRE TO SEPARATE TARGAARCH64 TARGARM32 */
    /* Following code is under TARGAARCH64 condition */
    void EmitHugeSoRoutines(bool lastRoutine = false);
    void EmitInlineAsmSection();
    uint64 GetJavaInsnCount() const
    {
        return javaInsnCount;
    }
    uint64 GetFuncInsnCount() const
    {
        return funcInsnCount;
    }
    MapleMap<uint32_t, std::string> &GetFileMap()
    {
        return fileMap;
    }
    void SetFileMapValue(uint32_t n, const std::string &file)
    {
        fileMap[n] = file;
    }
    CG *GetCG() const
    {
        return cg;
    }
    void ClearFuncInsnCount()
    {
        funcInsnCount = 0;
    }
    /* Bump insn counters; alignToQuad first rounds both counters up to even. */
    void IncreaseJavaInsnCount(uint64 n = 1, bool alignToQuad = false)
    {
        if (alignToQuad) {
            javaInsnCount = (javaInsnCount + 1) & (~0x1UL);
            funcInsnCount = (funcInsnCount + 1) & (~0x1UL);
        }
        javaInsnCount += n;
        funcInsnCount += n;
#ifdef EMIT_INSN_COUNT
        Emit(" /* InsnCount: ");
        Emit(javaInsnCount); /* fixed: was `Emit(javaInsnCount *);` — did not compile */
        Emit("*/ ");
#endif
    }
    bool NeedToDealWithHugeSo() const
    {
        return javaInsnCount > kHugeSoInsnCountThreshold;
    }
    std::string HugeSoPostFix() const
    {
        return std::string(kHugeSoPostFix) + std::to_string(hugeSoSeqence);
    }
    void InsertHugeSoTarget(const std::string &target)
    {
        (void)hugeSoTargets.insert(target);
    }
#endif
    void InsertLabdie2labidxTable(DBGDie *lbldie, LabelIdx lab)
    {
        if (labdie2labidxTable.find(lbldie) == labdie2labidxTable.end()) {
            labdie2labidxTable[lbldie] = lab;
        }
    }

protected:
    /* NOTE(review): fileName is currently unused here — presumably consumed by
     * derived emitters when opening fileStream; confirm before removing. */
    Emitter(CG &cg, const std::string &fileName)
        : cg(&cg),
          rangeIdx2PrefixStr(cg.GetMIRModule()->GetMPAllocator().Adapter()),
          arraySize(0),
          isFlexibleArray(false),
          stringPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()),
          localStrPtr(cg.GetMIRModule()->GetMPAllocator().Adapter()),
          hugeSoTargets(cg.GetMIRModule()->GetMPAllocator().Adapter()),
          labdie2labidxTable(std::less<DBGDie *>(), cg.GetMIRModule()->GetMPAllocator().Adapter()),
          fileMap(std::less<uint32_t>(), cg.GetMIRModule()->GetMPAllocator().Adapter())
    {
        MIRModule &mirModule = *cg.GetMIRModule();
        memPool = mirModule.GetMemPool();
        asmInfo = memPool->New<AsmInfo>(*memPool);
    }
    ~Emitter() = default;

protected:
    std::ofstream fileStream;
    MemPool *memPool;
    CG *cg;

private:
    AsmLabel GetTypeAsmInfoName(PrimType primType) const;
    void EmitDWRef(const std::string &name);
    void InitRangeIdx2PerfixStr();
    void EmitAddressString(const std::string &address);
    void EmitAliasAndRef(const MIRSymbol &sym); /* handle function symbol which has alias and weak ref */
    MOperator currentMop = UINT_MAX;
    MapleUnorderedMap<int, std::string> rangeIdx2PrefixStr;
    const AsmInfo *asmInfo;
    uint32 arraySize;
    bool isFlexibleArray;
    MapleSet<UStrIdx> stringPtr;
    MapleVector<UStrIdx> localStrPtr;
#if 1 /* REQUIRE TO SEPARATE TARGAARCH64 TARGARM32 */
    /* Following code is under TARGAARCH64 condition */
    uint64 javaInsnCount = 0;
    uint64 funcInsnCount = 0;
    MapleSet<std::string> hugeSoTargets;
    uint32 hugeSoSeqence = 2;
#endif
    MapleMap<DBGDie *, LabelIdx> labdie2labidxTable;
    MapleMap<uint32_t, std::string> fileMap;
};
/* OpndEmitVisitor: visitor base for emitting each concrete operand kind;
 * target emitters implement the per-operand Visit overloads. */
class OpndEmitVisitor : public OperandVisitorBase,
                        public OperandVisitors<RegOperand, ImmOperand, MemOperand, OfstOperand, ListOperand,
                                               LabelOperand, FuncNameOperand, StImmOperand, CondOperand,
                                               BitShiftOperand, ExtendShiftOperand, CommentOperand> {
public:
    explicit OpndEmitVisitor(Emitter &asmEmitter) : emitter(asmEmitter) {}
    virtual ~OpndEmitVisitor() = default;

protected:
    /* Output sink shared by all Visit overloads. */
    Emitter &emitter;
};
} /* namespace maplebe */
#endif /* MAPLEBE_INCLUDE_CG_EMIT_H */

View File

@ -0,0 +1,50 @@
/*
* Copyright (c) 2023 Huawei Device Co., Ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass1_7C_28_29Ljava_2Flang_2FClass_3B",
"Ldalvik_2Fsystem_2FVMStack_3B_7CgetStackClass2_7C_28_29Ljava_2Flang_2FClass_3B",
"Ljava_2Flang_2FClass_3B_7CnewInstance_7C_28_29Ljava_2Flang_2FObject_3B",
"Ljava_2Flang_2Freflect_2FConstructor_3B_7CnewInstance_7C_28ALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetShort_7C_28Ljava_2Flang_2FObject_3B_29S",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetInt_7C_28Ljava_2Flang_2FObject_3B_29I",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetFloat_7C_28Ljava_2Flang_2FObject_3B_29F",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetDouble_7C_28Ljava_2Flang_2FObject_3B_29D",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetChar_7C_28Ljava_2Flang_2FObject_3B_29C",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetByte_7C_28Ljava_2Flang_2FObject_3B_29B",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetBoolean_7C_28Ljava_2Flang_2FObject_3B_29Z",
"Ljava_2Flang_2Freflect_2FField_3B_7CgetLong_7C_28Ljava_2Flang_2FObject_3B_29J",
"Ljava_2Flang_2Freflect_2FField_3B_7Cget_7C_28Ljava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetBoolean_7C_28Ljava_2Flang_2FObject_3BZ_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7Cset_7C_28Ljava_2Flang_2FObject_3BLjava_2Flang_2FObject_3B_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetShort_7C_28Ljava_2Flang_2FObject_3BS_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetLong_7C_28Ljava_2Flang_2FObject_3BJ_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetInt_7C_28Ljava_2Flang_2FObject_3BI_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetFloat_7C_28Ljava_2Flang_2FObject_3BF_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetDouble_7C_28Ljava_2Flang_2FObject_3BD_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetChar_7C_28Ljava_2Flang_2FObject_3BC_29V",
"Ljava_2Flang_2Freflect_2FField_3B_7CsetByte_7C_28Ljava_2Flang_2FObject_3BB_29V",
"LThrowableNativeUncover_3B_7Cmain_7C_28ALjava_2Flang_2FString_3B_29V",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_247_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_249_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2410_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithRedefinedMethods_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_2413_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_241ImplementationSuperUser_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24InterfaceWithStatic_3B_7CstaticMethod_7C_28_29Ljava_2Flang_2FString_3B",
"Ljava_2Flang_2Freflect_2FMethod_3B_7Cinvoke_7C_28Ljava_2Flang_2FObject_3BALjava_2Flang_2FObject_3B_29Ljava_2Flang_2FObject_3B",
"Llibcore_2Fjava_2Flang_2Freflect_2FMethodTest_24OtherInterfaceWithDefault_3B_7CdefaultMethod_7C_28_29Ljava_2Flang_2FString_3B",
"LStackoverflow_3B_7CstackOverflow_7C_28_29V",
"Llibcore_2Fsun_2Fmisc_2FUnsafeTest_241_3B_7Crun_7C_28_29V"

Some files were not shown because too many files have changed in this diff Show More