Mirror of https://github.com/CTCaer/switch-l4t-atf.git, synced 2024-11-27 03:40:22 +00:00
Switch AARCH32/AARCH64 to __aarch64__
NOTE: AARCH32/AARCH64 macros are now deprecated in favor of __aarch64__.

All common C compilers pre-define the same macros to signal which
architecture the code is being compiled for: __arm__ for AArch32 (or
earlier versions) and __aarch64__ for AArch64. There's no need for TF-A
to define its own custom macros for this. In order to unify code with
the export headers (which use __aarch64__ to avoid another dependency),
let's deprecate the AARCH32 and AARCH64 macros and switch the code base
over to the pre-defined standard macro. (Since it is somewhat unintuitive
that __arm__ only means AArch32, let's standardize on only using
__aarch64__.)

Change-Id: Ic77de4b052297d77f38fc95f95f65a8ee70cf200
Signed-off-by: Julius Werner <jwerner@chromium.org>
This commit is contained in: parent d5dfdeb65f, commit 402b3cf876

Makefile | 12 lines changed
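The conversion itself is mechanical. As a reading aid (not part of the commit), the pattern repeated throughout the hunks below boils down to swapping the build-system define for the compiler's pre-defined macro; the snippet is lifted from the bl1 FWU hunk further down:

```c
/* Before: relies on -DAARCH64 being passed by the build system. */
#ifdef AARCH64
	cm_el1_sysregs_context_save(NON_SECURE);
#endif

/* After: relies on the macro every AArch64 compiler pre-defines. */
#ifdef __aarch64__
	cm_el1_sysregs_context_save(NON_SECURE);
#endif
```

AArch32-only code is correspondingly guarded with `#ifndef __aarch64__` rather than `#ifdef AARCH32`.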
@@ -734,12 +734,6 @@ else
$(eval $(call add_define,PRELOADED_BL33_BASE))
endif
endif
# Define the AARCH32/AARCH64 flag based on the ARCH flag
ifeq (${ARCH},aarch32)
$(eval $(call add_define,AARCH32))
else
$(eval $(call add_define,AARCH64))
endif

# Define the DYN_DISABLE_AUTH flag only if set.
ifeq (${DYN_DISABLE_AUTH},1)
@@ -771,6 +765,12 @@ else
endif
# __ASSEMBLY__ is deprecated in favor of the compiler-builtin __ASSEMBLER__.
ASFLAGS += -D__ASSEMBLY__
# AARCH32/AARCH64 macros are deprecated in favor of the compiler-builtin __aarch64__.
ifeq (${ARCH},aarch32)
$(eval $(call add_define,AARCH32))
else
$(eval $(call add_define,AARCH64))
endif
endif # !ERROR_DEPRECATED

$(eval $(call MAKE_LIB_DIRS))
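The second Makefile hunk keeps emitting -DAARCH32/-DAARCH64 when the build is configured with ERROR_DEPRECATED=0, so out-of-tree code that still tests the old macros keeps compiling during the transition. A minimal sketch (illustrative only, not from the tree; REG_WIDTH is a made-up name) of code that works under either spelling:

```c
#ifdef __aarch64__
#define REG_WIDTH	64	/* pre-defined by any AArch64 compiler */
#else
#define REG_WIDTH	32	/* AArch32: the compiler defines __arm__ instead */
#endif

/* Legacy guard: only satisfied while the build system still adds -DAARCH64. */
#if defined(AARCH64) && (REG_WIDTH != 64)
#error "deprecated AARCH64 define disagrees with __aarch64__"
#endif
```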
@@ -520,7 +520,7 @@ static int bl1_fwu_image_execute(unsigned int image_id,

INFO("BL1-FWU: Executing Secure image\n");

#ifdef AARCH64
#ifdef __aarch64__
/* Save NS-EL1 system registers. */
cm_el1_sysregs_context_save(NON_SECURE);
#endif
@@ -531,7 +531,7 @@ static int bl1_fwu_image_execute(unsigned int image_id,
/* Update the secure image id. */
sec_exec_image_id = image_id;

#ifdef AARCH64
#ifdef __aarch64__
*handle = cm_get_context(SECURE);
#else
*handle = smc_get_ctx(SECURE);
@@ -584,7 +584,7 @@ static register_t bl1_fwu_image_resume(register_t image_param,
INFO("BL1-FWU: Resuming %s world context\n",
(resume_sec_state == SECURE) ? "secure" : "normal");

#ifdef AARCH64
#ifdef __aarch64__
/* Save the EL1 system registers of calling world. */
cm_el1_sysregs_context_save(caller_sec_state);

@@ -641,7 +641,7 @@ static int bl1_fwu_sec_image_done(void **handle, unsigned int flags)
sec_exec_image_id = INVALID_IMAGE_ID;

INFO("BL1-FWU: Resuming Normal world context\n");
#ifdef AARCH64
#ifdef __aarch64__
/*
* Secure world is done so no need to save the context.
* Just restore the Non-Secure context.

@@ -59,7 +59,7 @@ void bl1_setup(void)
/* Perform early platform-specific setup */
bl1_early_platform_setup();

#ifdef AARCH64
#ifdef __aarch64__
/*
* Update pointer authentication key before the MMU is enabled. It is
* saved in the rodata section, that can be writen before enabling the
@@ -67,7 +67,7 @@ void bl1_setup(void)
* in the early platform setup.
*/
bl_handle_pauth();
#endif /* AARCH64 */
#endif /* __aarch64__ */

/* Perform late platform-specific setup */
bl1_plat_arch_setup();
@@ -97,10 +97,10 @@ void bl1_main(void)
/*
* Ensure that MMU/Caches and coherency are turned on
*/
#ifdef AARCH32
val = read_sctlr();
#else
#ifdef __aarch64__
val = read_sctlr_el3();
#else
val = read_sctlr();
#endif
assert(val & SCTLR_M_BIT);
assert(val & SCTLR_C_BIT);
@@ -198,11 +198,11 @@ static void bl1_load_bl2(void)
******************************************************************************/
void bl1_print_next_bl_ep_info(const entry_point_info_t *bl_ep_info)
{
#ifdef AARCH32
NOTICE("BL1: Booting BL32\n");
#else
#ifdef __aarch64__
NOTICE("BL1: Booting BL31\n");
#endif /* AARCH32 */
#else
NOTICE("BL1: Booting BL32\n");
#endif /* __aarch64__ */
print_entry_point_info(bl_ep_info);
}
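In bl1_main() above the SCTLR check has to read a different register per architecture. A small sketch of the same selection wrapped in a helper, reusing the accessor names from the hunk (sketch only, assuming TF-A's u_register_t and read_sctlr*() helpers):

```c
static inline u_register_t current_sctlr(void)
{
#ifdef __aarch64__
	return read_sctlr_el3();	/* BL1 runs at EL3 on AArch64 */
#else
	return read_sctlr();		/* Secure PL1/Monitor on AArch32 */
#endif
}

/* e.g. assert(current_sctlr() & SCTLR_M_BIT);  -- MMU must already be on */
```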
@@ -15,10 +15,10 @@

#include "bl2_private.h"

#ifdef AARCH32
#define NEXT_IMAGE "BL32"
#else
#ifdef __aarch64__
#define NEXT_IMAGE "BL31"
#else
#define NEXT_IMAGE "BL32"
#endif

#if !BL2_AT_EL3
@@ -31,7 +31,7 @@ void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
/* Perform early platform-specific setup */
bl2_early_platform_setup2(arg0, arg1, arg2, arg3);

#ifdef AARCH64
#ifdef __aarch64__
/*
* Update pointer authentication key before the MMU is enabled. It is
* saved in the rodata section, that can be writen before enabling the
@@ -39,7 +39,7 @@ void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
* in the early platform setup.
*/
bl_handle_pauth();
#endif /* AARCH64 */
#endif /* __aarch64__ */

/* Perform late platform-specific setup */
bl2_plat_arch_setup();
@@ -55,7 +55,7 @@ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
/* Perform early platform-specific setup */
bl2_el3_early_platform_setup(arg0, arg1, arg2, arg3);

#ifdef AARCH64
#ifdef __aarch64__
/*
* Update pointer authentication key before the MMU is enabled. It is
* saved in the rodata section, that can be writen before enabling the
@@ -63,7 +63,7 @@ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2,
* in the early platform setup.
*/
bl_handle_pauth();
#endif /* AARCH64 */
#endif /* __aarch64__ */

/* Perform late platform-specific setup */
bl2_el3_plat_arch_setup();
@@ -97,14 +97,14 @@ void bl2_main(void)
next_bl_ep_info = bl2_load_images();

#if !BL2_AT_EL3
#ifdef AARCH32
#ifndef __aarch64__
/*
* For AArch32 state BL1 and BL2 share the MMU setup.
* Given that BL2 does not map BL1 regions, MMU needs
* to be disabled in order to go back to BL1.
*/
disable_mmu_icache_secure();
#endif /* AARCH32 */
#endif /* !__aarch64__ */

console_flush();

@@ -45,14 +45,14 @@ void bl2u_main(void)

console_flush();

#ifdef AARCH32
#ifndef __aarch64__
/*
* For AArch32 state BL1 and BL2U share the MMU setup.
* Given that BL2U does not map BL1 regions, MMU needs
* to be disabled in order to go back to BL1.
*/
disable_mmu_icache_secure();
#endif /* AARCH32 */
#endif /* !__aarch64__ */

/*
* Indicate that BL2U is done and resume back to
@@ -93,7 +93,7 @@ static const char *get_el_str(unsigned int el)
* Returns true if the address points to a virtual address that can be read at
* the current EL, false otherwise.
*/
#ifdef AARCH64
#ifdef __aarch64__
static bool is_address_readable(uintptr_t addr)
{
unsigned int el = get_current_el();
@@ -123,7 +123,7 @@ static bool is_address_readable(uintptr_t addr)

return true;
}
#else /* if AARCH32 */
#else /* !__aarch64__ */
static bool is_address_readable(uintptr_t addr)
{
unsigned int el = get_current_el();
@@ -144,7 +144,7 @@ static bool is_address_readable(uintptr_t addr)

return true;
}
#endif
#endif /* __aarch64__ */

/*
* Returns true if all the bytes in a given object are in mapped memory and an
@@ -207,7 +207,7 @@ static bool is_valid_frame_record(struct frame_record *fr)
*/
static struct frame_record *adjust_frame_record(struct frame_record *fr)
{
#ifdef AARCH64
#ifdef __aarch64__
return fr;
#else
return (struct frame_record *)((uintptr_t)fr - 4U);

@@ -236,7 +236,7 @@ void print_entry_point_info(const entry_point_info_t *ep_info)
PRINT_IMAGE_ARG(1);
PRINT_IMAGE_ARG(2);
PRINT_IMAGE_ARG(3);
#ifndef AARCH32
#ifdef __aarch64__
PRINT_IMAGE_ARG(4);
PRINT_IMAGE_ARG(5);
PRINT_IMAGE_ARG(6);
@@ -245,7 +245,7 @@ void print_entry_point_info(const entry_point_info_t *ep_info)
#undef PRINT_IMAGE_ARG
}

#ifdef AARCH64
#ifdef __aarch64__
/*******************************************************************************
* Handle all possible cases regarding ARMv8.3-PAuth.
******************************************************************************/
@@ -293,4 +293,4 @@ void bl_handle_pauth(void)

#endif /* ENABLE_PAUTH */
}
#endif /* AARCH64 */
#endif /* __aarch64__ */
@@ -17,7 +17,7 @@
#include "ccn_private.h"

static const ccn_desc_t *ccn_plat_desc;
#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
DEFINE_BAKERY_LOCK(ccn_lock);
#endif

@@ -264,7 +264,7 @@ static void ccn_snoop_dvm_do_op(unsigned long long rn_id_map,
assert(ccn_plat_desc);
assert(ccn_plat_desc->periphbase);

#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
bakery_lock_get(&ccn_lock);
#endif
start_region_id = region_id;
@@ -284,7 +284,7 @@ static void ccn_snoop_dvm_do_op(unsigned long long rn_id_map,
rn_id_map);
}

#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
bakery_lock_release(&ccn_lock);
#endif
}

@@ -73,12 +73,12 @@ void __init gicv3_driver_init(const gicv3_driver_data_t *plat_driver_data)
plat_driver_data->interrupt_props != NULL : 1);

/* Check for system register support */
#ifdef AARCH32
assert((read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT)) != 0U);
#else
#ifdef __aarch64__
assert((read_id_aa64pfr0_el1() &
(ID_AA64PFR0_GIC_MASK << ID_AA64PFR0_GIC_SHIFT)) != 0U);
#endif /* AARCH32 */
#else
assert((read_id_pfr1() & (ID_PFR1_GIC_MASK << ID_PFR1_GIC_SHIFT)) != 0U);
#endif /* __aarch64__ */

/* The GIC version should be 3.0 */
gic_version = gicd_read_pidr2(plat_driver_data->gicd_base);

@@ -40,17 +40,17 @@ static inline u_register_t gicd_irouter_val_from_mpidr(u_register_t mpidr,
* Macro to convert a GICR_TYPER affinity value into a MPIDR value. Bits[31:24]
* are zeroes.
*/
#ifdef AARCH32
static inline u_register_t mpidr_from_gicr_typer(uint64_t typer_val)
{
return (((typer_val) >> 32) & U(0xffffff));
}
#else
#ifdef __aarch64__
static inline u_register_t mpidr_from_gicr_typer(uint64_t typer_val)
{
return (((typer_val >> 56) & MPIDR_AFFLVL_MASK) << MPIDR_AFF3_SHIFT) |
((typer_val >> 32) & U(0xffffff));
}
#else
static inline u_register_t mpidr_from_gicr_typer(uint64_t typer_val)
{
return (((typer_val) >> 32) & U(0xffffff));
}
#endif

/*******************************************************************************
@@ -41,7 +41,7 @@ CASSERT(ENTRY_POINT_INFO_PC_OFFSET ==
__builtin_offsetof(entry_point_info_t, pc), \
assert_BL31_pc_offset_mismatch);

#ifdef AARCH32
#ifndef __aarch64__
CASSERT(ENTRY_POINT_INFO_LR_SVC_OFFSET ==
__builtin_offsetof(entry_point_info_t, lr_svc),
assert_entrypoint_lr_offset_error);

@@ -20,15 +20,15 @@
* Constants to allow the assembler access a runtime service
* descriptor
*/
#ifdef AARCH32
#define RT_SVC_SIZE_LOG2 U(4)
#define RT_SVC_DESC_INIT U(8)
#define RT_SVC_DESC_HANDLE U(12)
#else
#ifdef __aarch64__
#define RT_SVC_SIZE_LOG2 U(5)
#define RT_SVC_DESC_INIT U(16)
#define RT_SVC_DESC_HANDLE U(24)
#endif /* AARCH32 */
#else
#define RT_SVC_SIZE_LOG2 U(4)
#define RT_SVC_DESC_INIT U(8)
#define RT_SVC_DESC_HANDLE U(12)
#endif /* __aarch64__ */
#define SIZEOF_RT_SVC_DESC (U(1) << RT_SVC_SIZE_LOG2)

@@ -35,7 +35,7 @@ void cm_init_context_by_index(unsigned int cpu_idx,
void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep);
void cm_prepare_el3_exit(uint32_t security_state);

#ifndef AARCH32
#ifdef __aarch64__
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
void cm_set_elr_el3(uint32_t security_state, uintptr_t entrypoint);
@@ -78,6 +78,6 @@ static inline void cm_set_next_context(void *context)
#else
void *cm_get_next_context(void);
void cm_set_next_context(void *context);
#endif /* AARCH32 */
#endif /* __aarch64__ */

#endif /* CONTEXT_MGMT_H */
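The context-management functions above are only declared for AArch64. A rough sketch (not taken from the tree) of how they are typically driven around a world switch, using only names declared in this header or used earlier in the diff:

```c
#ifdef __aarch64__
static void enter_secure_world(void)
{
	/* Stash the normal world's EL1 system registers... */
	cm_el1_sysregs_context_save(NON_SECURE);

	/* ...bring in the secure world's, and tell EL3 which context to eret into. */
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_context(cm_get_context(SECURE));
}
#endif
```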
@@ -11,15 +11,7 @@

#include <bl31/ehf.h>

#ifdef AARCH32

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR 0x0
#define CPU_DATA_CRASH_BUF_OFFSET 0x4

#else /* AARCH32 */
#ifdef __aarch64__

/* Offsets for the cpu_data structure */
#define CPU_DATA_CRASH_BUF_OFFSET 0x18
@@ -27,7 +19,15 @@
#define CPU_DATA_CRASH_BUF_SIZE 64
#define CPU_DATA_CPU_OPS_PTR 0x10

#endif /* AARCH32 */
#else /* __aarch64__ */

#if CRASH_REPORTING
#error "Crash reporting is not supported in AArch32"
#endif
#define CPU_DATA_CPU_OPS_PTR 0x0
#define CPU_DATA_CRASH_BUF_OFFSET 0x4

#endif /* __aarch64__ */

#if CRASH_REPORTING
#define CPU_DATA_CRASH_BUF_END (CPU_DATA_CRASH_BUF_OFFSET + \
@@ -84,7 +84,7 @@
* used for this.
******************************************************************************/
typedef struct cpu_data {
#ifndef AARCH32
#ifdef __aarch64__
void *cpu_context[2];
#endif
uintptr_t cpu_ops_ptr;
@@ -127,7 +127,7 @@ CASSERT(CPU_DATA_PMF_TS0_OFFSET == __builtin_offsetof

struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);

#ifndef AARCH32
#ifdef __aarch64__
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
{

@@ -24,7 +24,7 @@ REGISTER_PUBSUB_EVENT(psci_cpu_on_finish);
REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_start);
REGISTER_PUBSUB_EVENT(psci_suspend_pwrdown_finish);

#ifdef AARCH64
#ifdef __aarch64__
/*
* These events are published by the AArch64 context management framework
* after the secure context is restored/saved via
@@ -40,4 +40,4 @@ REGISTER_PUBSUB_EVENT(cm_exited_secure_world);
*/
REGISTER_PUBSUB_EVENT(cm_entering_normal_world);
REGISTER_PUBSUB_EVENT(cm_exited_normal_world);
#endif /* AARCH64 */
#endif /* __aarch64__ */
@@ -21,10 +21,10 @@
#define BIT_32(nr) (U(1) << (nr))
#define BIT_64(nr) (ULL(1) << (nr))

#ifdef AARCH32
#define BIT BIT_32
#else
#ifdef __aarch64__
#define BIT BIT_64
#else
#define BIT BIT_32
#endif

/*
@@ -46,10 +46,10 @@
(((~UINT64_C(0)) << (l)) & (~UINT64_C(0) >> (64 - 1 - (h))))
#endif

#ifdef AARCH32
#define GENMASK GENMASK_32
#else
#ifdef __aarch64__
#define GENMASK GENMASK_64
#else
#define GENMASK GENMASK_32
#endif

/*
@@ -109,10 +109,10 @@
((_u32) > (UINT32_MAX - (_inc)))

/* Register size of the current architecture. */
#ifdef AARCH32
#define REGSZ U(4)
#else
#ifdef __aarch64__
#define REGSZ U(8)
#else
#define REGSZ U(4)
#endif

/*
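With the guards above, BIT, GENMASK and REGSZ follow the native register width of the target. A self-contained sketch of the idea (the U()/ULL() stand-ins below are assumptions mirroring TF-A's suffix helpers, not the real header):

```c
#include <stdint.h>

#define U(v)	(v##U)
#define ULL(v)	(v##ULL)
#define BIT_32(nr)	(U(1) << (nr))
#define BIT_64(nr)	(ULL(1) << (nr))

#ifdef __aarch64__
#define BIT	BIT_64
#define REGSZ	U(8)
#else
#define BIT	BIT_32
#define REGSZ	U(4)
#endif

/* REGSZ tracks the pointer size of the target, so masks built with BIT()
 * always fit in a general-purpose register. */
_Static_assert(sizeof(uintptr_t) == REGSZ, "REGSZ must match the register width");
```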
@@ -65,14 +65,7 @@ void setup_mmu_cfg(uint64_t *params, unsigned int flags,
const uint64_t *base_table, unsigned long long max_pa,
uintptr_t max_va, int xlat_regime);

#ifdef AARCH32
/* AArch32 specific translation table API */
void enable_mmu_svc_mon(unsigned int flags);
void enable_mmu_hyp(unsigned int flags);

void enable_mmu_direct_svc_mon(unsigned int flags);
void enable_mmu_direct_hyp(unsigned int flags);
#else
#ifdef __aarch64__
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el2(unsigned int flags);
@@ -81,7 +74,14 @@ void enable_mmu_el3(unsigned int flags);
void enable_mmu_direct_el1(unsigned int flags);
void enable_mmu_direct_el2(unsigned int flags);
void enable_mmu_direct_el3(unsigned int flags);
#endif /* AARCH32 */
#else
/* AArch32 specific translation table API */
void enable_mmu_svc_mon(unsigned int flags);
void enable_mmu_hyp(unsigned int flags);

void enable_mmu_direct_svc_mon(unsigned int flags);
void enable_mmu_direct_hyp(unsigned int flags);
#endif /* __aarch64__ */

bool xlat_arch_is_granule_size_supported(size_t size);
size_t xlat_arch_get_max_supported_granule_size(void);

@@ -7,10 +7,10 @@
#ifndef XLAT_TABLES_ARCH_H
#define XLAT_TABLES_ARCH_H

#ifdef AARCH32
#include "aarch32/xlat_tables_aarch32.h"
#else
#ifdef __aarch64__
#include "aarch64/xlat_tables_aarch64.h"
#else
#include "aarch32/xlat_tables_aarch32.h"
#endif

/*
@@ -62,7 +62,7 @@
#define OSH (U(0x2) << 6)
#define ISH (U(0x3) << 6)

#ifdef AARCH64
#ifdef __aarch64__
/* Guarded Page bit */
#define GP (ULL(1) << 50)
#endif
@@ -434,7 +434,7 @@
#endif
#endif

#if defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME
#if !defined(__aarch64__) || JUNO_AARCH32_EL3_RUNTIME
/*******************************************************************************
* BL32 specific defines for EL3 runtime in AArch32 mode
******************************************************************************/
@@ -498,17 +498,17 @@
# else
# error "Unsupported ARM_TSP_RAM_LOCATION_ID value"
# endif
#endif /* AARCH32 || JUNO_AARCH32_EL3_RUNTIME */
#endif /* !__aarch64__ || JUNO_AARCH32_EL3_RUNTIME */

/*
* BL32 is mandatory in AArch32. In AArch64, undefine BL32_BASE if there is no
* SPD and no SPM, as they are the only ones that can be used as BL32.
*/
#if !(defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME)
#if defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME
# if defined(SPD_none) && !ENABLE_SPM
# undef BL32_BASE
# endif /* defined(SPD_none) && !ENABLE_SPM */
#endif /* !(defined(AARCH32) || JUNO_AARCH32_EL3_RUNTIME) */
#endif /* defined(__aarch64__) && !JUNO_AARCH32_EL3_RUNTIME */

/*******************************************************************************
* FWU Images: NS_BL1U, BL2U & NS_BL2U defines.

@@ -69,7 +69,7 @@ typedef struct arm_tzc_regions_info {

void arm_setup_romlib(void);

#if defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32))
#if defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32))
/*
* Use this macro to instantiate lock before it is used in below
* arm_lock_xxx() macros
@@ -102,7 +102,7 @@ void arm_setup_romlib(void);
#define arm_lock_get()
#define arm_lock_release()

#endif /* defined(IMAGE_BL31) || (defined(AARCH32) && defined(IMAGE_BL32)) */
#endif /* defined(IMAGE_BL31) || (!defined(__aarch64__) && defined(IMAGE_BL32)) */

#if ARM_RECOM_STATE_ID_ENC
/*

@@ -20,13 +20,13 @@
/*
* Platform binary types for linking
*/
#ifdef AARCH32
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#else
#ifdef __aarch64__
#define PLATFORM_LINKER_FORMAT "elf64-littleaarch64"
#define PLATFORM_LINKER_ARCH aarch64
#endif /* AARCH32 */
#else
#define PLATFORM_LINKER_FORMAT "elf32-littlearm"
#define PLATFORM_LINKER_ARCH arm
#endif /* __aarch64__ */

/*
* Generic platform constants
@@ -18,9 +18,9 @@

#ifdef IMAGE_BL1
# define BL_STRING "BL1"
#elif defined(AARCH64) && defined(IMAGE_BL31)
#elif defined(__aarch64__) && defined(IMAGE_BL31)
# define BL_STRING "BL31"
#elif defined(AARCH32) && defined(IMAGE_BL32)
#elif !defined(__aarch64__) && defined(IMAGE_BL32)
# define BL_STRING "BL32"
#elif defined(IMAGE_BL2) && BL2_AT_EL3
# define BL_STRING "BL2"
@@ -167,10 +167,10 @@ void bakery_lock_get(bakery_lock_t *lock)
unsigned int their_bakery_data;

me = plat_my_core_pos();
#ifdef AARCH32
is_cached = read_sctlr() & SCTLR_C_BIT;
#else
#ifdef __aarch64__
is_cached = read_sctlr_el3() & SCTLR_C_BIT;
#else
is_cached = read_sctlr() & SCTLR_C_BIT;
#endif

/* Get a ticket */
@@ -228,10 +228,10 @@ void bakery_lock_get(bakery_lock_t *lock)
void bakery_lock_release(bakery_lock_t *lock)
{
bakery_info_t *my_bakery_info;
#ifdef AARCH32
unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
#else
#ifdef __aarch64__
unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;
#else
unsigned int is_cached = read_sctlr() & SCTLR_C_BIT;
#endif

my_bakery_info = get_bakery_info(plat_my_core_pos(), lock);

@@ -176,7 +176,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
*/
if (!tee_validate_header(header)) {
INFO("Invalid OPTEE header, set legacy mode.\n");
#ifdef AARCH64
#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
header_ep->args.arg0 = MODE_RW_32;
@@ -222,7 +222,7 @@ int parse_optee_header(entry_point_info_t *header_ep,
if (header->arch == 0) {
header_ep->args.arg0 = MODE_RW_32;
} else {
#ifdef AARCH64
#ifdef __aarch64__
header_ep->args.arg0 = MODE_RW_64;
#else
ERROR("Cannot boot an AArch64 OP-TEE\n");
@@ -619,53 +619,7 @@ int psci_validate_mpidr(u_register_t mpidr)
* This function determines the full entrypoint information for the requested
* PSCI entrypoint on power on/resume and returns it.
******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
u_register_t ep_attr;
unsigned int aif, ee, mode;
u_register_t scr = read_scr();
u_register_t ns_sctlr, sctlr;

/* Switch to non secure state */
write_scr(scr | SCR_NS_BIT);
isb();
ns_sctlr = read_sctlr();

sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

/* Return to original state */
write_scr(scr);
isb();
ee = 0;

ep_attr = NON_SECURE | EP_ST_DISABLE;
if (sctlr & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

ep->pc = entrypoint;
zeromem(&ep->args, sizeof(ep->args));
ep->args.arg0 = context_id;

mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

/*
* TODO: Choose async. exception bits if HYP mode is not
* implemented according to the values of SCR.{AW, FW} bits
*/
aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

return PSCI_E_SUCCESS;
}

#else
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
@@ -722,7 +676,53 @@ static int psci_get_ns_ep_info(entry_point_info_t *ep,

return PSCI_E_SUCCESS;
}
#endif
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
uintptr_t entrypoint,
u_register_t context_id)
{
u_register_t ep_attr;
unsigned int aif, ee, mode;
u_register_t scr = read_scr();
u_register_t ns_sctlr, sctlr;

/* Switch to non secure state */
write_scr(scr | SCR_NS_BIT);
isb();
ns_sctlr = read_sctlr();

sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

/* Return to original state */
write_scr(scr);
isb();
ee = 0;

ep_attr = NON_SECURE | EP_ST_DISABLE;
if (sctlr & SCTLR_EE_BIT) {
ep_attr |= EP_EE_BIG;
ee = 1;
}
SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

ep->pc = entrypoint;
zeromem(&ep->args, sizeof(ep->args));
ep->args.arg0 = context_id;

mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

/*
* TODO: Choose async. exception bits if HYP mode is not
* implemented according to the values of SCR.{AW, FW} bits
*/
aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

return PSCI_E_SUCCESS;
}

#endif /* __aarch64__ */

/*******************************************************************************
* This function validates the entrypoint with the platform layer if the
@@ -136,25 +136,7 @@ int xlat_change_mem_attributes(uintptr_t base_va, size_t size, uint32_t attr)
#define MAX_PHYS_ADDR tf_xlat_ctx.max_pa
#endif

#ifdef AARCH32

void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_hyp(flags);
}

#else
#ifdef __aarch64__

void enable_mmu_el1(unsigned int flags)
{
@@ -180,4 +162,22 @@ void enable_mmu_el3(unsigned int flags)
enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
#else /* !__aarch64__ */

void enable_mmu_svc_mon(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL1_EL0_REGIME);
enable_mmu_direct_svc_mon(flags);
}

void enable_mmu_hyp(unsigned int flags)
{
setup_mmu_cfg((uint64_t *)&mmu_cfg_params, flags,
tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address, EL2_REGIME);
enable_mmu_direct_hyp(flags);
}

#endif /* __aarch64__ */

@@ -97,7 +97,7 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc)

printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S");

#ifdef AARCH64
#ifdef __aarch64__
/* Check Guarded Page bit */
if ((desc & GP) != 0ULL) {
printf("-GP");

@@ -273,7 +273,7 @@ $(eval IMAGE := IMAGE_BL$(call uppercase,$(3)))

$(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs
$$(ECHO) "  PP      $$<"
$$(Q)$$(CPP) $$(CPPFLAGS) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<
$$(Q)$$(CPP) $$(CPPFLAGS) $(TF_CFLAGS_$(ARCH)) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -D$(IMAGE) -o $$@ $$<

-include $(DEP)
@@ -81,7 +81,7 @@ const mmap_region_t plat_arm_mmap[] = {
MAP_DEVICE0,
MAP_DEVICE1,
ARM_MAP_NS_DRAM1,
#ifdef AARCH64
#ifdef __aarch64__
ARM_MAP_DRAM2,
#endif
#ifdef SPD_tspd
@@ -150,7 +150,7 @@ const mmap_region_t plat_arm_secure_partition_mmap[] = {
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
#ifndef __aarch64__
ARM_MAP_SHARED_RAM,
ARM_V2M_MAP_MEM_PROTECT,
#endif

@@ -120,7 +120,7 @@
#define PLAT_ARM_MAX_BL31_SIZE UL(0x3B000)
#endif

#ifdef AARCH32
#ifndef __aarch64__
/*
* Since BL32 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL32_SIZE is
* calculated using the current SP_MIN PROGBITS debug size plus the sizes of
@@ -259,7 +259,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -331,7 +331,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -291,7 +291,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifdef AARCH64
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -37,7 +37,7 @@ const mmap_region_t plat_arm_mmap[] = {
CSS_MAP_DEVICE,
SOC_CSS_MAP_DEVICE,
ARM_MAP_NS_DRAM1,
#ifdef AARCH64
#ifdef __aarch64__
ARM_MAP_DRAM2,
#endif
#ifdef SPD_tspd
@@ -74,7 +74,7 @@ const mmap_region_t plat_arm_mmap[] = {
#endif
#ifdef IMAGE_BL32
const mmap_region_t plat_arm_mmap[] = {
#ifdef AARCH32
#ifndef __aarch64__
ARM_MAP_SHARED_RAM,
#ifdef PLAT_ARM_MEM_PROT_ADDR
ARM_V2M_MAP_MEM_PROTECT,

@@ -34,7 +34,7 @@
* space the physical & virtual address space limits are extended to
* 40-bits.
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 40)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 40)
#else

@@ -29,7 +29,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -30,7 +30,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -30,7 +30,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else

@@ -15,7 +15,7 @@
/*
* Physical and virtual address space limits for MMU in AARCH64 & AARCH32 modes
*/
#ifndef AARCH32
#ifdef __aarch64__
#define PLAT_PHY_ADDR_SPACE_SIZE (1ULL << 36)
#define PLAT_VIRT_ADDR_SPACE_SIZE (1ULL << 36)
#else
@@ -121,11 +121,11 @@ void arm_bl1_plat_arch_setup(void)
};

setup_page_tables(bl_regions, plat_arm_get_mmap());
#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el3(0);
#endif /* AARCH32 */
#else
enable_mmu_svc_mon(0);
#endif /* __aarch64__ */

arm_setup_romlib();
}

@@ -83,10 +83,10 @@ void arm_bl2_el3_plat_arch_setup(void)

setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el3(0);
#else
enable_mmu_svc_mon(0);
#endif
}

@@ -128,10 +128,10 @@ void arm_bl2_plat_arch_setup(void)

setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el1(0);
#else
enable_mmu_svc_mon(0);
#endif

arm_setup_romlib();
@@ -153,7 +153,7 @@ int arm_bl2_handle_post_image_load(unsigned int image_id)
assert(bl_mem_params);

switch (image_id) {
#ifdef AARCH64
#ifdef __aarch64__
case BL32_IMAGE_ID:
#ifdef SPD_opteed
pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);

@@ -83,10 +83,10 @@ void arm_bl2u_plat_arch_setup(void)

setup_page_tables(bl_regions, plat_arm_get_mmap());

#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el1(0);
#else
enable_mmu_svc_mon(0);
#endif
arm_setup_romlib();
}

@@ -59,7 +59,7 @@ uint32_t arm_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
#ifdef __aarch64__
uint32_t arm_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
@@ -97,7 +97,7 @@ uint32_t arm_get_spsr_for_bl33_entry(void)
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
#endif /* __aarch64__ */

/*******************************************************************************
* Configures access to the system counter timer module.
@@ -83,8 +83,8 @@ void __init plat_arm_gic_driver_init(void)
* can use GIC system registers to manage interrupts and does
* not need GIC interface base addresses to be configured.
*/
#if (defined(AARCH32) && defined(IMAGE_BL32)) || \
(defined(IMAGE_BL31) && !defined(AARCH32))
#if (!defined(__aarch64__) && defined(IMAGE_BL32)) || \
(defined(__aarch64__) && defined(IMAGE_BL31))
gicv3_driver_init(&arm_gic_data);
#endif
}

@@ -26,7 +26,7 @@

static mem_region_t arm_ram_ranges[] = {
{DRAM1_NS_IMAGE_LIMIT, DRAM1_PROTECTED_SIZE},
#ifdef AARCH64
#ifdef __aarch64__
{ARM_DRAM2_BASE, 1u << ONE_GB_SHIFT},
#endif
};

@@ -116,7 +116,7 @@ int arm_validate_ns_entrypoint(uintptr_t entrypoint)
(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE))) {
return 0;
}
#ifndef AARCH32
#ifdef __aarch64__
if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
(ARM_DRAM2_BASE + ARM_DRAM2_SIZE))) {
return 0;

@@ -40,7 +40,7 @@ int arm_execution_state_switch(unsigned int smc_fid,
void *handle)
{
/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
#ifdef __aarch64__
bool caller_64, thumb = false, from_el2;
unsigned int el, endianness;
u_register_t spsr, pc, scr, sctlr;
@@ -173,7 +173,7 @@ invalid_param:
SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
#endif /* __aarch64__ */
/* State switch denied */
SMC_RET1(handle, STATE_SW_E_DENIED);
}

@@ -300,7 +300,7 @@ unsigned int plat_ic_get_interrupt_id(unsigned int raw)
#pragma weak plat_ic_end_of_interrupt

/* In AArch32, the secure group1 interrupts are targeted to Secure PL1 */
#ifdef AARCH32
#ifndef __aarch64__
#define IS_IN_EL1() IS_IN_SECURE()
#endif

@@ -20,10 +20,10 @@
#define MHZ_TICKS_PER_SEC 1000000U

/* Maximum time-stamp value read from architectural counters */
#ifdef AARCH32
#define MAX_TS UINT32_MAX
#else
#ifdef __aarch64__
#define MAX_TS UINT64_MAX
#else
#define MAX_TS UINT32_MAX
#endif

/* Following are used as ID's to capture time-stamp */
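MAX_TS above is the wrap-around limit of the architectural counter value being captured (32-bit on AArch32, 64-bit on AArch64). A hedged sketch of the kind of interval arithmetic it enables; ts_delta is a made-up helper, not a TF-A function, and it assumes the MAX_TS definition from the hunk above is in scope:

```c
static unsigned long long ts_delta(unsigned long long start,
				   unsigned long long end)
{
	if (end >= start)
		return end - start;

	/* The counter wrapped past MAX_TS exactly once. */
	return (MAX_TS - start) + end + 1ULL;
}
```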
@@ -77,7 +77,7 @@ uint32_t hikey_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
#ifdef __aarch64__
uint32_t hikey_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
@@ -112,7 +112,7 @@ uint32_t hikey_get_spsr_for_bl33_entry(void)
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
#endif /* __aarch64__ */

int hikey_bl2_handle_post_image_load(unsigned int image_id)
{
@@ -125,7 +125,7 @@ int hikey_bl2_handle_post_image_load(unsigned int image_id)
assert(bl_mem_params);

switch (image_id) {
#ifdef AARCH64
#ifdef __aarch64__
case BL32_IMAGE_ID:
#ifdef SPD_opteed
pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);

@@ -113,7 +113,7 @@
#endif

/* BL32 is mandatory in AArch32 */
#ifndef AARCH32
#ifdef __aarch64__
#ifdef SPD_none
#undef BL32_BASE
#endif /* SPD_none */

@@ -168,7 +168,7 @@ uint32_t hikey960_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
#ifdef __aarch64__
uint32_t hikey960_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
@@ -203,7 +203,7 @@ uint32_t hikey960_get_spsr_for_bl33_entry(void)
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
#endif /* __aarch64__ */

int hikey960_bl2_handle_post_image_load(unsigned int image_id)
{
@@ -216,7 +216,7 @@ int hikey960_bl2_handle_post_image_load(unsigned int image_id)
assert(bl_mem_params);

switch (image_id) {
#ifdef AARCH64
#ifdef __aarch64__
case BL32_IMAGE_ID:
#ifdef SPD_opteed
pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);

@@ -95,7 +95,7 @@
#endif

/* BL32 is mandatory in AArch32 */
#ifndef AARCH32
#ifdef __aarch64__
#ifdef SPD_none
#undef BL32_BASE
#endif /* SPD_none */

@@ -54,7 +54,7 @@ uint32_t poplar_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
#ifdef __aarch64__
uint32_t poplar_get_spsr_for_bl33_entry(void)
{
unsigned long el_status;
@@ -93,7 +93,7 @@ uint32_t poplar_get_spsr_for_bl33_entry(void)
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
#endif /* __aarch64__ */

int poplar_bl2_handle_post_image_load(unsigned int image_id)
{
@@ -107,7 +107,7 @@ int poplar_bl2_handle_post_image_load(unsigned int image_id)
assert(bl_mem_params);

switch (image_id) {
#ifdef AARCH64
#ifdef __aarch64__
case BL32_IMAGE_ID:
#ifdef SPD_opteed
pager_mem_params = get_bl_mem_params_node(BL32_EXTRA1_IMAGE_ID);

@@ -107,7 +107,7 @@
#endif

/* BL32 is mandatory in AArch32 */
#ifndef AARCH32
#ifdef __aarch64__
#ifdef SPD_none
#undef BL32_BASE
#endif /* SPD_none */
@@ -59,11 +59,11 @@ void ls_bl1_plat_arch_setup(void)
#endif
);
VERBOSE("After setup the page tables\n");
#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el3(0);
#endif /* AARCH32 */
#else
enable_mmu_svc_mon(0);
#endif /* __aarch64__ */
VERBOSE("After MMU enabled\n");
}

@@ -54,10 +54,10 @@ void ls_bl2_plat_arch_setup(void)
#endif
);

#ifdef AARCH32
enable_mmu_svc_mon(0);
#else
#ifdef __aarch64__
enable_mmu_el1(0);
#else
enable_mmu_svc_mon(0);
#endif
}

@@ -74,7 +74,7 @@ int ls_bl2_handle_post_image_load(unsigned int image_id)
assert(bl_mem_params);

switch (image_id) {
#ifdef AARCH64
#ifdef __aarch64__
case BL32_IMAGE_ID:
bl_mem_params->ep_info.spsr = ls_get_spsr_for_bl32_entry();
break;

@@ -143,7 +143,7 @@ uint32_t ls_get_spsr_for_bl32_entry(void)
/*******************************************************************************
* Gets SPSR for BL33 entry
******************************************************************************/
#ifndef AARCH32
#ifdef __aarch64__
uint32_t ls_get_spsr_for_bl33_entry(void)
{
unsigned int mode;
@@ -181,7 +181,7 @@ uint32_t ls_get_spsr_for_bl33_entry(void)
SPSR_E_LITTLE, DISABLE_ALL_EXCEPTIONS);
return spsr;
}
#endif /* AARCH32 */
#endif /* __aarch64__ */

/*******************************************************************************
* Returns Layerscape platform specific memory map regions.
@@ -41,10 +41,10 @@ void bl1_early_platform_setup(void)
* does basic initialization. Later architectural setup (bl1_arch_setup())
* does not do anything platform specific.
*****************************************************************************/
#ifdef AARCH32
#define QEMU_CONFIGURE_BL1_MMU(...) qemu_configure_mmu_svc_mon(__VA_ARGS__)
#else
#ifdef __aarch64__
#define QEMU_CONFIGURE_BL1_MMU(...) qemu_configure_mmu_el3(__VA_ARGS__)
#else
#define QEMU_CONFIGURE_BL1_MMU(...) qemu_configure_mmu_svc_mon(__VA_ARGS__)
#endif

void bl1_plat_arch_setup(void)

@@ -35,7 +35,7 @@ static bl_mem_params_node_t bl2_mem_params_descs[] = {
.next_handoff_image_id = INVALID_IMAGE_ID,
},
#else /* EL3_PAYLOAD_BASE */
#ifdef AARCH64
#ifdef __aarch64__
/* Fill BL31 related information */
{ .image_id = BL31_IMAGE_ID,

@@ -59,10 +59,10 @@ static bl_mem_params_node_t bl2_mem_params_descs[] = {
.next_handoff_image_id = BL33_IMAGE_ID,
# endif
},
#endif /* AARCH64 */
#endif /* __aarch64__ */
# ifdef QEMU_LOAD_BL32

#ifdef AARCH64
#ifdef __aarch64__
#define BL32_EP_ATTRIBS (SECURE | EXECUTABLE)
#define BL32_IMG_ATTRIBS 0
#else

@@ -81,10 +81,10 @@ void bl2_platform_setup(void)
/* TODO Initialize timer */
}

#ifdef AARCH32
#define QEMU_CONFIGURE_BL2_MMU(...) qemu_configure_mmu_svc_mon(__VA_ARGS__)
#else
#ifdef __aarch64__
#define QEMU_CONFIGURE_BL2_MMU(...) qemu_configure_mmu_el1(__VA_ARGS__)
#else
#define QEMU_CONFIGURE_BL2_MMU(...) qemu_configure_mmu_svc_mon(__VA_ARGS__)
#endif

void bl2_plat_arch_setup(void)
@@ -101,7 +101,7 @@ void bl2_plat_arch_setup(void)
******************************************************************************/
static uint32_t qemu_get_spsr_for_bl32_entry(void)
{
#ifdef AARCH64
#ifdef __aarch64__
/*
* The Secure Payload Dispatcher service is responsible for
* setting the SPSR prior to entry into the BL3-2 image.
@@ -119,7 +119,7 @@ static uint32_t qemu_get_spsr_for_bl32_entry(void)
static uint32_t qemu_get_spsr_for_bl33_entry(void)
{
uint32_t spsr;
#ifdef AARCH64
#ifdef __aarch64__
unsigned int mode;

/* Figure out what mode we enter the non-secure world in */

@@ -132,11 +132,11 @@ static const mmap_region_t plat_qemu_mmap[] = {
}

/* Define EL1 and EL3 variants of the function initialising the MMU */
#ifdef AARCH32
DEFINE_CONFIGURE_MMU_EL(svc_mon)
#else
#ifdef __aarch64__
DEFINE_CONFIGURE_MMU_EL(el1)
DEFINE_CONFIGURE_MMU_EL(el3)
#else
DEFINE_CONFIGURE_MMU_EL(svc_mon)
#endif
@@ -60,16 +60,7 @@ extern uint32_t __sram_incbin_real_end;
/******************************************************************************
* Function and variable prototypes
*****************************************************************************/
#ifdef AARCH32
void plat_configure_mmu_svc_mon(unsigned long total_base,
unsigned long total_size,
unsigned long,
unsigned long,
unsigned long,
unsigned long);

void rockchip_plat_mmu_svc_mon(void);
#else
#ifdef __aarch64__
void plat_configure_mmu_el3(unsigned long total_base,
unsigned long total_size,
unsigned long,
@@ -78,6 +69,15 @@ void plat_configure_mmu_el3(unsigned long total_base,
unsigned long);

void rockchip_plat_mmu_el3(void);
#else
void plat_configure_mmu_svc_mon(unsigned long total_base,
unsigned long total_size,
unsigned long,
unsigned long,
unsigned long,
unsigned long);

void rockchip_plat_mmu_svc_mon(void);
#endif

void plat_cci_init(void);

@@ -22,7 +22,7 @@
#include <services/sdei.h>
#include <setjmp.h>

#ifdef AARCH32
#ifndef __aarch64__
# error SDEI is implemented only for AArch64 systems
#endif