Merge pull request #1429 from jeenu-arm/mmu-direct
Enable MMU without stack for xlat v2/DynamIQ
This commit is contained in: d48f193d11
@@ -170,15 +170,12 @@ func bl31_warm_entrypoint
* enter coherency (as CPUs already are); and there's no reason to have
* caches disabled either.
*/
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
mrs x0, sctlr_el3
orr x0, x0, #SCTLR_C_BIT
msr sctlr_el3, x0
isb
mov x0, xzr
#else
mov x0, #DISABLE_DCACHE
#endif
bl bl31_plat_enable_mmu

bl psci_warmboot_entrypoint
@@ -298,20 +298,17 @@ func sp_min_warm_entrypoint
* enter coherency (as CPUs already are); and there's no reason to have
* caches disabled either.
*/
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
mov r0, #0
#else
mov r0, #DISABLE_DCACHE
#endif
bl bl32_plat_enable_mmu

#if SP_MIN_WITH_SECURE_FIQ
route_fiq_to_sp_min r0
#endif

#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
ldcopr r0, SCTLR
orr r0, r0, #SCTLR_C_BIT
stcopr r0, SCTLR
isb
#endif

bl sp_min_warm_boot
bl smc_get_next_ctx
/* r0 points to `smc_ctx_t` */
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -247,40 +247,12 @@ func tsp_cpu_on_entry
bl plat_set_my_stack

/* --------------------------------------------
* Enable the MMU with the DCache disabled. It
* is safe to use stacks allocated in normal
* memory as a result. All memory accesses are
* marked nGnRnE when the MMU is disabled. So
* all the stack writes will make it to memory.
* All memory accesses are marked Non-cacheable
* when the MMU is enabled but D$ is disabled.
* So used stack memory is guaranteed to be
* visible immediately after the MMU is enabled
* Enabling the DCache at the same time as the
* MMU can lead to speculatively fetched and
* possibly stale stack memory being read from
* other caches. This can lead to coherency
* issues.
* Enable MMU and D-caches together.
* --------------------------------------------
*/
mov x0, #DISABLE_DCACHE
mov x0, #0
bl bl32_plat_enable_mmu

/* ---------------------------------------------
* Enable the Data cache now that the MMU has
* been enabled. The stack has been unwound. It
* will be written first before being read. This
* will invalidate any stale cache lines resi-
* -dent in other caches. We assume that
* interconnect coherency has been enabled for
* this cluster by EL3 firmware.
* ---------------------------------------------
*/
mrs x0, sctlr_el1
orr x0, x0, #SCTLR_C_BIT
msr sctlr_el1, x0
isb

/* ---------------------------------------------
* Enter C runtime to perform any remaining
* book keeping
@@ -1997,6 +1997,25 @@ state. This function must return a pointer to the ``entry_point_info`` structure
(that was copied during ``bl31_early_platform_setup()``) if the image exists. It
should return NULL otherwise.

Function : bl31_plat_enable_mmu [optional]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

::

    Argument : uint32_t
    Return   : void

This function enables the MMU. The boot code calls this function with MMU and
caches disabled. This function should program necessary registers to enable
translation, and upon return, the MMU on the calling PE must be enabled.

The function must honor flags passed in the first argument. These flags are
defined by the translation library, and can be found in the file
``include/lib/xlat_tables/xlat_mmu_helpers.h``.

On DynamIQ systems, this function must not use the stack while enabling the
MMU, which is how the function in xlat table library version 2 is implemented.
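For reference, the default implementation added by this patch is an assembly stub that tail-calls enable_mmu_direct_el3() so that no stack is touched (see plat/common/aarch64/platform_helpers.S further down). A platform that does not need the stack-free property could still override the hook in C, along the lines of the wrapper removed from plat_common.c by this same patch; a minimal, hypothetical sketch:

    /* Hypothetical platform override, assuming the xlat tables v2 library.
     * Only appropriate where stack use before the MMU is on is acceptable,
     * i.e. not on DynamIQ/HW_ASSISTED_COHERENCY systems. */
    #include <stdint.h>
    #include <xlat_mmu_helpers.h>

    void bl31_plat_enable_mmu(uint32_t flags)
    {
            /* Programs MAIR/TCR/TTBR from the xlat v2 context and sets
             * SCTLR_EL3.M (and .C unless DISABLE_DCACHE is passed). */
            enable_mmu_el3(flags);
    }
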

Function : plat\_get\_syscnt\_freq2() [mandatory]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -454,6 +454,10 @@ Common build options
management operations. This option defaults to 0 and if it is enabled,
then it implies ``WARMBOOT_ENABLE_DCACHE_EARLY`` is also enabled.

Note that, when ``HW_ASSISTED_COHERENCY`` is enabled, version 2 of the
translation library (xlat tables v2) must be used; version 1 of the
translation library is not supported.

- ``JUNO_AARCH32_EL3_RUNTIME``: This build flag enables you to execute EL3
runtime software in AArch32 mode, which is required to run AArch32 on Juno.
By default this flag is set to '0'. Enabling this flag builds BL1 and BL2 in
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -10,6 +10,20 @@
#include <asm_macros_common.S>
#include <spinlock.h>

/*
* TLBI instruction with type specifier that implements the workaround for
* errata 813419 of Cortex-A57.
*/
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_reg, _coproc) \
stcopr _reg, _coproc; \
dsb ish; \
stcopr _reg, _coproc
#else
#define TLB_INVALIDATE(_reg, _coproc) \
stcopr _reg, _coproc
#endif

#define WORD_SIZE 4

/*
@@ -10,6 +10,20 @@
#include <asm_macros_common.S>
#include <spinlock.h>

/*
* TLBI instruction with type specifier that implements the workaround for
* errata 813419 of Cortex-A57.
*/
#if ERRATA_A57_813419
#define TLB_INVALIDATE(_type) \
tlbi _type; \
dsb ish; \
tlbi _type
#else
#define TLB_INVALIDATE(_type) \
tlbi _type
#endif


.macro func_prologue
stp x29, x30, [sp, #-0x10]!
@@ -340,7 +340,7 @@
/*
* TTBR definitions
*/
#define TTBR_CNP_BIT 0x1
#define TTBR_CNP_BIT U(0x1)

/*
* CTR definitions
@@ -127,8 +127,8 @@
* expected.
*/
#define ARM_ARCH_AT_LEAST(_maj, _min) \
((ARM_ARCH_MAJOR > _maj) || \
((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))
((ARM_ARCH_MAJOR > (_maj)) || \
((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))

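The extra parentheses matter when a macro argument is itself an expression built from operators that bind more loosely than the comparisons inside the macro. A contrived, stand-alone illustration (values are hypothetical, not from the patch):

    #include <stdio.h>

    #define ARM_ARCH_MAJOR 8
    #define ARM_ARCH_MINOR 0

    /* Old form, without parentheses around _maj/_min. */
    #define AT_LEAST_OLD(_maj, _min) \
            ((ARM_ARCH_MAJOR > _maj) || \
             ((ARM_ARCH_MAJOR == _maj) && (ARM_ARCH_MINOR >= _min)))

    /* New form, as in the patch. */
    #define AT_LEAST_NEW(_maj, _min) \
            ((ARM_ARCH_MAJOR > (_maj)) || \
             ((ARM_ARCH_MAJOR == (_maj)) && (ARM_ARCH_MINOR >= (_min))))

    int main(void)
    {
            /* The conditional argument expands unparenthesised as
             * ARM_ARCH_MINOR >= 0 ? 2 : 1, i.e. ((0 >= 0) ? 2 : 1) == 2,
             * so the old form wrongly reports "at least v8.1". */
            printf("old: %d new: %d\n",
                   AT_LEAST_OLD(8, 0 ? 2 : 1), AT_LEAST_NEW(8, 0 ? 2 : 1));
            return 0;
    }
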
/*
* Import an assembly or linker symbol as a C expression with the specified
@@ -48,10 +48,15 @@
#ifdef AARCH32
/* AArch32 specific translation table API */
void enable_mmu_secure(unsigned int flags);

void enable_mmu_direct(unsigned int flags);
#else
/* AArch64 specific translation table APIs */
void enable_mmu_el1(unsigned int flags);
void enable_mmu_el3(unsigned int flags);

void enable_mmu_direct_el1(unsigned int flags);
void enable_mmu_direct_el3(unsigned int flags);
#endif /* AARCH32 */

int xlat_arch_is_granule_size_supported(size_t size);
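For orientation, these declarations are the public face of the library: a typical BL31 cold-boot path builds its tables and then calls one of the enable_mmu_el* wrappers, which (after this patch) populate mmu_cfg_params and jump into the stack-free enable_mmu_direct_* routines. A rough usage sketch with placeholder region values (not taken from the patch):

    #include <xlat_tables_v2.h>

    void plat_example_setup_xlat(void)
    {
            /* Describe the memory map; base/size/attributes are placeholders. */
            mmap_add_region(0x80000000ULL, 0x80000000U, 0x10000000U,
                            MT_MEMORY | MT_RW | MT_SECURE);

            /* Build the translation tables for the default context. */
            init_xlat_tables();

            /* Program MAIR/TCR/TTBR and switch the MMU on at EL3; pass
             * DISABLE_DCACHE instead of 0 to leave the D-cache off. */
            enable_mmu_el3(0);
    }
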
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -8,12 +8,12 @@
#define __XLAT_TABLES_V2_H__

#include <xlat_tables_defs.h>
#include <xlat_tables_v2_helpers.h>

#ifndef __ASSEMBLY__
#include <stddef.h>
#include <stdint.h>
#include <xlat_mmu_helpers.h>
#include <xlat_tables_v2_helpers.h>

/*
* Default granularity size for an mmap_region_t.
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,6 +16,13 @@
#error "Do not include this header file directly. Include xlat_tables_v2.h instead."
#endif

/* Offsets into mmu_cfg_params array. All parameters are 32 bits wide. */
#define MMU_CFG_MAIR0 0
#define MMU_CFG_TCR 1
#define MMU_CFG_TTBR0_LO 2
#define MMU_CFG_TTBR0_HI 3
#define MMU_CFG_PARAM_MAX 4

#ifndef __ASSEMBLY__

#include <cassert.h>
@@ -24,6 +31,9 @@
#include <xlat_tables_arch.h>
#include <xlat_tables_defs.h>

/* Parameters of register values required when enabling MMU */
extern uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/* Forward declaration */
struct mmap_region;

@@ -162,6 +172,8 @@ struct xlat_ctx {
.initialized = 0, \
}

#endif /*__ASSEMBLY__*/

#if AARCH64

/*
@@ -187,6 +199,4 @@ struct xlat_ctx {

#endif /* AARCH64 */

#endif /*__ASSEMBLY__*/

#endif /* __XLAT_TABLES_V2_HELPERS_H__ */
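The split into 32-bit slots keeps the array directly loadable with 32-bit loads from both AArch32 and AArch64 assembly; the 64-bit TTBR0 value is carried in two slots and recombined by the consumer. Roughly, in C terms (a paraphrase of what the new enable_mmu.S files do with ldr/orr, not library code):

    uint64_t ttbr0 = (uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_LO] |
                     ((uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_HI] << 32);
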
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -130,3 +130,8 @@ void enable_mmu_secure(unsigned int flags)
/* Ensure the MMU enable takes effect immediately */
isb();
}

void enable_mmu_direct(unsigned int flags)
{
enable_mmu_secure(flags);
}
@@ -181,6 +181,11 @@ void init_xlat_tables(void)
\
/* Ensure the MMU enable takes effect immediately */ \
isb(); \
} \
\
void enable_mmu_direct_el##_el(unsigned int flags) \
{ \
enable_mmu_el##_el(flags); \
}

/* Define EL1 and EL3 variants of the function enabling the MMU */
@@ -11,6 +11,10 @@
#include <platform_def.h>
#include <xlat_tables_arch.h>

#if HW_ASSISTED_COHERENCY
#error xlat tables v2 must be used with HW_ASSISTED_COHERENCY
#endif

/*
* If the platform hasn't defined a physical and a virtual address space size
* default to ADDR_SPACE_SIZE.
lib/xlat_tables_v2/aarch32/enable_mmu.S (new file, 66 lines)
@@ -0,0 +1,66 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

#include <asm_macros.S>
#include <assert_macros.S>
#include <xlat_tables_v2.h>

.global enable_mmu_direct

func enable_mmu_direct
/* Assert that MMU is turned off */
#if ENABLE_ASSERTIONS
ldcopr r1, SCTLR
tst r1, #SCTLR_M_BIT
ASM_ASSERT(eq)
#endif

/* Invalidate TLB entries */
TLB_INVALIDATE(r0, TLBIALL)

mov r3, r0
ldr r0, =mmu_cfg_params

/* MAIR0 */
ldr r1, [r0, #(MMU_CFG_MAIR0 << 2)]
stcopr r1, MAIR0

/* TTBCR */
ldr r2, [r0, #(MMU_CFG_TCR << 2)]
stcopr r2, TTBCR

/* TTBR0 */
ldr r1, [r0, #(MMU_CFG_TTBR0_LO << 2)]
ldr r2, [r0, #(MMU_CFG_TTBR0_HI << 2)]
stcopr16 r1, r2, TTBR0_64

/* TTBR1 is unused right now; set it to 0. */
mov r1, #0
mov r2, #0
stcopr16 r1, r2, TTBR1_64

/*
* Ensure all translation table writes have drained into memory, the TLB
* invalidation is complete, and translation register writes are
* committed before enabling the MMU
*/
dsb ish
isb

/* Enable MMU by honoring flags */
ldcopr r1, SCTLR
ldr r2, =(SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT)
orr r1, r1, r2

/* Clear C bit if requested */
tst r3, #DISABLE_DCACHE
bicne r1, r1, #SCTLR_C_BIT

stcopr r1, SCTLR
isb

bx lr
endfunc enable_mmu_direct
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -18,6 +18,8 @@
#error ARMv7 target does not support LPAE MMU descriptors
#endif

uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
@@ -109,22 +111,16 @@ int xlat_arch_current_el(void)
* Function for enabling the MMU in Secure PL1, assuming that the page tables
* have already been created.
******************************************************************************/
void enable_mmu_arch(unsigned int flags,
uint64_t *base_table,
void setup_mmu_cfg(unsigned int flags,
const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
u_register_t mair0, ttbcr, sctlr;
u_register_t mair0, ttbcr;
uint64_t ttbr0;

assert(IS_IN_SECURE());

sctlr = read_sctlr();
assert((sctlr & SCTLR_M_BIT) == 0);

/* Invalidate TLBs at the current exception level */
tlbiall();

/* Set attributes in the right indices of the MAIR */
mair0 = MAIR0_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
mair0 |= MAIR0_ATTR_SET(ATTR_IWBWA_OWBWA_NTR,
@@ -185,30 +181,9 @@ void enable_mmu_arch(unsigned int flags,
ttbr0 |= TTBR_CNP_BIT;
#endif

/* Now program the relevant system registers */
write_mair0(mair0);
write_ttbcr(ttbcr);
write64_ttbr0(ttbr0);
write64_ttbr1(0);

/*
* Ensure all translation table writes have drained
* into memory, the TLB invalidation is complete,
* and translation register writes are committed
* before enabling the MMU
*/
dsbish();
isb();

sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT;

if (flags & DISABLE_DCACHE)
sctlr &= ~SCTLR_C_BIT;
else
sctlr |= SCTLR_C_BIT;

write_sctlr(sctlr);

/* Ensure the MMU enable takes effect immediately */
isb();
/* Now populate MMU configuration */
mmu_cfg_params[MMU_CFG_MAIR0] = mair0;
mmu_cfg_params[MMU_CFG_TCR] = ttbcr;
mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr0;
mmu_cfg_params[MMU_CFG_TTBR0_HI] = ttbr0 >> 32;
}
lib/xlat_tables_v2/aarch64/enable_mmu.S (new file, 91 lines)
@@ -0,0 +1,91 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

#include <asm_macros.S>
#include <assert_macros.S>
#include <xlat_tables_v2.h>

.global enable_mmu_direct_el1
.global enable_mmu_direct_el3

/* Macros to read and write to system register for a given EL. */
.macro _msr reg_name, el, gp_reg
msr \reg_name\()_el\()\el, \gp_reg
.endm

.macro _mrs gp_reg, reg_name, el
mrs \gp_reg, \reg_name\()_el\()\el
.endm

.macro define_mmu_enable_func el
func enable_mmu_direct_\()el\el
#if ENABLE_ASSERTIONS
_mrs x1, sctlr, \el
tst x1, #SCTLR_M_BIT
ASM_ASSERT(eq)
#endif

/* Invalidate TLB entries */
.if \el == 1
TLB_INVALIDATE(vmalle1)
.else
.if \el == 3
TLB_INVALIDATE(alle3)
.else
.error "EL must be 1 or 3"
.endif
.endif

mov x7, x0
ldr x0, =mmu_cfg_params

/* MAIR */
ldr w1, [x0, #(MMU_CFG_MAIR0 << 2)]
_msr mair, \el, x1

/* TCR */
ldr w2, [x0, #(MMU_CFG_TCR << 2)]
_msr tcr, \el, x2

/* TTBR */
ldr w3, [x0, #(MMU_CFG_TTBR0_LO << 2)]
ldr w4, [x0, #(MMU_CFG_TTBR0_HI << 2)]
orr x3, x3, x4, lsl #32
_msr ttbr0, \el, x3

/*
* Ensure all translation table writes have drained into memory, the TLB
* invalidation is complete, and translation register writes are
* committed before enabling the MMU
*/
dsb ish
isb

/* Set and clear required fields of SCTLR */
_mrs x4, sctlr, \el
mov_imm x5, SCTLR_WXN_BIT | SCTLR_C_BIT | SCTLR_M_BIT
orr x4, x4, x5

/* Additionally, amend SCTLR fields based on flags */
bic x5, x4, #SCTLR_C_BIT
tst x7, #DISABLE_DCACHE
csel x4, x5, x4, ne

_msr sctlr, \el, x4
isb

ret
endfunc enable_mmu_direct_\()el\el
.endm

/*
* Define MMU-enabling functions for EL1 and EL3:
*
* enable_mmu_direct_el1
* enable_mmu_direct_el3
*/
define_mmu_enable_func 1
define_mmu_enable_func 3
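For readers who find C easier to follow, the sequence implemented by enable_mmu_direct_el1/el3 above corresponds closely to the enable_mmu_internal_elx() helper that this patch deletes from xlat_tables_arch.c (shown further down), only with its inputs taken from mmu_cfg_params. A hedged C paraphrase using TF-A's arch_helpers.h accessors (illustrative only; the real routine has to stay in assembly so that no stack is used):

    #include <arch.h>
    #include <arch_helpers.h>
    #include <assert.h>
    #include <xlat_tables_v2.h>

    static void enable_mmu_direct_el3_in_c(unsigned int flags)
    {
            uint64_t ttbr0 = (uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_LO] |
                             ((uint64_t)mmu_cfg_params[MMU_CFG_TTBR0_HI] << 32);
            uint32_t sctlr;

            assert((read_sctlr_el3() & SCTLR_M_BIT) == 0);

            tlbialle3();                    /* invalidate EL3 TLB entries */

            write_mair_el3(mmu_cfg_params[MMU_CFG_MAIR0]);
            write_tcr_el3(mmu_cfg_params[MMU_CFG_TCR]);
            write_ttbr0_el3(ttbr0);

            dsbish();                       /* drain table writes and TLBI */
            isb();

            sctlr = read_sctlr_el3() | SCTLR_WXN_BIT | SCTLR_M_BIT;
            if ((flags & DISABLE_DCACHE) == 0U)
                    sctlr |= SCTLR_C_BIT;

            write_sctlr_el3(sctlr);
            isb();                          /* MMU is now on */
    }
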
@@ -16,6 +16,8 @@
#include <xlat_tables_v2.h>
#include "../xlat_tables_private.h"

uint32_t mmu_cfg_params[MMU_CFG_PARAM_MAX];

/*
* Returns 1 if the provided granule size is supported, 0 otherwise.
*/
@@ -183,70 +185,13 @@ int xlat_arch_current_el(void)
return el;
}

/*******************************************************************************
* Macro generating the code for the function enabling the MMU in the given
* exception level, assuming that the pagetables have already been created.
*
* _el: Exception level at which the function will run
* _tlbi_fct: Function to invalidate the TLBs at the current
* exception level
******************************************************************************/
#define DEFINE_ENABLE_MMU_EL(_el, _tlbi_fct) \
static void enable_mmu_internal_el##_el(int flags, \
uint64_t mair, \
uint64_t tcr, \
uint64_t ttbr) \
{ \
uint32_t sctlr = read_sctlr_el##_el(); \
assert((sctlr & SCTLR_M_BIT) == 0); \
\
/* Invalidate TLBs at the current exception level */ \
_tlbi_fct(); \
\
write_mair_el##_el(mair); \
write_tcr_el##_el(tcr); \
\
/* Set TTBR bits as well */ \
if (ARM_ARCH_AT_LEAST(8, 2)) { \
/* Enable CnP bit so as to share page tables */ \
/* with all PEs. This is mandatory for */ \
/* ARMv8.2 implementations. */ \
ttbr |= TTBR_CNP_BIT; \
} \
write_ttbr0_el##_el(ttbr); \
\
/* Ensure all translation table writes have drained */ \
/* into memory, the TLB invalidation is complete, */ \
/* and translation register writes are committed */ \
/* before enabling the MMU */ \
dsbish(); \
isb(); \
\
sctlr |= SCTLR_WXN_BIT | SCTLR_M_BIT; \
if (flags & DISABLE_DCACHE) \
sctlr &= ~SCTLR_C_BIT; \
else \
sctlr |= SCTLR_C_BIT; \
\
write_sctlr_el##_el(sctlr); \
\
/* Ensure the MMU enable takes effect immediately */ \
isb(); \
}

/* Define EL1 and EL3 variants of the function enabling the MMU */
#if IMAGE_EL == 1
DEFINE_ENABLE_MMU_EL(1, tlbivmalle1)
#elif IMAGE_EL == 3
DEFINE_ENABLE_MMU_EL(3, tlbialle3)
#endif

void enable_mmu_arch(unsigned int flags,
uint64_t *base_table,
void setup_mmu_cfg(unsigned int flags,
const uint64_t *base_table,
unsigned long long max_pa,
uintptr_t max_va)
{
uint64_t mair, ttbr, tcr;
uintptr_t virtual_addr_space_size;

/* Set attributes in the right indices of the MAIR. */
mair = MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX);
@@ -255,28 +200,26 @@ void enable_mmu_arch(unsigned int flags,
ttbr = (uint64_t) base_table;

/*
* Set TCR bits as well.
*/

/*
* Limit the input address ranges and memory region sizes translated
* using TTBR0 to the given virtual address space size.
*/
assert(max_va < UINTPTR_MAX);
uintptr_t virtual_addr_space_size = max_va + 1;
assert(max_va < ((uint64_t) UINTPTR_MAX));

virtual_addr_space_size = max_va + 1;
assert(CHECK_VIRT_ADDR_SPACE_SIZE(virtual_addr_space_size));

/*
* __builtin_ctzll(0) is undefined but here we are guaranteed that
* virtual_addr_space_size is in the range [1,UINTPTR_MAX].
*/
tcr = 64 - __builtin_ctzll(virtual_addr_space_size);
tcr = (uint64_t) 64 - __builtin_ctzll(virtual_addr_space_size);

/*
* Set the cacheability and shareability attributes for memory
* associated with translation table walks.
*/
if (flags & XLAT_TABLE_NC) {
if ((flags & XLAT_TABLE_NC) != 0) {
/* Inner & outer non-cacheable non-shareable. */
tcr |= TCR_SH_NON_SHAREABLE |
TCR_RGN_OUTER_NC | TCR_RGN_INNER_NC;
@@ -299,10 +242,23 @@ void enable_mmu_arch(unsigned int flags,
* translated using TTBR1_EL1.
*/
tcr |= TCR_EPD1_BIT | (tcr_ps_bits << TCR_EL1_IPS_SHIFT);
enable_mmu_internal_el1(flags, mair, tcr, ttbr);
#elif IMAGE_EL == 3
assert(IS_IN_EL(3));
tcr |= TCR_EL3_RES1 | (tcr_ps_bits << TCR_EL3_PS_SHIFT);
enable_mmu_internal_el3(flags, mair, tcr, ttbr);
#endif

mmu_cfg_params[MMU_CFG_MAIR0] = (uint32_t) mair;
mmu_cfg_params[MMU_CFG_TCR] = (uint32_t) tcr;

/* Set TTBR bits as well */
if (ARM_ARCH_AT_LEAST(8, 2)) {
/*
* Enable CnP bit so as to share page tables with all PEs. This
* is mandatory for ARMv8.2 implementations.
*/
ttbr |= TTBR_CNP_BIT;
}

mmu_cfg_params[MMU_CFG_TTBR0_LO] = (uint32_t) ttbr;
mmu_cfg_params[MMU_CFG_TTBR0_HI] = (uint32_t) (ttbr >> 32);
}
@@ -1,10 +1,11 @@
#
# Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#

XLAT_TABLES_LIB_SRCS := $(addprefix lib/xlat_tables_v2/, \
${ARCH}/enable_mmu.S \
${ARCH}/xlat_tables_arch.c \
xlat_tables_internal.c)
@@ -802,7 +802,7 @@ void mmap_add_region_ctx(xlat_ctx_t *ctx, const mmap_region_t *mm)
* that there is free space.
*/
assert(mm_last->size == 0U);

/* Make room for new region by moving other regions up by one place */
mm_destination = mm_cursor + 1;
memmove(mm_destination, mm_cursor,
@@ -1313,22 +1313,25 @@ void init_xlat_tables(void)

void enable_mmu_secure(unsigned int flags)
{
enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
enable_mmu_direct(flags);
}

#else

void enable_mmu_el1(unsigned int flags)
{
enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
enable_mmu_direct_el1(flags);
}

void enable_mmu_el3(unsigned int flags)
{
enable_mmu_arch(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
setup_mmu_cfg(flags, tf_xlat_ctx.base_table, MAX_PHYS_ADDR,
tf_xlat_ctx.va_max_address);
enable_mmu_direct_el3(flags);
}

#endif /* AARCH32 */
@@ -81,7 +81,7 @@ int xlat_arch_current_el(void);
unsigned long long xlat_arch_get_max_supported_pa(void);

/* Enable MMU and configure it to use the specified translation tables. */
void enable_mmu_arch(unsigned int flags, uint64_t *base_table,
void setup_mmu_cfg(unsigned int flags, const uint64_t *base_table,
unsigned long long max_pa, uintptr_t max_va);

/*
@@ -18,8 +18,6 @@
* provide typical implementations that may be re-used by multiple
* platforms but may also be overridden by a platform if required.
*/
#pragma weak bl31_plat_enable_mmu
#pragma weak bl32_plat_enable_mmu
#pragma weak bl31_plat_runtime_setup
#if !ERROR_DEPRECATED
#pragma weak plat_get_syscnt_freq2
@@ -33,16 +31,6 @@

#pragma weak plat_ea_handler

void bl31_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el3(flags);
}

void bl32_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el1(flags);
}

void bl31_plat_runtime_setup(void)
{
#if MULTI_CONSOLE_API
@@ -17,6 +17,8 @@
.weak plat_disable_acp
.weak bl1_plat_prepare_exit
.weak plat_panic_handler
.weak bl31_plat_enable_mmu
.weak bl32_plat_enable_mmu

#if !ENABLE_PLAT_COMPAT
.globl platform_get_core_pos
@@ -164,3 +166,23 @@ func plat_panic_handler
wfi
b plat_panic_handler
endfunc plat_panic_handler

/* -----------------------------------------------------
* void bl31_plat_enable_mmu(uint32_t flags);
*
* Enable MMU in BL31.
* -----------------------------------------------------
*/
func bl31_plat_enable_mmu
b enable_mmu_direct_el3
endfunc bl31_plat_enable_mmu

/* -----------------------------------------------------
* void bl32_plat_enable_mmu(uint32_t flags);
*
* Enable MMU in BL32.
* -----------------------------------------------------
*/
func bl32_plat_enable_mmu
b enable_mmu_direct_el1
endfunc bl32_plat_enable_mmu