switch-l4t-atf/bl31/bl31.ld.S
Antonio Nino Diaz a2aedac221 Replace magic numbers in linkerscripts by PAGE_SIZE
When defining different sections in linker scripts, they must be aligned to
multiples of the page size. In most linker scripts this is done by aligning
to the hardcoded value 4096 instead of PAGE_SIZE.

This can be confusing when reading through the codebase, because 4096 also
appears in places that are not meant to be a multiple of the page size.

Change-Id: I36c6f461c7782437a58d13d37ec8b822a1663ec1
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
2017-11-29 12:09:52 +00:00
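
For illustration only: PAGE_SIZE is a preprocessor constant visible both to C
code and to this linker script (it is pulled in through xlat_tables_defs.h,
included below), so aligning to PAGE_SIZE is equivalent to the old hardcoded
4096 on platforms using a 4 KiB translation granule. A hedged sketch of the
kind of definition assumed:

#define PAGE_SIZE_SHIFT  12                      /* 4 KiB granule assumed */
#define PAGE_SIZE        (1 << PAGE_SIZE_SHIFT)  /* 4096 bytes */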

/*
* Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <platform_def.h>
#include <xlat_tables_defs.h>
OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl31_entrypoint)
MEMORY {
RAM (rwx): ORIGIN = BL31_BASE, LENGTH = BL31_LIMIT - BL31_BASE
}
#ifdef PLAT_EXTRA_LD_SCRIPT
#include <plat.ld.S>
#endif
SECTIONS
{
. = BL31_BASE;
ASSERT(. == ALIGN(PAGE_SIZE),
"BL31_BASE address is not aligned on a page boundary.")
#if SEPARATE_CODE_AND_RODATA
.text . : {
__TEXT_START__ = .;
*bl31_entrypoint.o(.text*)
*(.text*)
*(.vectors)
. = NEXT(PAGE_SIZE);
__TEXT_END__ = .;
} >RAM
.rodata . : {
__RODATA_START__ = .;
*(.rodata*)
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__RT_SVC_DESCS_START__ = .;
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
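/*
* Illustration: runtime services are emitted into the rt_svc_descs input
* section from C, and the bracketing symbols above let the SMC dispatcher
* treat the region as an array. A minimal hedged sketch, assuming the
* rt_svc_desc_t layout from runtime_svc.h:
*
*   extern rt_svc_desc_t __RT_SVC_DESCS_START__[], __RT_SVC_DESCS_END__[];
*
*   static void init_all_rt_services(void)
*   {
*       for (rt_svc_desc_t *d = __RT_SVC_DESCS_START__;
*            d < __RT_SVC_DESCS_END__; d++) {
*           if (d->init != NULL)
*               (void)d->init();
*       }
*   }
*/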
#if ENABLE_PMF
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__PMF_SVC_DESCS_START__ = .;
KEEP(*(pmf_svc_descs))
__PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */
/*
* Ensure 8-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(8);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
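/*
* Illustration: the cpu_ops records collected above are looked up at runtime
* by matching the running core's MIDR (the real lookup is the assembly
* routine get_cpu_ops_ptr). A hedged C sketch using a hypothetical struct
* view of the entries; the 8-byte alignment above keeps their 64-bit fields
* naturally aligned:
*
*   typedef struct cpu_ops_view {
*       unsigned long midr;
*       void (*reset_func)(void);
*   } cpu_ops_view_t;
*
*   extern cpu_ops_view_t __CPU_OPS_START__[], __CPU_OPS_END__[];
*
*   static cpu_ops_view_t *find_cpu_ops(unsigned long midr)
*   {
*       for (cpu_ops_view_t *o = __CPU_OPS_START__; o < __CPU_OPS_END__; o++) {
*           if (o->midr == midr)
*               return o;
*       }
*       return NULL;
*   }
*/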
/* Place pubsub sections for events */
. = ALIGN(8);
#include <pubsub_events.h>
. = NEXT(PAGE_SIZE);
__RODATA_END__ = .;
} >RAM
#else
ro . : {
__RO_START__ = .;
*bl31_entrypoint.o(.text*)
*(.text*)
*(.rodata*)
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__RT_SVC_DESCS_START__ = .;
KEEP(*(rt_svc_descs))
__RT_SVC_DESCS_END__ = .;
#if ENABLE_PMF
/* Ensure 8-byte alignment for descriptors and ensure inclusion */
. = ALIGN(8);
__PMF_SVC_DESCS_START__ = .;
KEEP(*(pmf_svc_descs))
__PMF_SVC_DESCS_END__ = .;
#endif /* ENABLE_PMF */
/*
* Ensure 8-byte alignment for cpu_ops so that its fields are also
* aligned. Also ensure cpu_ops inclusion.
*/
. = ALIGN(8);
__CPU_OPS_START__ = .;
KEEP(*(cpu_ops))
__CPU_OPS_END__ = .;
/* Place pubsub sections for events */
. = ALIGN(8);
#include <pubsub_events.h>
*(.vectors)
__RO_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked as read-only,
* executable. No RW data from the next section must creep in.
* Ensure the rest of the current memory page is unused.
*/
. = NEXT(PAGE_SIZE);
__RO_END__ = .;
} >RAM
#endif
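/*
* Illustration: the __TEXT_*__/__RODATA_*__ (or __RO_*__) symbols defined
* above are what the platform port typically hands to the translation-table
* library so the region is mapped with read-only attributes. A hedged sketch
* against the xlat_tables API (the region names are illustrative and the
* attribute macros are assumed from that library):
*
*   IMPORT_SYM(uintptr_t, __RO_START__, BL31_RO_BASE);
*   IMPORT_SYM(uintptr_t, __RO_END__,   BL31_RO_LIMIT);
*
*   mmap_add_region(BL31_RO_BASE, BL31_RO_BASE,
*                   BL31_RO_LIMIT - BL31_RO_BASE,
*                   MT_CODE | MT_SECURE);
*/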
ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
"cpu_ops not defined for this platform.")
#if ENABLE_SPM
/*
* Exception vectors of the SPM shim layer. Architecturally they only need to
* be aligned to a 2K boundary, but they are placed in a page of their own so
* that individual memory permissions can be applied to them, so the alignment
* actually required is the page size (4K).
*
* There is no need to include this in the RO section of BL31 because it is
* not accessed by BL31 itself.
*/
spm_shim_exceptions : ALIGN(PAGE_SIZE) {
__SPM_SHIM_EXCEPTIONS_START__ = .;
*(.spm_shim_exceptions)
. = NEXT(PAGE_SIZE);
__SPM_SHIM_EXCEPTIONS_END__ = .;
} >RAM
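/*
* Illustration: the SPM consumes this region by programming its start
* address as the shim's EL1 vector base. A hedged, simplified sketch (the
* real code stores the value into the secure partition's saved EL1 context
* rather than writing the register directly):
*
*   IMPORT_SYM(uintptr_t, __SPM_SHIM_EXCEPTIONS_START__, SPM_SHIM_VBAR);
*   write_vbar_el1(SPM_SHIM_VBAR);
*
* The PAGE_SIZE alignment of the section already satisfies the 2K alignment
* that VBAR_EL1 requires.
*/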
#endif
/*
* Define a linker symbol to mark start of the RW memory area for this
* image.
*/
__RW_START__ = . ;
/*
* .data must be placed at a lower address than the stacks if the stack
* protector is enabled. Alternatively, the .data.stack_protector_canary
* section can be placed independently of the main .data section.
*/
.data . : {
__DATA_START__ = .;
*(.data*)
__DATA_END__ = .;
} >RAM
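/*
* Illustration: when the stack protector is enabled, the canary can opt into
* the .data.stack_protector_canary input section mentioned above so its
* placement is controlled explicitly. A hedged sketch of such a definition:
*
*   u_register_t __stack_chk_guard
*       __attribute__((section(".data.stack_protector_canary")));
*
* __stack_chk_guard is the symbol the compiler's -fstack-protector code
* expects; it must be initialised early in boot, before protected functions
* start running.
*/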
#ifdef BL31_PROGBITS_LIMIT
ASSERT(. <= BL31_PROGBITS_LIMIT, "BL31 progbits has exceeded its limit.")
#endif
stacks (NOLOAD) : {
__STACKS_START__ = .;
*(tzfw_normal_stacks)
__STACKS_END__ = .;
} >RAM
/*
* The .bss section gets initialised to 0 at runtime.
* Its base address should be 16-byte aligned for better performance of the
* zero-initialization code.
*/
.bss (NOLOAD) : ALIGN(16) {
__BSS_START__ = .;
*(.bss*)
*(COMMON)
#if !USE_COHERENT_MEM
/*
* Bakery locks are stored in normal .bss memory
*
* Each lock's data is spread across multiple cache lines, one per CPU,
* but multiple locks can share the same cache line.
* The compiler will allocate enough memory for one CPU's bakery locks; the
* remaining cache lines are allocated by the linker script.
*/
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__BAKERY_LOCK_START__ = .;
*(bakery_lock)
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(. - __BAKERY_LOCK_START__);
. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1));
__BAKERY_LOCK_END__ = .;
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
ASSERT(__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE,
"PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#endif
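/*
* Illustration: given the per-CPU layout built above, the copy of a bakery
* lock that belongs to a given CPU is found by stepping from the lock's
* link-time address in whole per-CPU chunks, mirroring what
* bakery_lock_normal.c does. A hedged sketch:
*
*   IMPORT_SYM(uintptr_t, __PERCPU_BAKERY_LOCK_SIZE__,
*              PERCPU_BAKERY_LOCK_SIZE);
*
*   static inline uintptr_t percpu_lock_addr(uintptr_t lock_base,
*                                            unsigned int cpu_ix)
*   {
*       return lock_base + ((uintptr_t)cpu_ix * PERCPU_BAKERY_LOCK_SIZE);
*   }
*/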
#endif
#if ENABLE_PMF
/*
* Time-stamps are stored in normal .bss memory
*
* The compiler will allocate enough memory for one CPU's time-stamps; the
* remaining memory for the other CPUs is allocated by the linker script.
*/
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PMF_TIMESTAMP_START__ = .;
KEEP(*(pmf_timestamp_array))
. = ALIGN(CACHE_WRITEBACK_GRANULE);
__PMF_PERCPU_TIMESTAMP_END__ = .;
__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__);
. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1));
__PMF_TIMESTAMP_END__ = .;
#endif /* ENABLE_PMF */
__BSS_END__ = .;
} >RAM
/*
* The xlat_table section is for full, aligned page tables (4K).
* Removing them from .bss avoids forcing 4K alignment on
* the .bss section and eliminates the unnecessary zero init.
*/
xlat_table (NOLOAD) : {
#if ENABLE_SPM
__SP_IMAGE_XLAT_TABLES_START__ = .;
*secure_partition*.o(xlat_table)
/* Make sure that the rest of the page is empty. */
. = NEXT(PAGE_SIZE);
__SP_IMAGE_XLAT_TABLES_END__ = .;
#endif
*(xlat_table)
} >RAM
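/*
* Illustration: translation tables land in this section through a section
* attribute on their C definition. A hedged sketch (the array bounds and
* size macros are assumed from the xlat_tables library):
*
*   static uint64_t xlat_tables[MAX_XLAT_TABLES][XLAT_TABLE_ENTRIES]
*       __attribute__((aligned(XLAT_TABLE_SIZE), section("xlat_table")));
*
* Keeping the tables out of .bss is what avoids forcing their 4K alignment
* onto .bss and skips zero-initialising memory the xlat code rewrites anyway.
*/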
#if USE_COHERENT_MEM
/*
* The base address of the coherent memory section must be page-aligned (4K)
* to guarantee that the coherent data are stored on their own pages and
* are not mixed with normal data. This is required to set up the correct
* memory attributes for the coherent data page tables.
*/
coherent_ram (NOLOAD) : ALIGN(PAGE_SIZE) {
__COHERENT_RAM_START__ = .;
/*
* Bakery locks are stored in coherent memory
*
* Each lock's data is contiguous and fully allocated by the compiler
*/
*(bakery_lock)
*(tzfw_coherent_mem)
__COHERENT_RAM_END_UNALIGNED__ = .;
/*
* Memory page(s) mapped to this section will be marked
* as device memory. No other unexpected data must creep in.
* Ensure the rest of the current memory page is unused.
*/
. = NEXT(PAGE_SIZE);
__COHERENT_RAM_END__ = .;
} >RAM
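/*
* Illustration: data is placed in this region through a section attribute on
* its definition. A hedged sketch with a hypothetical variable:
*
*   static uint32_t shared_flags[PLATFORM_CORE_COUNT]
*       __attribute__((section("tzfw_coherent_mem")));
*
* Because the whole page is mapped as device memory, anything placed here is
* accessed uncached, which is why the section must stay page-aligned and
* separate from normal data.
*/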
#endif
/*
* Define a linker symbol to mark end of the RW memory area for this
* image.
*/
__RW_END__ = .;
__BL31_END__ = .;
__BSS_SIZE__ = SIZEOF(.bss);
#if USE_COHERENT_MEM
__COHERENT_RAM_UNALIGNED_SIZE__ =
__COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif
ASSERT(. <= BL31_LIMIT, "BL31 image has exceeded its limit.")
}