Mirror of https://github.com/CTCaer/switch-l4t-atf.git (synced 2024-12-13 21:58:51 +00:00)
Commit 308d359b26

Introduce a zeromem_dczva function on AArch64 that can handle unaligned addresses and makes use of the DC ZVA instruction to zero a whole block at a time. This zeroing takes place directly in the cache to speed it up without doing external memory accesses.

Remove the zeromem16 function on AArch64 and replace it with an alias to zeromem. The zeromem16 function is now deprecated.

Remove the 16-byte alignment constraint on __BSS_START__ in firmware-design.md as it is no longer mandatory (it existed to comply with zeromem16's requirements). Change the 16-byte alignment constraints in SP_MIN's linker script to an 8-byte alignment constraint, as the AArch32 zeromem implementation is now more efficient on 8-byte aligned addresses.

Introduce zero_normalmem and zeromem helpers in a platform-agnostic header, implemented as follows:

* AArch32:
  * zero_normalmem: zero using usual data accesses
  * zeromem: alias for zero_normalmem
* AArch64:
  * zero_normalmem: zero normal memory using the DC ZVA instruction (needs the MMU enabled)
  * zeromem: zero using usual data accesses

Usage guidelines: in most cases, zero_normalmem should be preferred. There are two scenarios where zeromem (or memset) must be used instead:

* Code that must run with the MMU disabled (which means all memory is considered device memory for data accesses).
* Code that fills device memory with null bytes.

Optionally, the following rule can be applied if performance is important:

* Code zeroing small areas (a few bytes) that are not secrets should use memset to take advantage of compiler optimizations.

Note: code zeroing security-critical information should use zero_normalmem/zeromem instead of memset, to avoid removal by compiler optimizations in some cases or by misbehaving versions of GCC.

Fixes ARM-software/tf-issues#408

Change-Id: Iafd9663fc1070413c3e1904e54091cf60effaa82
Signed-off-by: Douglas Raillard <douglas.raillard@arm.com>
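The usage guidelines above are easier to see in code. Below is a minimal, hypothetical C sketch: the buffer names and the wipe_buffers helper are invented for illustration, and the zero_normalmem/zeromem declarations are assumed to come from include/lib/utils.h as introduced by this commit.

#include <string.h>
#include <utils.h>		/* assumed home of zero_normalmem() and zeromem() */

static unsigned char key_buf[32];	/* secret, lives in normal (cached) memory */
static unsigned char scratch[16];	/* small and not security-sensitive */

static void wipe_buffers(void *dev_mailbox, size_t mailbox_len)
{
	/* Preferred in most cases: runs with the MMU enabled, uses DC ZVA on
	 * AArch64, and is not removed by compiler optimizations. */
	zero_normalmem(key_buf, sizeof(key_buf));

	/* Device memory (or code running with the MMU disabled) must not rely
	 * on DC ZVA, so fall back to plain data accesses. */
	zeromem(dev_mailbox, mailbox_len);

	/* Small, non-secret area: memset lets the compiler optimize freely. */
	memset(scratch, 0, sizeof(scratch));
}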
199 lines · 6.2 KiB · ArmAsm
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <platform_def.h>

OUTPUT_FORMAT(PLATFORM_LINKER_FORMAT)
OUTPUT_ARCH(PLATFORM_LINKER_ARCH)
ENTRY(bl1_entrypoint)

MEMORY {
    ROM (rx): ORIGIN = BL1_RO_BASE, LENGTH = BL1_RO_LIMIT - BL1_RO_BASE
    RAM (rwx): ORIGIN = BL1_RW_BASE, LENGTH = BL1_RW_LIMIT - BL1_RW_BASE
}

SECTIONS
{
    . = BL1_RO_BASE;
    ASSERT(. == ALIGN(4096),
           "BL1_RO_BASE address is not aligned on a page boundary.")

#if SEPARATE_CODE_AND_RODATA
    .text . : {
        __TEXT_START__ = .;
        *bl1_entrypoint.o(.text*)
        *(.text*)
        *(.vectors)
        . = NEXT(4096);
        __TEXT_END__ = .;
    } >ROM

    .rodata . : {
        __RODATA_START__ = .;
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        /*
         * No need to pad out the .rodata section to a page boundary. Next is
         * the .data section, which can be mapped in ROM with the same memory
         * attributes as the .rodata section.
         */
        __RODATA_END__ = .;
    } >ROM
#else
    ro . : {
        __RO_START__ = .;
        *bl1_entrypoint.o(.text*)
        *(.text*)
        *(.rodata*)

        /* Ensure 8-byte alignment for descriptors and ensure inclusion */
        . = ALIGN(8);
        __PARSER_LIB_DESCS_START__ = .;
        KEEP(*(.img_parser_lib_descs))
        __PARSER_LIB_DESCS_END__ = .;

        /*
         * Ensure 8-byte alignment for cpu_ops so that its fields are also
         * aligned. Also ensure cpu_ops inclusion.
         */
        . = ALIGN(8);
        __CPU_OPS_START__ = .;
        KEEP(*(cpu_ops))
        __CPU_OPS_END__ = .;

        *(.vectors)
        __RO_END__ = .;
    } >ROM
#endif

    ASSERT(__CPU_OPS_END__ > __CPU_OPS_START__,
           "cpu_ops not defined for this platform.")

    /*
     * The .data section gets copied from ROM to RAM at runtime.
     * Its LMA must be 16-byte aligned.
     * Its VMA must be page-aligned as it marks the first read/write page.
     */
    . = BL1_RW_BASE;
    ASSERT(. == ALIGN(4096),
           "BL1_RW_BASE address is not aligned on a page boundary.")
    .data . : ALIGN(16) {
        __DATA_RAM_START__ = .;
        *(.data*)
        __DATA_RAM_END__ = .;
    } >RAM AT>ROM

    stacks . (NOLOAD) : {
        __STACKS_START__ = .;
        *(tzfw_normal_stacks)
        __STACKS_END__ = .;
    } >RAM

    /*
     * The .bss section gets initialised to 0 at runtime.
     * Its base address should be 16-byte aligned for better performance of the
     * zero-initialization code.
     */
    .bss : ALIGN(16) {
        __BSS_START__ = .;
        *(.bss*)
        *(COMMON)
        __BSS_END__ = .;
    } >RAM

    /*
     * The xlat_table section is for full, aligned page tables (4K).
     * Removing them from .bss avoids forcing 4K alignment on the .bss
     * section and eliminates the unnecessary zero init.
     */
    xlat_table (NOLOAD) : {
        *(xlat_table)
    } >RAM

#if USE_COHERENT_MEM
    /*
     * The base address of the coherent memory section must be page-aligned (4K)
     * to guarantee that the coherent data are stored on their own pages and
     * are not mixed with normal data. This is required to set up the correct
     * memory attributes for the coherent data page tables.
     */
    coherent_ram (NOLOAD) : ALIGN(4096) {
        __COHERENT_RAM_START__ = .;
        *(tzfw_coherent_mem)
        __COHERENT_RAM_END_UNALIGNED__ = .;
        /*
         * Memory page(s) mapped to this section will be marked
         * as device memory. No other unexpected data must creep in.
         * Ensure the rest of the current memory page is unused.
         */
        . = NEXT(4096);
        __COHERENT_RAM_END__ = .;
    } >RAM
#endif

    __BL1_RAM_START__ = ADDR(.data);
    __BL1_RAM_END__ = .;

    __DATA_ROM_START__ = LOADADDR(.data);
    __DATA_SIZE__ = SIZEOF(.data);

    /*
     * The .data section is the last PROGBITS section so its end marks the end
     * of BL1's actual content in Trusted ROM.
     */
    __BL1_ROM_END__ = __DATA_ROM_START__ + __DATA_SIZE__;
    ASSERT(__BL1_ROM_END__ <= BL1_RO_LIMIT,
           "BL1's ROM content has exceeded its limit.")

    __BSS_SIZE__ = SIZEOF(.bss);

#if USE_COHERENT_MEM
    __COHERENT_RAM_UNALIGNED_SIZE__ =
        __COHERENT_RAM_END_UNALIGNED__ - __COHERENT_RAM_START__;
#endif

    ASSERT(. <= BL1_RW_LIMIT, "BL1's RW section has exceeded its limit.")
}
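For context on how the symbols exported by this script are consumed: BL1's early initialization copies the .data image from its ROM load address to RAM and zeroes .bss, using the boundary symbols defined above. The real entry path does the equivalent in assembly; the following is only a hedged C sketch under that assumption, with a hypothetical helper name and the usual linker-symbol declaration idiom.

#include <string.h>
#include <utils.h>		/* assumed home of zeromem() */

/* Symbols defined by the linker script above. */
extern char __DATA_ROM_START__[], __DATA_RAM_START__[], __DATA_SIZE__[];
extern char __BSS_START__[], __BSS_END__[];

static void bl1_init_c_runtime_sketch(void)	/* hypothetical helper */
{
	/* Copy initialised data from its load address in ROM (LMA) to its
	 * run address in RAM (VMA). */
	memcpy(__DATA_RAM_START__, __DATA_ROM_START__, (size_t)__DATA_SIZE__);

	/* Zero .bss. The MMU is still off at this point, so zeromem (plain
	 * data accesses) is used rather than zero_normalmem. */
	zeromem(__BSS_START__, (size_t)(__BSS_END__ - __BSS_START__));
}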