Merge pull request #1152 from jeenu-arm/ehf-and-sdei

EHF and SDEI
This commit is contained in:
davidcunado-arm 2017-11-13 10:58:40 +00:00 committed by GitHub
commit c195f1a705
44 changed files with 3891 additions and 30 deletions


@ -32,6 +32,20 @@ ifeq (${ENABLE_PMF}, 1)
BL31_SOURCES += lib/pmf/pmf_main.c
endif
ifeq (${EL3_EXCEPTION_HANDLING},1)
BL31_SOURCES += bl31/ehf.c
endif
ifeq (${SDEI_SUPPORT},1)
ifeq (${EL3_EXCEPTION_HANDLING},0)
$(error EL3_EXCEPTION_HANDLING must be 1 for SDEI support)
endif
BL31_SOURCES += services/std_svc/sdei/sdei_event.c \
services/std_svc/sdei/sdei_intr_mgmt.c \
services/std_svc/sdei/sdei_main.c \
services/std_svc/sdei/sdei_state.c
endif
BL31_LINKERFILE := bl31/bl31.ld.S
# Flag used to indicate if Crash reporting via console should be included
@ -41,4 +55,9 @@ CRASH_REPORTING := $(DEBUG)
endif
$(eval $(call assert_boolean,CRASH_REPORTING))
$(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING))
$(eval $(call assert_boolean,SDEI_SUPPORT))
$(eval $(call add_define,CRASH_REPORTING))
$(eval $(call add_define,EL3_EXCEPTION_HANDLING))
$(eval $(call add_define,SDEI_SUPPORT))


@ -12,6 +12,7 @@
#include <console.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
@ -79,6 +80,11 @@ void bl31_main(void)
/* Initialise helper libraries */
bl31_lib_init();
#if EL3_EXCEPTION_HANDLING
INFO("BL31: Initialising Exception Handling Framework\n");
ehf_init();
#endif
/* Initialize the runtime services e.g. psci. */
INFO("BL31: Initializing runtime services\n");
runtime_svc_init();

bl31/ehf.c Normal file (504 lines)

@ -0,0 +1,504 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* Exception handlers at EL3, their priority levels, and management.
*/
#include <assert.h>
#include <cpu_data.h>
#include <debug.h>
#include <ehf.h>
#include <gic_common.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <pubsub_events.h>
/* Output EHF logs as verbose */
#define EHF_LOG(...) VERBOSE("EHF: " __VA_ARGS__)
#define EHF_INVALID_IDX (-1)
/* For a valid handler, return the actual function pointer; otherwise, 0. */
#define RAW_HANDLER(h) \
((ehf_handler_t) ((h & _EHF_PRI_VALID) ? (h & ~_EHF_PRI_VALID) : 0))
#define PRI_BIT(idx) (((ehf_pri_bits_t) 1) << idx)
/*
* Convert index into secure priority using the platform-defined priority bits
* field.
*/
#define IDX_TO_PRI(idx) \
((idx << (7 - exception_data.pri_bits)) & 0x7f)
/* Check whether a given index is valid */
#define IS_IDX_VALID(idx) \
((exception_data.ehf_priorities[idx].ehf_handler & _EHF_PRI_VALID) != 0)
/* Returns whether given priority is in secure priority range */
#define IS_PRI_SECURE(pri) ((pri & 0x80) == 0)
/* To be defined by the platform */
extern const ehf_priorities_t exception_data;
/* Translate priority to the index in the priority array */
static int pri_to_idx(unsigned int priority)
{
int idx;
idx = EHF_PRI_TO_IDX(priority, exception_data.pri_bits);
assert((idx >= 0) && (idx < exception_data.num_priorities));
assert(IS_IDX_VALID(idx));
return idx;
}
/* Return whether there are outstanding priority activations */
static int has_valid_pri_activations(pe_exc_data_t *pe_data)
{
return pe_data->active_pri_bits != 0;
}
static pe_exc_data_t *this_cpu_data(void)
{
return &get_cpu_data(ehf_data);
}
/*
* Return the current priority index of this CPU. If no priority is active,
* return EHF_INVALID_IDX.
*/
static int get_pe_highest_active_idx(pe_exc_data_t *pe_data)
{
if (!has_valid_pri_activations(pe_data))
return EHF_INVALID_IDX;
/* Current priority is the right-most bit */
return __builtin_ctz(pe_data->active_pri_bits);
}
/*
* Mark priority active by setting the corresponding bit in active_pri_bits and
* programming the priority mask.
*
* This API is to be used as part of delegating to lower ELs other than for
* interrupts; e.g. while handling synchronous exceptions.
*
* This API is expected to be invoked before restoring context (Secure or
* Non-secure) in preparation for the respective dispatch.
*/
void ehf_activate_priority(unsigned int priority)
{
int idx, cur_pri_idx;
unsigned int old_mask, run_pri;
pe_exc_data_t *pe_data = this_cpu_data();
/*
* Query interrupt controller for the running priority, or idle priority
* if no interrupts are being handled. The requested priority must be
* less (higher priority) than the active running priority.
*/
run_pri = plat_ic_get_running_priority();
if (priority >= run_pri) {
ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
run_pri, priority);
panic();
}
/*
* If there were priority activations already, the requested priority
* must be less (higher priority) than the current highest priority
* activation so far.
*/
cur_pri_idx = get_pe_highest_active_idx(pe_data);
idx = pri_to_idx(priority);
if ((cur_pri_idx != EHF_INVALID_IDX) && (idx >= cur_pri_idx)) {
ERROR("Activation priority mismatch: req=0x%x current=0x%x\n",
priority, IDX_TO_PRI(cur_pri_idx));
panic();
}
/* Set the bit corresponding to the requested priority */
pe_data->active_pri_bits |= PRI_BIT(idx);
/*
* Program priority mask for the activated level. Check that the new
* priority mask is setting a higher priority level than the existing
* mask.
*/
old_mask = plat_ic_set_priority_mask(priority);
if (priority >= old_mask) {
ERROR("Requested priority (0x%x) lower than Priority Mask (0x%x)\n",
priority, old_mask);
panic();
}
/*
* If this is the first activation, save the priority mask. This will be
* restored after the last deactivation.
*/
if (cur_pri_idx == EHF_INVALID_IDX)
pe_data->init_pri_mask = old_mask;
EHF_LOG("activate prio=%d\n", get_pe_highest_active_idx(pe_data));
}
/*
* Mark priority inactive by clearing the corresponding bit in active_pri_bits,
* and programming the priority mask.
*
* This API is expected to be used as part of delegating to lower ELs other
* than for interrupts; e.g. while handling synchronous exceptions.
*
* This API is expected to be invoked after saving context (Secure or
* Non-secure), having concluded the respective dispatch.
*/
void ehf_deactivate_priority(unsigned int priority)
{
int idx, cur_pri_idx;
pe_exc_data_t *pe_data = this_cpu_data();
unsigned int old_mask, run_pri;
/*
* Query interrupt controller for the running priority, or idle priority
* if no interrupts are being handled. The requested priority must be
* less (higher priority) than the active running priority.
*/
run_pri = plat_ic_get_running_priority();
if (priority >= run_pri) {
ERROR("Running priority higher (0x%x) than requested (0x%x)\n",
run_pri, priority);
panic();
}
/*
* Deactivation is allowed only when there are priority activations, and
* the deactivation priority level must match the current activated
* priority.
*/
cur_pri_idx = get_pe_highest_active_idx(pe_data);
idx = pri_to_idx(priority);
if ((cur_pri_idx == EHF_INVALID_IDX) || (idx != cur_pri_idx)) {
ERROR("Deactivation priority mismatch: req=0x%x current=0x%x\n",
priority, IDX_TO_PRI(cur_pri_idx));
panic();
}
/* Clear bit corresponding to highest priority */
pe_data->active_pri_bits &= (pe_data->active_pri_bits - 1);
/*
* Restore priority mask corresponding to the next priority, or the
* one stashed earlier if there are no more to deactivate.
*/
idx = get_pe_highest_active_idx(pe_data);
if (idx == EHF_INVALID_IDX)
old_mask = plat_ic_set_priority_mask(pe_data->init_pri_mask);
else
old_mask = plat_ic_set_priority_mask(priority);
if (old_mask >= priority) {
ERROR("Deactivation priority (0x%x) lower than Priority Mask (0x%x)\n",
priority, old_mask);
panic();
}
EHF_LOG("deactivate prio=%d\n", get_pe_highest_active_idx(pe_data));
}
/*
* After leaving Non-secure world, stash current Non-secure Priority Mask, and
* set Priority Mask to the highest Non-secure priority so that Non-secure
* interrupts cannot preempt Secure execution.
*
* If the current running priority is in the secure range, or if there are
* outstanding priority activations, this function does nothing.
*
* This function subscribes to the 'cm_exited_normal_world' event published by
* the Context Management Library.
*/
static void *ehf_exited_normal_world(const void *arg)
{
unsigned int run_pri;
pe_exc_data_t *pe_data = this_cpu_data();
/* If the running priority is in the secure range, do nothing */
run_pri = plat_ic_get_running_priority();
if (IS_PRI_SECURE(run_pri))
return 0;
/* Do nothing if there are explicit activations */
if (has_valid_pri_activations(pe_data))
return 0;
assert(pe_data->ns_pri_mask == 0);
pe_data->ns_pri_mask =
plat_ic_set_priority_mask(GIC_HIGHEST_NS_PRIORITY);
/* The previous Priority Mask is not expected to be in secure range */
if (IS_PRI_SECURE(pe_data->ns_pri_mask)) {
ERROR("Priority Mask (0x%x) already in secure range\n",
pe_data->ns_pri_mask);
panic();
}
EHF_LOG("Priority Mask: 0x%x => 0x%x\n", pe_data->ns_pri_mask,
GIC_HIGHEST_NS_PRIORITY);
return 0;
}
/*
* Conclude Secure execution and prepare for return to Non-secure world. Restore
* the Non-secure Priority Mask previously stashed upon leaving Non-secure
* world.
*
* If the current running priority is in the secure range, or if there are
* outstanding priority activations, this function does nothing.
*
* This function subscribes to the 'cm_entering_normal_world' event published by
* the Context Management Library.
*/
static void *ehf_entering_normal_world(const void *arg)
{
unsigned int old_pmr, run_pri;
pe_exc_data_t *pe_data = this_cpu_data();
/* If the running priority is in the secure range, do nothing */
run_pri = plat_ic_get_running_priority();
if (IS_PRI_SECURE(run_pri))
return 0;
/*
* If there are explicit activations, do nothing. The Priority Mask will
* be restored upon the last deactivation.
*/
if (has_valid_pri_activations(pe_data))
return 0;
/* Do nothing if we don't have a valid Priority Mask to restore */
if (pe_data->ns_pri_mask == 0)
return 0;
old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
/*
* When exiting secure world, the current Priority Mask must be
* GIC_HIGHEST_NS_PRIORITY (as set during entry), or the Non-secure
* priority mask set upon calling ehf_allow_ns_preemption()
*/
if ((old_pmr != GIC_HIGHEST_NS_PRIORITY) &&
(old_pmr != pe_data->ns_pri_mask)) {
ERROR("Invalid Priority Mask (0x%x) restored\n", old_pmr);
panic();
}
EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);
pe_data->ns_pri_mask = 0;
return 0;
}
/*
* Program Priority Mask to the original Non-secure priority such that
* Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC
* calls.
*
* This API is expected to be invoked before delegating a yielding SMC to Secure
* EL1. I.e. within the window of secure execution after Non-secure context is
* saved (after entry into EL3) and Secure context is restored (before entering
* Secure EL1).
*/
void ehf_allow_ns_preemption(void)
{
unsigned int old_pmr __unused;
pe_exc_data_t *pe_data = this_cpu_data();
/*
* We should have been notified earlier of entering secure world, and
* therefore have stashed the Non-secure priority mask.
*/
assert(pe_data->ns_pri_mask != 0);
/* Make sure no priority levels are active when requesting this */
if (has_valid_pri_activations(pe_data)) {
ERROR("PE %lx has priority activations: 0x%x\n",
read_mpidr_el1(), pe_data->active_pri_bits);
panic();
}
old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);
EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);
pe_data->ns_pri_mask = 0;
}
/*
* Return whether Secure execution has explicitly allowed Non-secure interrupts
* to preempt itself, viz. during Yielding SMC calls.
*/
unsigned int ehf_is_ns_preemption_allowed(void)
{
unsigned int run_pri;
pe_exc_data_t *pe_data = this_cpu_data();
/* If running priority is in secure range, return false */
run_pri = plat_ic_get_running_priority();
if (IS_PRI_SECURE(run_pri))
return 0;
/*
* If Non-secure preemption was permitted by calling
* ehf_allow_ns_preemption() earlier:
*
* - There wouldn't have been priority activations;
* - We would have cleared the stashed Non-secure Priority Mask.
*/
if (has_valid_pri_activations(pe_data))
return 0;
if (pe_data->ns_pri_mask != 0)
return 0;
return 1;
}
/*
* Top-level EL3 interrupt handler.
*/
static uint64_t ehf_el3_interrupt_handler(uint32_t id, uint32_t flags,
void *handle, void *cookie)
{
int pri, idx, intr, intr_raw, ret = 0;
ehf_handler_t handler;
/*
* Top-level interrupt type handler from Interrupt Management Framework
* doesn't acknowledge the interrupt; so the interrupt ID must be
* invalid.
*/
assert(id == INTR_ID_UNAVAILABLE);
/*
* Acknowledge interrupt. Proceed with handling only for valid interrupt
* IDs. This situation may arise because of Interrupt Management
* Framework identifying an EL3 interrupt, but before it's been
* acknowledged here, the interrupt was either deasserted, or there was
* a higher-priority interrupt of another type.
*/
intr_raw = plat_ic_acknowledge_interrupt();
intr = plat_ic_get_interrupt_id(intr_raw);
if (intr == INTR_ID_UNAVAILABLE)
return 0;
/* Having acknowledged the interrupt, get the running priority */
pri = plat_ic_get_running_priority();
/* Check EL3 interrupt priority is in secure range */
assert(IS_PRI_SECURE(pri));
/*
* Translate the priority to a descriptor index. We do this by masking
* and shifting the running priority value (platform-supplied).
*/
idx = pri_to_idx(pri);
/* Validate priority */
assert(pri == IDX_TO_PRI(idx));
handler = RAW_HANDLER(exception_data.ehf_priorities[idx].ehf_handler);
if (!handler) {
ERROR("No EL3 exception handler for priority 0x%x\n",
IDX_TO_PRI(idx));
panic();
}
/*
* Call registered handler. Pass the raw interrupt value to registered
* handlers.
*/
ret = handler(intr_raw, flags, handle, cookie);
return ret;
}
/*
* Initialize the EL3 exception handling.
*/
void ehf_init(void)
{
unsigned int flags = 0;
int ret __unused;
/* Ensure EL3 interrupts are supported */
assert(plat_ic_has_interrupt_type(INTR_TYPE_EL3));
/*
* Make sure that the priority bitmap has enough bits to represent the
* whole priority array.
*/
assert(exception_data.num_priorities <= (sizeof(ehf_pri_bits_t) * 8));
assert(exception_data.ehf_priorities);
/*
* Bit 7 of GIC priority must be 0 for secure interrupts. This means
* platforms must use at least 1 of the remaining 7 bits.
*/
assert((exception_data.pri_bits >= 1) && (exception_data.pri_bits < 8));
/* Route EL3 interrupts when in Secure and Non-secure. */
set_interrupt_rm_flag(flags, NON_SECURE);
set_interrupt_rm_flag(flags, SECURE);
/* Register handler for EL3 interrupts */
ret = register_interrupt_type_handler(INTR_TYPE_EL3,
ehf_el3_interrupt_handler, flags);
assert(ret == 0);
}
/*
* Register a handler at the supplied priority. Registration is allowed only if
* a handler hasn't been registered before, or one wasn't provided at build
* time. The priority for which the handler is being registered must also accord
* with the platform-supplied data.
*/
void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler)
{
int idx;
/* Sanity check for handler */
assert(handler != NULL);
/* Handler ought to be 4-byte aligned */
assert((((uintptr_t) handler) & 3) == 0);
/* Ensure we register for valid priority */
idx = pri_to_idx(pri);
assert(idx < exception_data.num_priorities);
assert(IDX_TO_PRI(idx) == pri);
/* Return failure if a handler was already registered */
if (exception_data.ehf_priorities[idx].ehf_handler != _EHF_NO_HANDLER) {
ERROR("Handler already registered for priority 0x%x\n", pri);
panic();
}
/*
* Install handler, and retain the valid bit. We assume that the handler
* is 4-byte aligned, which is usually the case.
*/
exception_data.ehf_priorities[idx].ehf_handler =
(((uintptr_t) handler) | _EHF_PRI_VALID);
EHF_LOG("register pri=0x%x handler=%p\n", pri, handler);
}
SUBSCRIBE_TO_EVENT(cm_entering_normal_world, ehf_entering_normal_world);
SUBSCRIBE_TO_EVENT(cm_exited_normal_world, ehf_exited_normal_world);


@ -0,0 +1,13 @@
#!/bin/bash
# Convert all PlantUML files in this directory to SVG files. The plantuml_jar
# environment variable must be set to the path of the PlantUML JAR file.
if [ -z "$plantuml_jar" ]; then
echo "Usage: plantuml_jar=/path/to/plantuml.jar $0 *.puml" >&2
exit 1
fi
java -jar "$plantuml_jar" -nometadata -tsvg "$@"
# vim:set noet sts=8 tw=80:


@ -0,0 +1,45 @@
/'
' Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
'
' SPDX-License-Identifier: BSD-3-Clause
'/
@startuml
autonumber "<b>[#]</b>"
participant "SDEI client" as EL2
participant EL3
participant SEL1
activate EL2
EL2->EL3: **SDEI_EVENT_REGISTER**(ev, handler, ...)
EL3->EL2: success
EL2->EL3: **SDEI_EVENT_ENABLE**(ev)
EL3->EL2: success
EL2->EL3: **SDEI_PE_UNMASK**()
EL3->EL2: 1
... <<Business as usual>> ...
EL3<--]: **CRITICAL EVENT**
activate EL3 #red
note over EL3: Critical event triage
EL3->SEL1: dispatch
activate SEL1 #salmon
note over SEL1: Critical event handling
SEL1->EL3: done
deactivate SEL1
EL3-->EL3: sdei_dispatch_event(ev)
note over EL3: Prepare SDEI dispatch
EL3->EL2: dispatch
activate EL2 #salmon
note over EL2: SDEI handler
EL2->EL3: **SDEI_EVENT_COMPLETE()**
deactivate EL2
note over EL3: Complete SDEI dispatch
EL3->EL2: resumes preempted execution
deactivate EL3
... <<Normal execution resumes>> ...
@enduml

(Diff suppressed: generated SVG image added, 18 KiB.)


@ -0,0 +1,43 @@
/'
' Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
'
' SPDX-License-Identifier: BSD-3-Clause
'/
@startuml
autonumber "<b>[#]</b>"
participant "SDEI client" as EL2
participant EL3
participant "SDEI interrupt source" as SDEI
activate EL2
EL2->EL3: **SDEI_INTERRUPT_BIND**(irq)
EL3->EL2: event number: ev
EL2->EL3: **SDEI_EVENT_REGISTER**(ev, handler, ...)
EL3->EL2: success
EL2->EL3: **SDEI_EVENT_ENABLE**(ev)
EL3->EL2: success
EL2->EL3: **SDEI_PE_UNMASK**()
EL3->EL2: 1
... <<Business as usual>> ...
SDEI-->EL3: SDEI interrupt
activate SDEI #salmon
activate EL3 #red
note over EL3: Prepare SDEI dispatch
EL3->EL2: dispatch
activate EL2 #salmon
note over EL2: SDEI handler
EL2->EL3: **SDEI_EVENT_COMPLETE()**
deactivate EL2
note over EL3: Complete SDEI dispatch
EL3-->SDEI: EOI
deactivate SDEI
EL3->EL2: resumes preempted execution
deactivate EL3
... <<Normal execution resumes>> ...
@enduml

(Diff suppressed: generated SVG image added, 17 KiB.)


@ -292,6 +292,22 @@ inserts to order memory updates before updating mask, then writes to the GIC
*Priority Mask Register*, and make sure memory updates are visible before
potential trigger due to mask update.
Function: unsigned int plat_ic_get_interrupt_id(unsigned int raw); [optional]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
Argument : unsigned int
Return : unsigned int
This API should extract and return the interrupt number from the raw value
obtained by acknowledging the interrupt (read using
``plat_ic_acknowledge_interrupt()``). If the interrupt ID is invalid, this API
should return ``INTR_ID_UNAVAILABLE``.
On ARM standard platforms using the GIC, the implementation of this API
extracts the interrupt ID field from the raw value read from the GIC when
acknowledging the interrupt.
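A minimal sketch of such an implementation is shown below. It assumes the GIC
driver exposes an interrupt ID mask and a spurious interrupt ID; the exact
constant names depend on the GIC driver in use:

::

    unsigned int plat_ic_get_interrupt_id(unsigned int raw)
    {
        /* Keep only the interrupt ID field of the raw acknowledged value */
        unsigned int id = raw & INT_ID_MASK;

        /* Report spurious or otherwise invalid IDs as unavailable */
        if (id == GIC_SPURIOUS_INTERRUPT)
            return INTR_ID_UNAVAILABLE;

        return id;
    }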
----
*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*


@ -1904,6 +1904,74 @@ calculated by the linker then a link time assertion is raised. A compile time
assertion is raised if the value of the constant is not aligned to the cache
line boundary.
SDEI porting requirements
~~~~~~~~~~~~~~~~~~~~~~~~~
The SDEI dispatcher requires the platform to provide the following macros and
functions, some of which are mandatory and others optional.
Macros
......
Macro: PLAT_SDEI_NORMAL_PRI [mandatory]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This macro must be defined to the EL3 exception priority level associated with
Normal SDEI events on the platform. This must have a higher value (therefore of
lower priority) than ``PLAT_SDEI_CRITICAL_PRI``.
Macro: PLAT_SDEI_CRITICAL_PRI [mandatory]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This macro must be defined to the EL3 exception priority level associated with
Critical SDEI events on the platform. This must have a lower value (therefore of
higher priority) than ``PLAT_SDEI_NORMAL_PRI``.
It's recommended that SDEI exception priorities in general are assigned the
lowest among Secure priorities. Among the SDEI exceptions, Critical SDEI
priority must be higher than Normal SDEI priority.
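For illustration, the ARM platforms in this patch define these two macros as
follows; other platforms may choose different values, subject to the ordering
constraint above:

::

    /* SDEI exception priorities used by ARM platforms in this patch */
    #define PLAT_SDEI_CRITICAL_PRI      0x60
    #define PLAT_SDEI_NORMAL_PRI        0x70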
Functions
.........
Function: int plat_sdei_validate_entry_point(uintptr_t ep) [optional]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
Argument: uintptr_t
Return: int
This function validates the address of client entry points provided for both
event registration and *Complete and Resume* SDEI calls. The function takes one
argument, which is the address of the handler the SDEI client requested to
register. The function must return ``0`` for successful validation, or ``-1``
upon failure.
The default implementation always returns ``0``. On ARM platforms, this function
is implemented to translate the entry point to physical address, and further to
ensure that the address is located in Non-secure DRAM.
Function: void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr) [optional]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
::
Argument: uint64_t
Argument: unsigned int
Return: void
The SDEI specification requires that a PE comes out of reset with the events masked.
The client therefore is expected to call ``PE_UNMASK`` to unmask SDEI events on
the PE. No SDEI events can be dispatched until such time.
Should a PE receive an interrupt that was bound to an SDEI event while the
events are masked on the PE, the dispatcher implementation invokes the function
``plat_sdei_handle_masked_trigger``. The MPIDR of the PE that received the
interrupt and the interrupt ID are passed as parameters.
The default implementation only prints out a warning message.
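A minimal sketch mirroring that default behaviour is shown below; the message
wording is illustrative:

::

    void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
    {
        /* Default behaviour: warn about the masked trigger and ignore it */
        WARN("SDEI: masked trigger of interrupt %u on PE 0x%llx\n", intr,
                (unsigned long long) mpidr);
    }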
Power State Coordination Interface (in BL31)
--------------------------------------------
@ -2479,14 +2547,17 @@ Function : plat\_ic\_acknowledge\_interrupt() [mandatory]
Return : uint32_t
This API is used by the CPU to indicate to the platform IC that processing of
the highest pending interrupt has begun. It should return the id of the
interrupt which is being processed.
the highest pending interrupt has begun. It should return the raw, unmodified
value obtained from the interrupt controller when acknowledging an interrupt.
The actual interrupt number shall be extracted from this raw value using the API
`plat_ic_get_interrupt_id()`__.
.. __: platform-interrupt-controller-API.rst#function-unsigned-int-plat-ic-get-interrupt-id-unsigned-int-raw-optional
This function in ARM standard platforms using GICv2, reads the *Interrupt
Acknowledge Register* (``GICC_IAR``). This changes the state of the highest
priority pending interrupt from pending to active in the interrupt controller.
It returns the value read from the ``GICC_IAR``. This value is the id of the
interrupt whose state has been changed.
It returns the value read from the ``GICC_IAR``, unmodified.
In the case of ARM standard platforms using GICv3, if the API is invoked
from EL3, the function reads the system register ``ICC_IAR0_EL1``, *Interrupt
@ -2494,7 +2565,7 @@ Acknowledge Register group 0*. If the API is invoked from S-EL1, the function
reads the system register ``ICC_IAR1_EL1``, *Interrupt Acknowledge Register
group 1*. The read changes the state of the highest pending interrupt from
pending to active in the interrupt controller. The value read is returned
and is the id of the interrupt whose state has been changed.
unmodified.
The TSP uses this API to start processing of the secure physical timer
interrupt.

docs/sdei.rst Normal file (367 lines)

@ -0,0 +1,367 @@
Software Delegated Exception Interface
======================================
.. section-numbering::
:suffix: .
.. contents::
:depth: 2
This document provides an overview of the SDEI dispatcher implementation in ARM
Trusted Firmware.
Introduction
------------
`Software Delegated Exception Interface`_ (SDEI) is an ARM specification for
Non-secure world to register handlers with firmware to receive notifications
about system events. Firmware first receives the system events by way of
asynchronous exceptions and, in response, arranges for the registered handler to
execute in the Non-secure EL.
Normal world software that interacts with the SDEI dispatcher (makes SDEI
requests and receives notifications) is referred to as the *SDEI Client*. A
client receives the event notification at the registered handler even when it
was executing with exceptions masked. The list of SDEI events available to the
client are specific to the platform [#std-event]_. See also `Determining client
EL`_.
.. _general SDEI dispatch:
The following figure depicts a general sequence involving an SDEI client executing
at EL2 and an event dispatch resulting from the triggering of a bound interrupt.
A commentary is provided below:
.. image:: plantuml/sdei_general.svg
As part of initialisation, the SDEI client binds a Non-secure interrupt [1], and
the SDEI dispatcher returns a platform dynamic event number [2]. The client then
registers a handler for that event [3], enables the event [5], and unmasks all
events on the current PE [7]. This sequence is typical of an SDEI client, but it
may involve additional SDEI calls.
At a later point in time, when the bound interrupt triggers [9], it's trapped to
EL3. The interrupt is handed over to the SDEI dispatcher, which then arranges to
execute the registered handler [10]. The client terminates its execution with
``SDEI_EVENT_COMPLETE`` [11], following which the dispatcher resumes the
original EL2 execution [13]. Note that the SDEI interrupt remains active until
the client handler completes, at which point EL3 does EOI [12].
SDEI events can be explicitly dispatched in response to other asynchronous
exceptions. See `Explicit dispatch of events`_.
The remainder of this document only discusses the design and implementation of
the SDEI dispatcher in ARM Trusted Firmware, and assumes that the reader is familiar
with the SDEI specification, the interfaces, and their requirements.
.. [#std-event] Except event 0, which is defined by the SDEI specification as a
standard event.
Defining events
---------------
A platform choosing to include the SDEI dispatcher must also define the events
available on the platform, along with their attributes.
The platform is expected to provide two arrays of event descriptors: one for
private events, and another for shared events. The SDEI dispatcher provides
``SDEI_PRIVATE_EVENT()`` and ``SDEI_SHARED_EVENT()`` macros to populate the
event descriptors. Both macros take 3 arguments:
- The event number: this must be a positive 32-bit integer.
- The interrupt number the event is bound to:
- If it's not applicable to an event, this shall be left as ``0``.
- If the event is dynamic, this should be specified as ``SDEI_DYN_IRQ``.
- A bit map of `Event flags`_.
To define event 0, the macro ``SDEI_DEFINE_EVENT_0()`` should be used. This
macro takes only one parameter: an SGI number to signal other PEs.
Once the event descriptor arrays are defined, they should be exported to the
SDEI dispatcher using the ``REGISTER_SDEI_MAP()`` macro, passing it the pointers
to the private and shared event descriptor arrays, respectively. Note that the
``REGISTER_SDEI_MAP()`` macro must be used in the same file where the arrays are
defined.
Regarding event descriptors:
- For Event 0:
- There must be exactly one descriptor in the private array, and none in the
shared array.
- The event should be defined using ``SDEI_DEFINE_EVENT_0()``.
- Must be bound to a Secure SGI on the platform.
- Statically bound shared and private interrupts must be bound to shared and
private interrupts on the platform, respectively. See the section on
`interrupt configuration`__.
.. __: `Configuration within Exception Handling Framework`_
- Both arrays should be one-dimensional. The ``REGISTER_SDEI_MAP()`` macro
takes care of replicating private events for each PE on the platform.
- Both arrays must be sorted in the increasing order of event number.
The SDEI specification doesn't have provisions for discovery of available events
on the platform. The list of events made available to the client, along with
their semantics, has to be communicated out of band; for example, through
Device Trees or firmware configuration tables.
See also `Event definition example`_.
Event flags
~~~~~~~~~~~
Event flags describe the properties of the event. They are bit maps that can be
``OR``\ ed to form parameters to macros that `define events`__.
.. __: `Defining events`_
- ``SDEI_MAPF_DYNAMIC``: Marks the event as dynamic. Dynamic events can be
bound to (or released from) any Non-secure interrupt at runtime via the
``SDEI_INTERRUPT_BIND`` and ``SDEI_INTERRUPT_RELEASE`` calls.
- ``SDEI_MAPF_BOUND``: Marks the event as statically bound to an interrupt.
These events cannot be re-bound at runtime.
- ``SDEI_MAPF_CRITICAL``: Marks the event as having *Critical* priority.
Without this flag, the event is assumed to have *Normal* priority.
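For instance, an entry in the private event array describing a statically
bound, Critical priority event could combine the flags as below; the event and
interrupt numbers are purely illustrative:

.. code:: c

    /* Private event 200, statically bound to PPI 24, Critical priority */
    SDEI_PRIVATE_EVENT(200, 24, SDEI_MAPF_BOUND | SDEI_MAPF_CRITICAL),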
Event definition example
------------------------
.. code:: c
static sdei_ev_map_t plat_private_sdei[] = {
/* Event 0 definition */
SDEI_DEFINE_EVENT_0(8),
/* PPI */
SDEI_PRIVATE_EVENT(8, 23, SDEI_MAPF_BOUND),
/* Dynamic private events */
SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_PRIVATE_EVENT(101, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC)
};
/* Shared event mappings */
static sdei_ev_map_t plat_shared_sdei[] = {
SDEI_SHARED_EVENT(804, 0, SDEI_MAPF_DYNAMIC),
/* Dynamic shared events */
SDEI_SHARED_EVENT(3000, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_SHARED_EVENT(3001, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC)
};
/* Export SDEI events */
REGISTER_SDEI_MAP(plat_private_sdei, plat_shared_sdei);
Configuration within Exception Handling Framework
-------------------------------------------------
The SDEI dispatcher functions alongside the Exception Handling Framework. This
means that the platform must assign priorities to both Normal and Critical SDEI
interrupts:
- Install priority descriptors for Normal and Critical SDEI interrupts (a
  sketch appears at the end of this section).
- For those interrupts that are statically bound (i.e. events defined as having
the ``SDEI_MAPF_BOUND`` property), enumerate their properties for the GIC
driver to configure interrupts accordingly.
The interrupts must be configured to target EL3. This means that they should
be configured as *Group 0*. Additionally, on GICv2 systems, the build option
``GICV2_G0_FOR_EL3`` must be set to ``1``.
See also `SDEI porting requirements`_.
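The following is a sketch of the priority descriptors referred to in the first
bullet above. ``PLAT_PRI_BITS`` is a placeholder for the number of priority
bits used by the platform; the equivalent ARM platform code appears later in
this patch:

.. code:: c

    /* Illustrative priority descriptors for Critical and Normal SDEI events */
    ehf_pri_desc_t plat_exceptions[] = {
    #if SDEI_SUPPORT
        EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
        EHF_PRI_DESC(PLAT_PRI_BITS, PLAT_SDEI_NORMAL_PRI),
    #endif
    };

    /* Register the descriptors with the Exception Handling Framework */
    EHF_REGISTER_PRIORITIES(plat_exceptions, ARRAY_SIZE(plat_exceptions), PLAT_PRI_BITS);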
Determining client EL
---------------------
The SDEI specification requires that the *physical* SDEI client executes in the
highest Non-secure EL implemented on the system. This means that the dispatcher
will only allow SDEI calls to be made from:
- EL2, if EL2 is implemented. The Hypervisor is expected to implement a
*virtual* SDEI dispatcher to support SDEI clients in Guest Operating Systems
executing in Non-secure EL1.
- Non-secure EL1, if EL2 is not implemented or disabled.
See the function ``sdei_client_el()`` in ``sdei_private.h``.
Explicit dispatch of events
---------------------------
Typically, an SDEI event dispatch is caused by the PE receiving interrupts that
are bound to an SDEI event. However, there are cases where the Secure world
requires dispatch of an SDEI event as a direct or indirect result of a past
activity, viz. receiving a Secure interrupt or an exception.
The SDEI dispatcher implementation provides ``sdei_dispatch_event()`` API for
this purpose. The API has the following signature:
::
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state);
- The parameter ``ev_num`` is the event number to dispatch;
- The parameter ``preempted_sec_state`` indicates the context that was
preempted. This must be either ``SECURE`` or ``NON_SECURE``.
The API returns ``0`` on success, or ``-1`` on failure.
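Below is a hedged usage sketch from an EL3 component. ``PLAT_SDEI_EXPLICIT_EVENT``
is a hypothetical private, statically defined (neither dynamic nor bound) event
number; the Secure context is assumed to have already been saved:

.. code:: c

    /* Request dispatch of a platform-defined private event to the client */
    if (sdei_dispatch_event(PLAT_SDEI_EXPLICIT_EVENT, SECURE) != 0) {
        /* Dispatch can legitimately fail; the caller must handle it */
        ERROR("SDEI explicit dispatch failed\n");
    }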
The following figure depicts a scenario involving explicit dispatch of an SDEI
event. A commentary is provided below:
.. image:: plantuml/sdei_explicit_dispatch.svg
As part of initialisation, the SDEI client registers a handler for a platform
event [1], enables the event [3], and unmasks the current PE [5]. Note that,
unlike in `general SDEI dispatch`_, this doesn't involve interrupt binding, as
bound or dynamic events can't be explicitly dispatched (see the section below).
At a later point in time, a critical event [#critical-event]_ is trapped into
EL3 [7]. EL3 performs a first-level triage of the event, and decides to dispatch
to Secure EL1 for further handling [8]. The Secure EL1 handling completes [9],
but EL3 intends to involve the Non-secure world in further handling, and
therefore explicitly dispatches an event [10] (which the client had already registered for
[1]). The rest of the sequence is similar to that in the `general SDEI
dispatch`_: the requested event is dispatched to the client (assuming all the
conditions are met), and when the handler completes, the preempted execution
resumes.
Conditions for event dispatch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
All of the following requirements must be met for the API to return ``0`` and
the event to be dispatched:
- SDEI events must be unmasked on the PE. I.e. the client must have called
``PE_UNMASK`` beforehand.
- Event 0 can't be dispatched.
- The event must neither be a dynamic event nor be bound to an interrupt.
- The event must be private to the PE.
- The event must have been registered for and enabled.
- A dispatch for the same event must not be outstanding. I.e. it hasn't already
been dispatched and is yet to be completed.
- The priority of the event (either Critical or Normal, as configured by the
platform at build-time) shouldn't cause priority inversion. This means:
- If it's of Normal priority, neither Normal nor Critical priority dispatch
must be outstanding on the PE.
- If it's of a Critical priority, no Critical priority dispatch must be
outstanding on the PE.
Further, the caller should be aware of the following assumptions made by the
dispatcher:
- The caller of the API is a component running in EL3; for example, the *Secure
Partition Manager*.
- The requested dispatch will be permitted by the Exception Handling Framework.
I.e. the caller must make sure that the requested dispatch has sufficient
priority so as not to cause priority level inversion within Exception
Handling Framework.
- At the time of the call, the active context is Secure, and it has been saved.
- Upon returning success, the Non-secure context will be restored and setup for
the event dispatch, and it will be the active context. The Non-secure context
should not be modified further by the caller.
- The API returning success only means that the dispatch is scheduled at the
next ``ERET``, and not immediately performed. Also, the caller must be
prepared for this API to return failure and handle accordingly.
- Upon completing the event (i.e. when the client calls either
``SDEI_EVENT_COMPLETE`` or ``SDEI_EVENT_COMPLETE_AND_RESUME``), the preempted
context is resumed (as indicated by the ``preempted_sec_state`` parameter of
the API).
.. [#critical-event] Examples of critical events are *SError*, *Synchronous
External Abort*, *Fault Handling interrupt*, or *Error
Recovery interrupt* from one of the RAS nodes in the system.
Porting requirements
--------------------
The porting requirements of the SDEI dispatcher are outlined in the `porting
guide`__.
.. __: `SDEI porting requirements`_
Note on writing SDEI event handlers
-----------------------------------
*This section pertains to SDEI event handlers in general, not just when using
the ARM Trusted Firmware SDEI dispatcher.*
The SDEI specification requires that event handlers preserve the contents of all
registers except ``x0`` to ``x17``. This has significance if the event handler is
written in C: compilers typically adjust the stack frame at the beginning and
end of C functions. For example, AArch64 GCC typically produces the following
function prologue and epilogue:
::
c_event_handler:
stp x29, x30, [sp,#-32]!
mov x29, sp
...
bl ...
...
ldp x29, x30, [sp],#32
ret
The register ``x29`` is used as frame pointer in the prologue. Because neither a
valid ``SDEI_EVENT_COMPLETE`` nor an ``SDEI_EVENT_COMPLETE_AND_RESUME`` call
returns to the handler, the epilogue never gets executed, and registers ``x29``
and ``x30`` (in the case above) are inadvertently corrupted. This violates the
SDEI specification, and the normal execution thereafter will result in
unexpected behaviour.
To work around this, it's advised that top-level event handlers are
implemented in assembly, following a pattern similar to the one below:
::
asm_event_handler:
/* Save link register whilst maintaining stack alignment */
stp xzr, x30, [sp, #-16]!
bl c_event_handler
/* Restore link register */
ldp xzr, x30, [sp], #16
/* Complete call */
ldr x0, =SDEI_EVENT_COMPLETE
smc #0
b .
----
*Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.*
.. _SDEI specification: http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
.. _SDEI porting requirements: porting-guide.rst#sdei-porting-requirements


@ -361,6 +361,11 @@ Common build options
Firmware as error. It can take the value 1 (flag the use of deprecated
APIs as error) or 0. The default is 0.
- ``EL3_EXCEPTION_HANDLING``: When set to ``1``, enable handling of exceptions
targeted at EL3. When set to ``0`` (default), no exceptions are expected or
handled at EL3, and a panic will result. This is supported only for AArch64
builds.
- ``FIP_NAME``: This is an optional build option which specifies the FIP
filename for the ``fip`` target. Default is ``fip.bin``.
@ -529,6 +534,12 @@ Common build options
optional. It is only needed if the platform makefile specifies that it
is required in order to build the ``fwu_fip`` target.
- ``SDEI_SUPPORT``: Setting this to ``1`` enables support for the Software
Delegated Exception Interface in the BL31 image. This defaults to ``0``.
When set to ``1``, the build option ``EL3_EXCEPTION_HANDLING`` must also be
set to ``1``.
- ``SEPARATE_CODE_AND_RODATA``: Whether code and read-only data should be
isolated on separate memory pages. This is a trade-off between security and
memory usage. See "Isolating code and read-only data on separate memory


@ -72,6 +72,8 @@ void gicv2_cpuif_disable(void)
******************************************************************************/
void gicv2_pcpu_distif_init(void)
{
unsigned int ctlr;
assert(driver_data);
assert(driver_data->gicd_base);
@ -89,6 +91,13 @@ void gicv2_pcpu_distif_init(void)
driver_data->g0_interrupt_array);
}
#endif
/* Enable G0 interrupts if not already */
ctlr = gicd_read_ctlr(driver_data->gicd_base);
if ((ctlr & CTLR_ENABLE_G0_BIT) == 0) {
gicd_write_ctlr(driver_data->gicd_base,
ctlr | CTLR_ENABLE_G0_BIT);
}
}
/*******************************************************************************
@ -308,9 +317,26 @@ void gicv2_set_pe_target_mask(unsigned int proc_num)
if (driver_data->target_masks[proc_num])
return;
/* Read target register corresponding to this CPU */
/*
* Update target register corresponding to this CPU and flush for it to
* be visible to other CPUs.
*/
if (driver_data->target_masks[proc_num] == 0) {
driver_data->target_masks[proc_num] =
gicv2_get_cpuif_id(driver_data->gicd_base);
#if !HW_ASSISTED_COHERENCY
/*
* PEs only update their own masks. Primary updates it with
* caches on. But because secondaries do it with caches off,
* all updates go to memory directly, and there's no danger of
* secondaries overwriting each other's mask, despite
* target_masks[] not being cache line aligned.
*/
flush_dcache_range((uintptr_t)
&driver_data->target_masks[proc_num],
sizeof(driver_data->target_masks[proc_num]));
#endif
}
}
/*******************************************************************************


@ -541,12 +541,13 @@ void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
/*******************************************************************************
* Helper function to configure properties of secure G0 and G1S PPIs and SGIs.
******************************************************************************/
void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
unsigned int gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
const interrupt_prop_t *interrupt_props,
unsigned int interrupt_props_num)
{
unsigned int i;
const interrupt_prop_t *current_prop;
unsigned int ctlr_enable = 0;
/* Make sure there's a valid property array */
assert(interrupt_props != NULL);
@ -564,10 +565,13 @@ void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
/* Configure this interrupt as G0 or a G1S interrupt */
assert((current_prop->intr_grp == INTR_GROUP0) ||
(current_prop->intr_grp == INTR_GROUP1S));
if (current_prop->intr_grp == INTR_GROUP1S)
if (current_prop->intr_grp == INTR_GROUP1S) {
gicr_set_igrpmodr0(gicr_base, current_prop->intr_num);
else
ctlr_enable |= CTLR_ENABLE_G1S_BIT;
} else {
gicr_clr_igrpmodr0(gicr_base, current_prop->intr_num);
ctlr_enable |= CTLR_ENABLE_G0_BIT;
}
/* Set the priority of this interrupt */
gicr_set_ipriorityr(gicr_base, current_prop->intr_num,
@ -586,4 +590,6 @@ void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
/* Enable this interrupt */
gicr_set_isenabler0(gicr_base, current_prop->intr_num);
}
return ctlr_enable;
}


@ -224,12 +224,16 @@ void gicv3_distif_init(void)
void gicv3_rdistif_init(unsigned int proc_num)
{
uintptr_t gicr_base;
unsigned int bitmap = 0;
uint32_t ctlr;
assert(gicv3_driver_data);
assert(proc_num < gicv3_driver_data->rdistif_num);
assert(gicv3_driver_data->rdistif_base_addrs);
assert(gicv3_driver_data->gicd_base);
assert(gicd_read_ctlr(gicv3_driver_data->gicd_base) & CTLR_ARE_S_BIT);
ctlr = gicd_read_ctlr(gicv3_driver_data->gicd_base);
assert(ctlr & CTLR_ARE_S_BIT);
assert(IS_IN_EL3());
@ -244,7 +248,7 @@ void gicv3_rdistif_init(unsigned int proc_num)
#if !ERROR_DEPRECATED
if (gicv3_driver_data->interrupt_props != NULL) {
#endif
gicv3_secure_ppi_sgi_configure_props(gicr_base,
bitmap = gicv3_secure_ppi_sgi_configure_props(gicr_base,
gicv3_driver_data->interrupt_props,
gicv3_driver_data->interrupt_props_num);
#if !ERROR_DEPRECATED
@ -258,6 +262,7 @@ void gicv3_rdistif_init(unsigned int proc_num)
gicv3_driver_data->g1s_interrupt_num,
gicv3_driver_data->g1s_interrupt_array,
INTR_GROUP1S);
bitmap |= CTLR_ENABLE_G1S_BIT;
}
/* Configure the G0 SGIs/PPIs */
@ -266,9 +271,14 @@ void gicv3_rdistif_init(unsigned int proc_num)
gicv3_driver_data->g0_interrupt_num,
gicv3_driver_data->g0_interrupt_array,
INTR_GROUP0);
bitmap |= CTLR_ENABLE_G0_BIT;
}
}
#endif
/* Enable interrupt groups as required, if not already */
if ((ctlr & bitmap) != bitmap)
gicd_set_ctlr(gicv3_driver_data->gicd_base, bitmap, RWP_TRUE);
}
/*******************************************************************************


@ -95,7 +95,7 @@ void gicv3_secure_ppi_sgi_configure(uintptr_t gicr_base,
const unsigned int *sec_intr_list,
unsigned int int_grp);
#endif
void gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
unsigned int gicv3_secure_ppi_sgi_configure_props(uintptr_t gicr_base,
const interrupt_prop_t *interrupt_props,
unsigned int interrupt_props_num);
unsigned int gicv3_secure_spis_configure_props(uintptr_t gicd_base,

include/bl31/ehf.h Normal file (90 lines)

@ -0,0 +1,90 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __EHF_H__
#define __EHF_H__
#ifndef __ASSEMBLY__
#include <stdint.h>
#include <utils_def.h>
/* Valid priorities set bit 0 of the priority handler. */
#define _EHF_PRI_VALID (((uintptr_t) 1) << 0)
/* Marker for no handler registered for a valid priority */
#define _EHF_NO_HANDLER (0 | _EHF_PRI_VALID)
/* Extract the specified number of top bits from 7 lower bits of priority */
#define EHF_PRI_TO_IDX(pri, plat_bits) \
((pri & 0x7f) >> (7 - plat_bits))
/* Install exception priority descriptor at a suitable index */
#define EHF_PRI_DESC(plat_bits, priority) \
[EHF_PRI_TO_IDX(priority, plat_bits)] = { \
.ehf_handler = _EHF_NO_HANDLER, \
}
/* Macro for platforms to register their exception priorities */
#define EHF_REGISTER_PRIORITIES(priorities, num, bits) \
const ehf_priorities_t exception_data = { \
.num_priorities = num, \
.ehf_priorities = priorities, \
.pri_bits = bits, \
}
/*
* Priority stack, managed as a bitmap.
*
* Currently only supports 32 priority levels, allowing platforms to use up to 5
* top bits of priority. But the type can be changed to uint64_t should the need
* arise to support 64 priority levels, allowing platforms to use up to 6 top
* bits of priority.
*/
typedef uint32_t ehf_pri_bits_t;
/*
* Per-PE exception data. The data for each PE is kept as a per-CPU data field.
* See cpu_data.h.
*/
typedef struct {
ehf_pri_bits_t active_pri_bits;
/* Priority mask value before any priority levels were active */
uint8_t init_pri_mask;
/* Non-secure priority mask value stashed during Secure execution */
uint8_t ns_pri_mask;
} __aligned(sizeof(uint64_t)) pe_exc_data_t;
typedef int (*ehf_handler_t)(uint32_t intr_raw, uint32_t flags, void *handle,
void *cookie);
typedef struct ehf_pri_desc {
/*
* 4-byte-aligned exception handler. Bit 0 indicates the corresponding
* priority level is valid. This is effectively of ehf_handler_t type,
* but left as uintptr_t in order to make pointer arithmetic convenient.
*/
uintptr_t ehf_handler;
} ehf_pri_desc_t;
typedef struct ehf_priorities {
ehf_pri_desc_t *ehf_priorities;
unsigned int num_priorities;
int pri_bits;
} ehf_priorities_t;
void ehf_init(void);
void ehf_activate_priority(unsigned int priority);
void ehf_deactivate_priority(unsigned int priority);
void ehf_register_priority_handler(unsigned int pri, ehf_handler_t handler);
void ehf_allow_ns_preemption(void);
unsigned int ehf_is_ns_preemption_allowed(void);
#endif /* __ASSEMBLY__ */
#endif /* __EHF_H__ */


@ -34,10 +34,10 @@
#define GIC_INTR_CFG_EDGE 1
/* Constants to categorise priorities */
#define GIC_HIGHEST_SEC_PRIORITY 0
#define GIC_LOWEST_SEC_PRIORITY 127
#define GIC_HIGHEST_NS_PRIORITY 128
#define GIC_LOWEST_NS_PRIORITY 254 /* 255 would disable an interrupt */
#define GIC_HIGHEST_SEC_PRIORITY 0x0
#define GIC_LOWEST_SEC_PRIORITY 0x7f
#define GIC_HIGHEST_NS_PRIORITY 0x80
#define GIC_LOWEST_NS_PRIORITY 0xfe /* 0xff would disable all interrupts */
/*******************************************************************************
* GIC Distributor interface register offsets that are common to GICv3 & GICv2


@ -245,6 +245,9 @@
#define GICR_NUM_REGS(reg_name) \
DIV_ROUND_UP_2EVAL(TOTAL_PCPU_INTR_NUM, (1 << reg_name ## _SHIFT))
/* Interrupt ID mask for HPPIR, AHPPIR, IAR and AIAR CPU Interface registers */
#define INT_ID_MASK 0xffffff
/*******************************************************************************
* This structure describes some of the implementation defined attributes of the
* GICv3 IP. It is used by the platform port to specify these attributes in order


@ -598,4 +598,10 @@
#define MAKE_MAIR_NORMAL_MEMORY(inner, outer) ((inner) | ((outer) << MAIR_NORM_OUTER_SHIFT))
/* PAR_EL1 fields */
#define PAR_F_SHIFT 0
#define PAR_F_MASK 1
#define PAR_ADDR_SHIFT 12
#define PAR_ADDR_MASK (BIT(40) - 1) /* 40-bits-wide page address */
#endif /* __ARCH_H__ */


@ -155,6 +155,7 @@ DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1r)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e1w)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0r)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s12e0w)
DEFINE_SYSOP_TYPE_PARAM_FUNC(at, s1e2r)
void flush_dcache_range(uintptr_t addr, size_t size);
void clean_dcache_range(uintptr_t addr, size_t size);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -7,6 +7,7 @@
#ifndef __CPU_DATA_H__
#define __CPU_DATA_H__
#include <ehf.h>
#include <platform_def.h> /* CACHE_WRITEBACK_GRANULE required */
#ifdef AARCH32
@ -96,6 +97,9 @@ typedef struct cpu_data {
#if PLAT_PCPU_DATA_SIZE
uint8_t platform_cpu_data[PLAT_PCPU_DATA_SIZE];
#endif
#if defined(IMAGE_BL31) && EL3_EXCEPTION_HANDLING
pe_exc_data_t ehf_data;
#endif
} __aligned(CACHE_WRITEBACK_GRANULE) cpu_data_t;
#if CRASH_REPORTING


@ -202,7 +202,7 @@
GIC_INTR_CFG_EDGE)
#define ARM_G0_IRQ_PROPS(grp) \
INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, GIC_HIGHEST_SEC_PRIORITY, grp, \
INTR_PROP_DESC(ARM_IRQ_SEC_SGI_0, PLAT_SDEI_NORMAL_PRI, grp, \
GIC_INTR_CFG_EDGE), \
INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_EDGE)
@ -454,5 +454,24 @@
*/
#define PLAT_PERCPU_BAKERY_LOCK_SIZE (1 * CACHE_WRITEBACK_GRANULE)
/* Priority levels for ARM platforms */
#define PLAT_SDEI_CRITICAL_PRI 0x60
#define PLAT_SDEI_NORMAL_PRI 0x70
/* ARM platforms use 3 upper bits of secure interrupt priority */
#define ARM_PRI_BITS 3
/* SGI used for SDEI signalling */
#define ARM_SDEI_SGI ARM_IRQ_SEC_SGI_0
/* ARM SDEI dynamic private event numbers */
#define ARM_SDEI_DP_EVENT_0 1000
#define ARM_SDEI_DP_EVENT_1 1001
#define ARM_SDEI_DP_EVENT_2 1002
/* ARM SDEI dynamic shared event numbers */
#define ARM_SDEI_DS_EVENT_0 2000
#define ARM_SDEI_DS_EVENT_1 2001
#define ARM_SDEI_DS_EVENT_2 2002
#endif /* __ARM_DEF_H__ */


@ -119,6 +119,7 @@ void arm_configure_sys_timer(void);
/* PM utility functions */
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state);
int arm_validate_psci_entrypoint(uintptr_t entrypoint);
int arm_validate_ns_entrypoint(uintptr_t entrypoint);
void arm_system_pwr_domain_save(void);
void arm_system_pwr_domain_resume(void);


@ -90,6 +90,7 @@ void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
void plat_ic_set_interrupt_pending(unsigned int id);
void plat_ic_clear_interrupt_pending(unsigned int id);
unsigned int plat_ic_set_priority_mask(unsigned int mask);
unsigned int plat_ic_get_interrupt_id(unsigned int raw);
/*******************************************************************************
* Optional common functions (may be overridden)
@ -113,6 +114,16 @@ void bl1_plat_arch_setup(void);
void bl1_platform_setup(void);
struct meminfo *bl1_plat_sec_mem_layout(void);
/*******************************************************************************
* Optional EL3 component functions in BL31
******************************************************************************/
/* SDEI platform functions */
#if SDEI_SUPPORT
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode);
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
#endif
/*
* The following function is mandatory when the
* firmware update feature is used.

include/services/sdei.h Normal file (181 lines)

@ -0,0 +1,181 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SDEI_H__
#define __SDEI_H__
#include <spinlock.h>
#include <utils_def.h>
/* Range 0xC4000020 - 0xC400003F reserved for SDE 64bit smc calls */
#define SDEI_VERSION 0xC4000020
#define SDEI_EVENT_REGISTER 0xC4000021
#define SDEI_EVENT_ENABLE 0xC4000022
#define SDEI_EVENT_DISABLE 0xC4000023
#define SDEI_EVENT_CONTEXT 0xC4000024
#define SDEI_EVENT_COMPLETE 0xC4000025
#define SDEI_EVENT_COMPLETE_AND_RESUME 0xC4000026
#define SDEI_EVENT_UNREGISTER 0xC4000027
#define SDEI_EVENT_STATUS 0xC4000028
#define SDEI_EVENT_GET_INFO 0xC4000029
#define SDEI_EVENT_ROUTING_SET 0xC400002A
#define SDEI_PE_MASK 0xC400002B
#define SDEI_PE_UNMASK 0xC400002C
#define SDEI_INTERRUPT_BIND 0xC400002D
#define SDEI_INTERRUPT_RELEASE 0xC400002E
#define SDEI_EVENT_SIGNAL 0xC400002F
#define SDEI_FEATURES 0xC4000030
#define SDEI_PRIVATE_RESET 0xC4000031
#define SDEI_SHARED_RESET 0xC4000032
/* SDEI_EVENT_REGISTER flags */
#define SDEI_REGF_RM_ANY 0
#define SDEI_REGF_RM_PE 1
/* SDEI_EVENT_COMPLETE status flags */
#define SDEI_EV_HANDLED 0
#define SDEI_EV_FAILED 1
/* SDE event status values in bit position */
#define SDEI_STATF_REGISTERED 0
#define SDEI_STATF_ENABLED 1
#define SDEI_STATF_RUNNING 2
/* Internal: SDEI flag bit positions */
#define _SDEI_MAPF_DYNAMIC_SHIFT 1
#define _SDEI_MAPF_BOUND_SHIFT 2
#define _SDEI_MAPF_SIGNALABLE_SHIFT 3
#define _SDEI_MAPF_PRIVATE_SHIFT 4
#define _SDEI_MAPF_CRITICAL_SHIFT 5
/* SDEI event 0 */
#define SDEI_EVENT_0 0
/* Placeholder interrupt for dynamic mapping */
#define SDEI_DYN_IRQ 0
/* SDEI flags */
/*
* These flags determine whether or not an event can be associated with an
* interrupt. Static events are permanently associated with an interrupt, and
* can't be changed at runtime. Association of dynamic events with interrupts
* can be changed at run time using the SDEI_INTERRUPT_BIND and
* SDEI_INTERRUPT_RELEASE calls.
*
* SDEI_MAPF_DYNAMIC only indicates run time configurability, whereas
* SDEI_MAPF_BOUND indicates interrupt association. For example:
*
* - A dynamic event on which SDEI_INTERRUPT_BIND has been called will have both
* SDEI_MAPF_DYNAMIC and SDEI_MAPF_BOUND set.
*
* - Statically-bound events will always have SDEI_MAPF_BOUND set, and neither
* SDEI_INTERRUPT_BIND nor SDEI_INTERRUPT_RELEASE can be called on them.
*
* See also the is_map_bound() macro.
*/
#define SDEI_MAPF_DYNAMIC BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
#define SDEI_MAPF_BOUND BIT(_SDEI_MAPF_BOUND_SHIFT)
#define SDEI_MAPF_SIGNALABLE BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
#define SDEI_MAPF_PRIVATE BIT(_SDEI_MAPF_PRIVATE_SHIFT)
#define SDEI_MAPF_CRITICAL BIT(_SDEI_MAPF_CRITICAL_SHIFT)
/* Indices of private and shared mappings */
#define _SDEI_MAP_IDX_PRIV 0
#define _SDEI_MAP_IDX_SHRD 1
#define _SDEI_MAP_IDX_MAX 2
/* The macros below are used to identify SDEI calls from the SMC function ID */
#define SDEI_FID_MASK U(0xffe0)
#define SDEI_FID_VALUE U(0x20)
#define is_sdei_fid(_fid) \
((((_fid) & SDEI_FID_MASK) == SDEI_FID_VALUE) && \
(((_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64))
#define SDEI_EVENT_MAP(_event, _intr, _flags) \
{ \
.ev_num = _event, \
.intr = _intr, \
.map_flags = _flags \
}
#define SDEI_SHARED_EVENT(_event, _intr, _flags) \
SDEI_EVENT_MAP(_event, _intr, _flags)
#define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)
#define SDEI_DEFINE_EVENT_0(_intr) \
SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)
/*
* Declare shared and private entries for each core. Also declare a global
* structure containing private and shared entries.
*
* This macro must be used in the same file as the platform SDEI mappings are
* declared. Only then would ARRAY_SIZE() yield a meaningful value.
*/
#define REGISTER_SDEI_MAP(_private, _shared) \
sdei_entry_t sdei_private_event_table \
[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
const sdei_mapping_t sdei_global_mappings[] = { \
[_SDEI_MAP_IDX_PRIV] = { \
.map = _private, \
.num_maps = ARRAY_SIZE(_private) \
}, \
[_SDEI_MAP_IDX_SHRD] = { \
.map = _shared, \
.num_maps = ARRAY_SIZE(_shared) \
}, \
}
typedef uint8_t sdei_state_t;
/* Runtime data of SDEI event */
typedef struct sdei_entry {
uint64_t ep; /* Entry point */
uint64_t arg; /* Entry point argument */
uint64_t affinity; /* Affinity of shared event */
unsigned int reg_flags; /* Registration flags */
/* Event handler states: registered, enabled, running */
sdei_state_t state;
} sdei_entry_t;
/* Mapping of SDEI events to interrupts, and associated data */
typedef struct sdei_ev_map {
int32_t ev_num; /* Event number */
unsigned int intr; /* Physical interrupt number for a bound map */
unsigned int map_flags; /* Mapping flags, see SDEI_MAPF_* */
unsigned int reg_count; /* Registration count */
spinlock_t lock; /* Per-event lock */
} sdei_ev_map_t;
typedef struct sdei_mapping {
sdei_ev_map_t *map;
size_t num_maps;
} sdei_mapping_t;
/* Handler to be called to handle SDEI smc calls */
uint64_t sdei_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
uint64_t x3,
uint64_t x4,
void *cookie,
void *handle,
uint64_t flags);
void sdei_init(void);
/* Public API to dispatch an event to Normal world */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state);
#endif /* __SDEI_H__ */


@ -62,6 +62,9 @@ ENABLE_RUNTIME_INSTRUMENTATION := 0
# Flag to enable stack corruption protection
ENABLE_STACK_PROTECTOR := 0
# Flag to enable exception handling in EL3
EL3_EXCEPTION_HANDLING := 0
# Build flag to treat usage of deprecated platform and framework APIs as error.
ERROR_DEPRECATED := 0
@ -111,6 +114,9 @@ RESET_TO_BL31 := 0
# For Chain of Trust
SAVE_KEYS := 0
# Software Delegated Exception support
SDEI_SUPPORT := 0
# Whether code and read-only data should be put on separate memory pages. The
# platform Makefile is free to override this value.
SEPARATE_CODE_AND_RODATA := 0
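EL3_EXCEPTION_HANDLING and SDEI_SUPPORT above both default to 0. For a platform that provides the required priority and interrupt definitions, they are typically enabled together on the make command line, for example (illustrative invocation; the FVP platform target is an assumption):
make PLAT=fvp EL3_EXCEPTION_HANDLING=1 SDEI_SUPPORT=1 all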

View File

@ -398,7 +398,7 @@ plat_psci_ops_t plat_arm_psci_pm_ops = {
.system_off = fvp_system_off,
.system_reset = fvp_system_reset,
.validate_power_state = fvp_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint,
.validate_ns_entrypoint = arm_validate_psci_entrypoint,
.translate_power_state_by_mpidr = fvp_translate_power_state_by_mpidr,
.get_node_hw_state = fvp_node_hw_state,
.get_sys_suspend_power_state = fvp_get_sys_suspend_power_state,

View File

@ -0,0 +1,24 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <ehf.h>
#include <platform_def.h>
/*
* Enumeration of priority levels on ARM platforms.
*/
ehf_pri_desc_t arm_exceptions[] = {
#if SDEI_SUPPORT
/* Critical priority SDEI */
EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_CRITICAL_PRI),
/* Normal priority SDEI */
EHF_PRI_DESC(ARM_PRI_BITS, PLAT_SDEI_NORMAL_PRI),
#endif
};
/* Plug in ARM exceptions to Exception Handling Framework. */
EHF_REGISTER_PRIORITIES(arm_exceptions, ARRAY_SIZE(arm_exceptions), ARM_PRI_BITS);

View File

@ -0,0 +1,33 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/* SDEI configuration for ARM platforms */
#include <ehf.h>
#include <platform_def.h>
#include <sdei.h>
/* Private event mappings */
static sdei_ev_map_t arm_private_sdei[] = {
/* Event 0 */
SDEI_DEFINE_EVENT_0(ARM_SDEI_SGI),
/* Dynamic private events */
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_PRIVATE_EVENT(ARM_SDEI_DP_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
};
/* Shared event mappings */
static sdei_ev_map_t arm_shared_sdei[] = {
/* Dynamic shared events */
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_0, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_1, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
SDEI_SHARED_EVENT(ARM_SDEI_DS_EVENT_2, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
};
/* Export ARM SDEI events */
REGISTER_SDEI_MAP(arm_private_sdei, arm_shared_sdei);

View File

@ -204,3 +204,51 @@ unsigned int plat_get_syscnt_freq2(void)
}
#endif /* ARM_SYS_CNTCTL_BASE */
#if SDEI_SUPPORT
/*
* Translate SDEI entry point to PA, and perform standard ARM entry point
* validation on it.
*/
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
uint64_t par, pa;
uint32_t scr_el3;
/* Doing Non-secure address translation requires SCR_EL3.NS set */
scr_el3 = read_scr_el3();
write_scr_el3(scr_el3 | SCR_NS_BIT);
isb();
assert((client_mode == MODE_EL2) || (client_mode == MODE_EL1));
if (client_mode == MODE_EL2) {
/*
* Translate entry point to Physical Address using the EL2
* translation regime.
*/
ats1e2r(ep);
} else {
/*
* Translate entry point to Physical Address using the EL1&0
* translation regime, including stage 2.
*/
ats12e1r(ep);
}
isb();
par = read_par_el1();
/* Restore the original SCR_EL3 */
write_scr_el3(scr_el3);
isb();
/* If the translation resulted in fault, return failure */
if ((par & PAR_F_MASK) != 0)
return -1;
/* Extract Physical Address from PAR */
pa = (par & (PAR_ADDR_MASK << PAR_ADDR_SHIFT));
/* Perform NS entry point validation on the physical address */
return arm_validate_ns_entrypoint(pa);
}
#endif

View File

@ -184,6 +184,14 @@ BL31_SOURCES += plat/arm/common/arm_sip_svc.c \
lib/pmf/pmf_smc.c
endif
ifeq (${EL3_EXCEPTION_HANDLING},1)
BL31_SOURCES += plat/arm/common/aarch64/arm_ehf.c
endif
ifeq (${SDEI_SUPPORT},1)
BL31_SOURCES += plat/arm/common/aarch64/arm_sdei.c
endif
ifneq (${TRUSTED_BOARD_BOOT},0)
# Include common TBB sources

View File

@ -51,6 +51,7 @@ void plat_arm_gic_init(void)
{
gicv2_distif_init();
gicv2_pcpu_distif_init();
gicv2_set_pe_target_mask(plat_my_core_pos());
gicv2_cpuif_enable();
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2016, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -112,7 +112,7 @@ int arm_validate_power_state(unsigned int power_state,
/*******************************************************************************
* ARM standard platform handler called to check the validity of the non secure
* entrypoint.
* entrypoint. Returns 0 if the entrypoint is valid, or -1 otherwise.
******************************************************************************/
int arm_validate_ns_entrypoint(uintptr_t entrypoint)
{
@ -121,15 +121,23 @@ int arm_validate_ns_entrypoint(uintptr_t entrypoint)
* secure DRAM.
*/
if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint <
(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE)))
return PSCI_E_SUCCESS;
(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE))) {
return 0;
}
#ifndef AARCH32
if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
(ARM_DRAM2_BASE + ARM_DRAM2_SIZE)))
return PSCI_E_SUCCESS;
(ARM_DRAM2_BASE + ARM_DRAM2_SIZE))) {
return 0;
}
#endif
return PSCI_E_INVALID_ADDRESS;
return -1;
}
int arm_validate_psci_entrypoint(uintptr_t entrypoint)
{
return arm_validate_ns_entrypoint(entrypoint) == 0 ? PSCI_E_SUCCESS :
PSCI_E_INVALID_ADDRESS;
}
/******************************************************************************

View File

@ -299,7 +299,7 @@ plat_psci_ops_t plat_arm_psci_pm_ops = {
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = css_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint,
.validate_ns_entrypoint = arm_validate_psci_entrypoint,
.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
.get_node_hw_state = css_node_hw_state,
.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

View File

@ -3,6 +3,8 @@
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <platform.h>
@ -20,6 +22,11 @@
#pragma weak plat_get_syscnt_freq2
#endif /* ERROR_DEPRECATED */
#if SDEI_SUPPORT
#pragma weak plat_sdei_handle_masked_trigger
#pragma weak plat_sdei_validate_entry_point
#endif
void bl31_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el3(flags);
@ -64,3 +71,22 @@ unsigned int plat_get_syscnt_freq2(void)
return (unsigned int)freq;
}
#endif /* ERROR_DEPRECATED */
#if SDEI_SUPPORT
/*
* Function that handles spurious SDEI interrupts while events are masked.
*/
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
{
WARN("Spurious SDEI interrupt %u on masked PE %lx\n", intr, mpidr);
}
/*
* Default function to validate the SDEI entry point; it simply returns success.
* Platforms may override this with their own validation mechanism.
*/
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
return 0;
}
#endif

View File

@ -277,3 +277,13 @@ unsigned int plat_ic_set_priority_mask(unsigned int mask)
{
return gicv2_set_pmr(mask);
}
unsigned int plat_ic_get_interrupt_id(unsigned int raw)
{
unsigned int id = (raw & INT_ID_MASK);
if (id == GIC_SPURIOUS_INTERRUPT)
id = INTR_ID_UNAVAILABLE;
return id;
}

View File

@ -271,6 +271,14 @@ unsigned int plat_ic_set_priority_mask(unsigned int mask)
{
return gicv3_set_pmr(mask);
}
unsigned int plat_ic_get_interrupt_id(unsigned int raw)
{
unsigned int id = (raw & INT_ID_MASK);
return (gicv3_is_intr_id_special_identifier(id) ?
INTR_ID_UNAVAILABLE : id);
}
#endif
#ifdef IMAGE_BL32

View File

@ -0,0 +1,98 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <utils.h>
#include "sdei_private.h"
#define MAP_OFF(_map, _mapping) ((_map) - (_mapping)->map)
/*
* Get SDEI entry with the given mapping: on success, returns pointer to SDEI
* entry. On error, returns NULL.
*
* Both shared and private maps are stored in single-dimensional arrays. Private
* event entries are replicated per PE, effectively forming a 2D array.
*/
sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
{
const sdei_mapping_t *mapping;
sdei_entry_t *cpu_priv_base;
unsigned int idx, base_idx;
if (is_event_private(map)) {
/*
* For a private map, find the index of the mapping in the
* array.
*/
mapping = SDEI_PRIVATE_MAPPING();
idx = MAP_OFF(map, mapping);
/* Base of private mappings for this CPU */
base_idx = plat_my_core_pos() * mapping->num_maps;
cpu_priv_base = &sdei_private_event_table[base_idx];
/*
* Return the address of the entry at the same index in the
* per-CPU event entry.
*/
return &cpu_priv_base[idx];
} else {
mapping = SDEI_SHARED_MAPPING();
idx = MAP_OFF(map, mapping);
return &sdei_shared_event_table[idx];
}
}
/*
* Find event mapping for a given interrupt number: On success, returns pointer
* to the event mapping. On error, returns NULL.
*/
sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
{
const sdei_mapping_t *mapping;
sdei_ev_map_t *map;
unsigned int i;
/*
* Look for a match in private and shared mappings, as requested. This
* is a linear search. However, if the mappings are required to be
* sorted, for large maps, we could consider binary search.
*/
mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
iterate_mapping(mapping, i, map) {
if (map->intr == intr_num)
return map;
}
return NULL;
}
/*
* Find event mapping for a given event number: On success returns pointer to
* the event mapping. On error, returns NULL.
*/
sdei_ev_map_t *find_event_map(int ev_num)
{
const sdei_mapping_t *mapping;
sdei_ev_map_t *map;
unsigned int i, j;
/*
* Iterate through mappings to find a match. This is a linear search.
* However, if the mappings are required to be sorted, for large maps,
* we could consider binary search.
*/
for_each_mapping_type(i, mapping) {
iterate_mapping(mapping, j, map) {
if (map->ev_num == ev_num)
return map;
}
}
return NULL;
}

View File

@ -0,0 +1,676 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"
#define PE_MASKED 1
#define PE_NOT_MASKED 0
/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS 18
/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING 2
/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state() (&sdei_cpu_state[plat_my_core_pos()])
/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
sdei_ev_map_t *map;
unsigned int sec_state;
unsigned int intr_raw;
uint64_t x[SDEI_SAVED_GPREGS];
/* Exception state registers */
uint64_t elr_el3;
uint64_t spsr_el3;
} sdei_dispatch_context_t;
/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
unsigned short stack_top; /* Empty ascending */
unsigned int pe_masked:1;
unsigned int pending_enables:1;
} sdei_cpu_state_t;
/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];
unsigned int sdei_pe_mask(void)
{
unsigned int ret;
sdei_cpu_state_t *state = sdei_get_this_pe_state();
/*
* Return value indicates whether this call had any effect on the mask
* status of this PE.
*/
ret = (state->pe_masked ^ PE_MASKED);
state->pe_masked = PE_MASKED;
return ret;
}
void sdei_pe_unmask(void)
{
int i;
sdei_ev_map_t *map;
sdei_entry_t *se;
sdei_cpu_state_t *state = sdei_get_this_pe_state();
uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
/*
* If there are pending enables, iterate through the private mappings
* and enable those bound maps that are in enabled state. Also, iterate
* through shared mappings and enable interrupts of events that are
* targeted to this PE.
*/
if (state->pending_enables) {
for_each_private_map(i, map) {
se = get_event_entry(map);
if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
plat_ic_enable_interrupt(map->intr);
}
for_each_shared_map(i, map) {
se = get_event_entry(map);
sdei_map_lock(map);
if (is_map_bound(map) &&
GET_EV_STATE(se, ENABLED) &&
(se->reg_flags == SDEI_REGF_RM_PE) &&
(se->affinity == my_mpidr)) {
plat_ic_enable_interrupt(map->intr);
}
sdei_map_unlock(map);
}
}
state->pending_enables = 0;
state->pe_masked = PE_NOT_MASKED;
}
/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
sdei_dispatch_context_t *disp_ctx;
/* Cannot have more than max events */
assert(state->stack_top < MAX_EVENT_NESTING);
disp_ctx = &state->dispatch_stack[state->stack_top];
state->stack_top++;
return disp_ctx;
}
/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
if (state->stack_top == 0)
return NULL;
assert(state->stack_top <= MAX_EVENT_NESTING);
state->stack_top--;
return &state->dispatch_stack[state->stack_top];
}
/* Retrieve the context at the top of dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
sdei_cpu_state_t *state = sdei_get_this_pe_state();
if (state->stack_top == 0)
return NULL;
assert(state->stack_top <= MAX_EVENT_NESTING);
return &state->dispatch_stack[state->stack_top - 1];
}
static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
unsigned int intr_raw)
{
sdei_dispatch_context_t *disp_ctx;
gp_regs_t *tgt_gpregs;
el3_state_t *tgt_el3;
assert(tgt_ctx);
tgt_gpregs = get_gpregs_ctx(tgt_ctx);
tgt_el3 = get_el3state_ctx(tgt_ctx);
disp_ctx = push_dispatch();
assert(disp_ctx);
disp_ctx->sec_state = sec_state;
disp_ctx->map = map;
disp_ctx->intr_raw = intr_raw;
/* Save general purpose and exception registers */
memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
}
static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
gp_regs_t *tgt_gpregs;
el3_state_t *tgt_el3;
assert(tgt_ctx);
tgt_gpregs = get_gpregs_ctx(tgt_ctx);
tgt_el3 = get_el3state_ctx(tgt_ctx);
CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
foo);
/* Restore general purpose and exception registers */
memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
}
static void save_secure_context(void)
{
cm_el1_sysregs_context_save(SECURE);
}
/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
cm_el1_sysregs_context_restore(SECURE);
cm_set_next_eret_context(SECURE);
}
/*
* Restore Non-secure context and arrange to resume it at the next ERET. Return
* pointer to the Non-secure context.
*/
static cpu_context_t *restore_and_resume_ns_context(void)
{
cpu_context_t *ns_ctx;
cm_el1_sysregs_context_restore(NON_SECURE);
cm_set_next_eret_context(NON_SECURE);
ns_ctx = cm_get_context(NON_SECURE);
assert(ns_ctx);
return ns_ctx;
}
/*
* Populate the Non-secure context so that the next ERET will dispatch to the
* SDEI client.
*/
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
cpu_context_t *ctx, int sec_state_to_resume,
unsigned int intr_raw)
{
el3_state_t *el3_ctx = get_el3state_ctx(ctx);
/* Push the event and context */
save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);
/*
* Setup handler arguments:
*
* - x0: Event number
* - x1: Handler argument supplied at the time of event registration
* - x2: Interrupted PC
* - x3: Interrupted SPSR
*/
SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));
/*
* Prepare for ERET:
*
* - Set PC to the registered handler address
* - Set SPSR to jump to client EL with exceptions masked
*/
cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
SPSR_64(sdei_client_el(), MODE_SP_ELX,
DISABLE_ALL_EXCEPTIONS));
}
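/*
 * For illustration, a Non-secure client handler entered by the dispatch above
 * would see its arguments along these lines (hypothetical client-side
 * prototype, not part of this file):
 *
 *   void sdei_client_handler(int ev_num, uint64_t arg,
 *                            uint64_t interrupted_pc,
 *                            uint64_t interrupted_spsr);
 */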
/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
sdei_cpu_state_t *state, unsigned int intr_raw)
{
uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
int disable = 0;
/* Nothing to do for event 0 */
if (map->ev_num == SDEI_EVENT_0)
return;
/*
* For a private event, or for a shared event specifically routed to
* this CPU, we disable interrupt, leave the interrupt pending, and do
* EOI.
*/
if (is_event_private(map)) {
disable = 1;
} else if (se->reg_flags == SDEI_REGF_RM_PE) {
assert(se->affinity == my_mpidr);
disable = 1;
}
if (disable) {
plat_ic_disable_interrupt(map->intr);
plat_ic_set_interrupt_pending(map->intr);
plat_ic_end_of_interrupt(intr_raw);
state->pending_enables = 1;
return;
}
/*
* We just received a shared event with routing set to ANY PE. The
* interrupt can't be delegated on this PE as SDEI events are masked.
* However, because its routing mode is ANY, it is possible that the
* event can be delegated on any other PE that hasn't masked events.
* Therefore, we set the interrupt back pending so as to give other
* suitable PEs a chance of handling it.
*/
assert(plat_ic_is_spi(map->intr));
plat_ic_set_interrupt_pending(map->intr);
/*
* Leaving the same interrupt pending also means that the same interrupt
* can target this PE again as soon as this PE leaves EL3. Whether and
* how often that happens depends on the implementation of GIC.
*
* We therefore call a platform handler to resolve this situation.
*/
plat_sdei_handle_masked_trigger(my_mpidr, map->intr);
/* This PE is masked. We EOI the interrupt, as it can't be delegated */
plat_ic_end_of_interrupt(intr_raw);
}
/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
void *cookie)
{
sdei_entry_t *se;
cpu_context_t *ctx;
sdei_ev_map_t *map;
sdei_dispatch_context_t *disp_ctx;
unsigned int sec_state;
sdei_cpu_state_t *state;
uint32_t intr;
/*
* To handle an event, the following conditions must be true:
*
* 1. Event must be signalled
* 2. Event must be enabled
* 3. This PE must be a target PE for the event
* 4. PE must be unmasked for SDEI
* 5. If this is a normal event, no event must be running
* 6. If this is a critical event, no critical event must be running
*
* (1) and (2) are true when this function is running
* (3) is enforced in GIC by selecting the appropriate routing option
* (4) is satisfied by client calling PE_UNMASK
* (5) and (6) are enforced using interrupt priority (the RPR) in the GIC:
*    - Normal SDEI events belong to the Normal SDEI priority class
*    - Critical SDEI events belong to the Critical SDEI priority class
*
* The interrupt has already been acknowledged, and therefore is active,
* so no other PE can handle this event while we are at it.
*
* Find if this is an SDEI interrupt. There must be an event mapped to
* this interrupt
*/
intr = plat_ic_get_interrupt_id(intr_raw);
map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
if (!map) {
ERROR("No SDEI map for interrupt %u\n", intr);
panic();
}
/*
* Received interrupt number must either correspond to event 0, or must
* be bound interrupt.
*/
assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));
se = get_event_entry(map);
state = sdei_get_this_pe_state();
if (state->pe_masked == PE_MASKED) {
/*
* Interrupts received while this PE was masked can't be
* dispatched.
*/
SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
read_mpidr_el1());
if (is_event_shared(map))
sdei_map_lock(map);
handle_masked_trigger(map, se, state, intr_raw);
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
/* Insert load barrier for signalled SDEI event */
if (map->ev_num == SDEI_EVENT_0)
dmbld();
if (is_event_shared(map))
sdei_map_lock(map);
/* Assert that a shared event routed to this PE was configured to target it */
if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
assert(se->affinity ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
}
if (!can_sdei_state_trans(se, DO_DISPATCH)) {
SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
map->ev_num, se->state);
/*
* If the event is registered, leave the interrupt pending so
* that it's delivered when the event is enabled.
*/
if (GET_EV_STATE(se, REGISTERED))
plat_ic_set_interrupt_pending(map->intr);
/*
* The interrupt was disabled or unregistered after the handler
* started executing, which means it is already disabled and we only
* need to EOI it.
*/
plat_ic_end_of_interrupt(intr_raw);
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
disp_ctx = get_outstanding_dispatch();
if (is_event_critical(map)) {
/*
* If this event is Critical, and if there's an outstanding
* dispatch, assert the latter is a Normal dispatch. Critical
* events can preempt an outstanding Normal event dispatch.
*/
if (disp_ctx)
assert(is_event_normal(disp_ctx->map));
} else {
/*
* If this event is Normal, assert that there are no outstanding
* dispatches. Normal events can't preempt any outstanding event
* dispatches.
*/
assert(disp_ctx == NULL);
}
sec_state = get_interrupt_src_ss(flags);
if (is_event_shared(map))
sdei_map_unlock(map);
SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
map->ev_num, sec_state, read_spsr_el3(),
read_elr_el3());
ctx = handle;
/*
* Check if we interrupted secure state. Perform a context switch so
* that we can delegate to NS.
*/
if (sec_state == SECURE) {
save_secure_context();
ctx = restore_and_resume_ns_context();
}
setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);
/*
* End of interrupt is done in sdei_event_complete, when the client
* signals completion.
*/
return 0;
}
/* Explicitly dispatch the given SDEI event */
int sdei_dispatch_event(int ev_num, unsigned int preempted_sec_state)
{
sdei_entry_t *se;
sdei_ev_map_t *map;
cpu_context_t *ctx;
sdei_dispatch_context_t *disp_ctx;
sdei_cpu_state_t *state;
/* Validate preempted security state */
if ((preempted_sec_state != SECURE) && (preempted_sec_state != NON_SECURE))
return -1;
/* Can't dispatch if events are masked on this PE */
state = sdei_get_this_pe_state();
if (state->pe_masked == PE_MASKED)
return -1;
/* Event 0 can't be dispatched */
if (ev_num == SDEI_EVENT_0)
return -1;
/* Locate mapping corresponding to this event */
map = find_event_map(ev_num);
if (!map)
return -1;
/*
* Statically-bound or dynamic maps are dispatched only as a result of
* interrupt, and not upon explicit request.
*/
if (is_map_dynamic(map) || is_map_bound(map))
return -1;
/* The event must be private */
if (is_event_shared(map))
return -1;
/* Examine state of dispatch stack */
disp_ctx = get_outstanding_dispatch();
if (disp_ctx) {
/*
* There's an outstanding dispatch. If the outstanding dispatch
* is critical, no more dispatches are possible.
*/
if (is_event_critical(disp_ctx->map))
return -1;
/*
* If the outstanding dispatch is Normal, only critical events
* can be dispatched.
*/
if (is_event_normal(map))
return -1;
}
se = get_event_entry(map);
if (!can_sdei_state_trans(se, DO_DISPATCH))
return -1;
/* Activate the priority corresponding to the event being dispatched */
ehf_activate_priority(sdei_event_priority(map));
/*
* We assume the current context is SECURE, and that it's already been
* saved.
*/
ctx = restore_and_resume_ns_context();
/*
* The caller has effectively terminated execution. Record the preempted
* context so that it can be resumed later, when the client completes the
* event or completes-and-resumes.
*/
setup_ns_dispatch(map, se, ctx, preempted_sec_state, 0);
return 0;
}
int sdei_event_complete(int resume, uint64_t pc)
{
sdei_dispatch_context_t *disp_ctx;
sdei_entry_t *se;
sdei_ev_map_t *map;
cpu_context_t *ctx;
sdei_action_t act;
unsigned int client_el = sdei_client_el();
/* Return error if called without an active event */
disp_ctx = pop_dispatch();
if (!disp_ctx)
return SDEI_EDENY;
/* Validate resumption point */
if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
return SDEI_EDENY;
map = disp_ctx->map;
assert(map);
se = get_event_entry(map);
SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
map->ev_num, read_spsr_el3(), read_elr_el3());
if (is_event_shared(map))
sdei_map_lock(map);
act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
if (!can_sdei_state_trans(se, act)) {
if (is_event_shared(map))
sdei_map_unlock(map);
return SDEI_EDENY;
}
/*
* Restore the Non-secure context to how it was when originally interrupted.
* Once done, it is up to date with the saved copy.
*/
ctx = cm_get_context(NON_SECURE);
restore_event_ctx(disp_ctx, ctx);
if (resume) {
/*
* Complete-and-resume call. Prepare the Non-secure context
* (currently active) for complete and resume.
*/
cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
/*
* Make it look as if a synchronous exception were taken at the
* supplied Non-secure resumption point. Populate SPSR and
* ELR_ELx so that an ERET from there works as expected.
*
* The assumption is that the client, if necessary, would have
* saved any live content in these registers before making this
* call.
*/
if (client_el == MODE_EL2) {
write_elr_el2(disp_ctx->elr_el3);
write_spsr_el2(disp_ctx->spsr_el3);
} else {
/* EL1 */
write_elr_el1(disp_ctx->elr_el3);
write_spsr_el1(disp_ctx->spsr_el3);
}
}
/*
* If the cause of dispatch originally interrupted the Secure world, and
* if Non-secure world wasn't allowed to preempt Secure execution,
* resume Secure.
*
* No need to save the Non-secure context ahead of a world switch: the
* Non-secure context was fully saved before dispatch, and has been
* returned to its pre-dispatch state.
*/
if ((disp_ctx->sec_state == SECURE) &&
(ehf_is_ns_preemption_allowed() == 0)) {
restore_and_resume_secure_context();
}
if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
/*
* The event was dispatched after receiving SDEI interrupt. With
* the event handling completed, EOI the corresponding
* interrupt.
*/
plat_ic_end_of_interrupt(disp_ctx->intr_raw);
} else {
/*
* An unbound event must have been dispatched explicitly.
* Deactivate the priority level that was activated at the time
* of explicit dispatch.
*/
ehf_deactivate_priority(sdei_event_priority(map));
}
if (is_event_shared(map))
sdei_map_unlock(map);
return 0;
}
int sdei_event_context(void *handle, unsigned int param)
{
sdei_dispatch_context_t *disp_ctx;
if (param >= SDEI_SAVED_GPREGS)
return SDEI_EINVAL;
/* Get outstanding dispatch on this CPU */
disp_ctx = get_outstanding_dispatch();
if (!disp_ctx)
return SDEI_EDENY;
assert(disp_ctx->map);
if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
return SDEI_EDENY;
/*
* No locking is required for the Running status as this is the only CPU
* which can complete the event
*/
return disp_ctx->x[param];
}

File diff suppressed because it is too large

View File

@ -0,0 +1,234 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#ifndef __SDEI_PRIVATE_H__
#define __SDEI_PRIVATE_H__
#include <arch_helpers.h>
#include <debug.h>
#include <errno.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <sdei.h>
#include <spinlock.h>
#include <stdbool.h>
#include <types.h>
#include <utils_def.h>
#ifdef AARCH32
# error SDEI is implemented only for AArch64 systems
#endif
#ifndef PLAT_SDEI_CRITICAL_PRI
# error Platform must define SDEI critical priority value
#endif
#ifndef PLAT_SDEI_NORMAL_PRI
# error Platform must define SDEI normal priority value
#endif
/* Output SDEI logs as verbose */
#define SDEI_LOG(...) VERBOSE("SDEI: " __VA_ARGS__)
/* SDEI handler unregistered state. This is the default state. */
#define SDEI_STATE_UNREGISTERED 0
/* SDEI event status values, as bit positions */
#define SDEI_STATF_REGISTERED 0
#define SDEI_STATF_ENABLED 1
#define SDEI_STATF_RUNNING 2
/* SDEI SMC error codes */
#define SDEI_EINVAL (-2)
#define SDEI_EDENY (-3)
#define SDEI_EPEND (-5)
#define SDEI_ENOMEM (-10)
/*
* 'info' parameter to SDEI_EVENT_GET_INFO SMC.
*
* Note that the SDEI v1.0 specification mistakenly enumerates the
* SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
* future version.
*/
#define SDEI_INFO_EV_TYPE 0
#define SDEI_INFO_EV_NOT_SIGNALED 1
#define SDEI_INFO_EV_PRIORITY 2
#define SDEI_INFO_EV_ROUTING_MODE 3
#define SDEI_INFO_EV_ROUTING_AFF 4
#define SDEI_PRIVATE_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_PRIV])
#define SDEI_SHARED_MAPPING() (&sdei_global_mappings[_SDEI_MAP_IDX_SHRD])
#define for_each_mapping_type(_i, _mapping) \
for (_i = 0, _mapping = &sdei_global_mappings[_i]; \
	_i < _SDEI_MAP_IDX_MAX; \
	_i++, _mapping = &sdei_global_mappings[_i])
#define iterate_mapping(_mapping, _i, _map) \
for (_map = (_mapping)->map, _i = 0; \
_i < (_mapping)->num_maps; \
_i++, _map++)
#define for_each_private_map(_i, _map) \
iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)
#define for_each_shared_map(_i, _map) \
iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)
/* SDEI_FEATURES */
#define SDEI_FEATURE_BIND_SLOTS 0
#define BIND_SLOTS_MASK 0xffff
#define FEATURES_SHARED_SLOTS_SHIFT 16
#define FEATURES_PRIVATE_SLOTS_SHIFT 0
#define FEATURE_BIND_SLOTS(_priv, _shrd) \
((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
(((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))
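/*
 * Worked example of the encoding above (the slot counts are illustrative, not
 * a platform requirement): a platform exposing 3 private and 5 shared dynamic
 * bind slots would answer the SDEI_FEATURES query for SDEI_FEATURE_BIND_SLOTS
 * with FEATURE_BIND_SLOTS(3, 5), i.e.
 * (3 << FEATURES_PRIVATE_SLOTS_SHIFT) | (5 << FEATURES_SHARED_SLOTS_SHIFT)
 * == 0x00050003.
 */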
#define GET_EV_STATE(_e, _s) get_ev_state_bit(_e, SDEI_STATF_##_s)
#define SET_EV_STATE(_e, _s) clr_ev_state_bit(_e->state, SDEI_STATF_##_s)
static inline int is_event_private(sdei_ev_map_t *map)
{
return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0);
}
static inline int is_event_shared(sdei_ev_map_t *map)
{
return !is_event_private(map);
}
static inline int is_event_critical(sdei_ev_map_t *map)
{
return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0);
}
static inline int is_event_normal(sdei_ev_map_t *map)
{
return !is_event_critical(map);
}
static inline int is_event_signalable(sdei_ev_map_t *map)
{
return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0);
}
static inline int is_map_dynamic(sdei_ev_map_t *map)
{
return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0);
}
/*
* Checks whether an event is associated with an interrupt. Statically-bound
* events always return true, and dynamic events return whether
* SDEI_INTERRUPT_BIND has been called on them. This can be used on both static
* and dynamic events to check for an associated interrupt.
*/
static inline int is_map_bound(sdei_ev_map_t *map)
{
return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0);
}
static inline void set_map_bound(sdei_ev_map_t *map)
{
map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT);
}
static inline void clr_map_bound(sdei_ev_map_t *map)
{
map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT));
}
static inline int is_secure_sgi(unsigned int intr)
{
return (plat_ic_is_sgi(intr) &&
(plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
}
/*
* Determine the EL of the client. If EL2 is in use (i.e. SCR_EL3.HCE is set),
* the client is assumed to execute at EL2; otherwise, at EL1.
*/
static inline unsigned int sdei_client_el(void)
{
return read_scr_el3() & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
}
static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
{
return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
PLAT_SDEI_NORMAL_PRI;
}
static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
return ((se->state & BIT(bit_no)) != 0);
}
static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
se->state &= ~BIT(bit_no);
}
/* SDEI actions for state transition */
typedef enum {
/*
* Actions resulting from client requests. These directly map to SMC
* calls. Note that the state table columns are listed in this order
* too.
*/
DO_REGISTER = 0,
DO_RELEASE = 1,
DO_ENABLE = 2,
DO_DISABLE = 3,
DO_UNREGISTER = 4,
DO_ROUTING = 5,
DO_CONTEXT = 6,
DO_COMPLETE = 7,
DO_COMPLETE_RESUME = 8,
/* Action for event dispatch */
DO_DISPATCH = 9,
DO_MAX,
} sdei_action_t;
typedef enum {
SDEI_NORMAL,
SDEI_CRITICAL
} sdei_class_t;
static inline void sdei_map_lock(sdei_ev_map_t *map)
{
spin_lock(&map->lock);
}
static inline void sdei_map_unlock(sdei_ev_map_t *map)
{
spin_unlock(&map->lock);
}
extern const sdei_mapping_t sdei_global_mappings[];
extern sdei_entry_t sdei_private_event_table[];
extern sdei_entry_t sdei_shared_event_table[];
void init_sdei_state(void);
sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared);
sdei_ev_map_t *find_event_map(int ev_num);
sdei_entry_t *get_event_entry(sdei_ev_map_t *map);
int sdei_event_context(void *handle, unsigned int param);
int sdei_event_complete(int resume, uint64_t arg);
void sdei_pe_unmask(void);
unsigned int sdei_pe_mask(void);
int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle,
void *cookie);
bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);
#endif /* __SDEI_PRIVATE_H__ */

View File

@ -0,0 +1,150 @@
/*
* Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <cassert.h>
#include <stdbool.h>
#include "sdei_private.h"
/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
#define r_ 0
#define R_ (1u << SDEI_STATF_RUNNING)
#define e_ 0
#define E_ (1u << SDEI_STATF_ENABLED)
#define g_ 0
#define G_ (1u << SDEI_STATF_REGISTERED)
/* All possible composite handler states */
#define reg_ (r_ | e_ | g_)
#define reG_ (r_ | e_ | G_)
#define rEg_ (r_ | E_ | g_)
#define rEG_ (r_ | E_ | G_)
#define Reg_ (R_ | e_ | g_)
#define ReG_ (R_ | e_ | G_)
#define REg_ (R_ | E_ | g_)
#define REG_ (R_ | E_ | G_)
#define MAX_STATES (REG_ + 1)
/* Invalid state */
#define SDEI_STATE_INVALID ((sdei_state_t) (-1))
/* No change in state */
#define SDEI_STATE_NOP ((sdei_state_t) (-2))
#define X___ SDEI_STATE_INVALID
#define NOP_ SDEI_STATE_NOP
/* Ensure special states don't overlap with valid ones */
CASSERT(X___ > REG_, sdei_state_overlap_invalid);
CASSERT(NOP_ > REG_, sdei_state_overlap_nop);
/*
* SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
* specification:
*
* http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
*
* Not all calls contribute to handler state transition. This table is also used
* to validate whether a call is permissible at a given handler state:
*
* - X___ denotes a forbidden transition;
* - NOP_ denotes a permitted transition, but there's no change in state;
* - Otherwise, XXX_ gives the new state.
*
* DISP[atch] is a transition added for the implementation, but is not mentioned
* in the spec.
*
* Calls that the spec says can be made at any time do not feature in this
* table.
*/
static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
/*
* Action: REG REL ENA DISA UREG ROUT CTX COMP COMPR DISP
* Notes: [3] [1] [3] [3][4] [2]
*/
/* Handler unregistered, disabled, and not running. This is the default state. */
/* 0 */ [reg_] = { reG_, NOP_, X___, X___, X___, X___, X___, X___, X___, X___, },
/* Handler unregistered and running */
/* 4 */ [Reg_] = { X___, X___, X___, X___, X___, X___, NOP_, reg_, reg_, X___, },
/* Handler registered */
/* 1 */ [reG_] = { X___, X___, rEG_, NOP_, reg_, NOP_, X___, X___, X___, X___, },
/* Handler registered and running */
/* 5 */ [ReG_] = { X___, X___, REG_, NOP_, Reg_, X___, NOP_, reG_, reG_, X___, },
/* Handler registered and enabled */
/* 3 */ [rEG_] = { X___, X___, NOP_, reG_, reg_, X___, X___, X___, X___, REG_, },
/* Handler registered, enabled, and running */
/* 7 */ [REG_] = { X___, X___, NOP_, ReG_, Reg_, X___, NOP_, rEG_, rEG_, X___, },
/*
* Invalid states: no valid transition would leave the handler in these
* states; and no transition from these states is possible either.
*/
/*
* A handler can't be enabled without being registered, i.e. the states rEg_
* and REg_ (enabled but not registered) are unreachable.
*/
/* 2 */ [rEg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
/* 6 */ [REg_] = { X___, X___, X___, X___, X___, X___, X___, X___, X___, X___, },
};
/*
* [1] Unregister always also disables the event, so the new state has both the
* enabled and registered bits cleared (reg_ or Reg_).
* [2] Event is considered for dispatch only when it's both registered and
* enabled.
* [3] Never causes change in state.
* [4] Only allowed when running.
*/
/*
* Given an action, transition the state of an event by looking up the state
* table above:
*
* - Return false for invalid transition;
* - Return true for valid transition that causes no change in state;
* - Otherwise, update state and return true.
*
* This function assumes that the caller holds the necessary locks. If the
* transition has constraints beyond what the state table describes, the caller
* is expected to restore the previous state. See sdei_event_register() for an
* example.
*/
bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
{
sdei_state_t next;
assert(act < DO_MAX);
if (se->state >= MAX_STATES) {
WARN(" event state invalid: %x\n", se->state);
return false;
}
next = sdei_state_table[se->state][act];
switch (next) {
case SDEI_STATE_INVALID:
return false;
case SDEI_STATE_NOP:
return true;
default:
/* Valid transition. Update state. */
SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
se->state = next;
return true;
}
}
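/*
 * Minimal sketch, for illustration only (this function is hypothetical and not
 * part of the SDEI service): it walks a handler through a typical lifecycle
 * using only transitions already present in the state table above.
 */
static void example_handler_lifecycle(sdei_entry_t *se)
{
	se->state = reg_;				/* Unregistered, disabled, not running */
	can_sdei_state_trans(se, DO_REGISTER);		/* reg_ -> reG_ */
	can_sdei_state_trans(se, DO_ENABLE);		/* reG_ -> rEG_ */
	can_sdei_state_trans(se, DO_DISPATCH);		/* rEG_ -> REG_ (handler running) */
	can_sdei_state_trans(se, DO_COMPLETE);		/* REG_ -> rEG_ */
	can_sdei_state_trans(se, DO_UNREGISTER);	/* rEG_ -> reg_ */
}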

View File

@ -11,6 +11,7 @@
#include <psci.h>
#include <runtime_instr.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <smcc_helpers.h>
#include <spm_svc.h>
#include <std_svc.h>
@ -45,6 +46,11 @@ static int32_t std_svc_setup(void)
}
#endif
#if SDEI_SUPPORT
/* SDEI initialisation */
sdei_init();
#endif
return ret;
}
@ -92,7 +98,6 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
SMC_RET1(handle, ret);
}
#if ENABLE_SPM
/*
* Dispatch SPM calls to SPM SMC handler and return its return
@ -104,6 +109,13 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
}
#endif
#if SDEI_SUPPORT
if (is_sdei_fid(smc_fid)) {
return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
flags);
}
#endif
switch (smc_fid) {
case ARM_STD_SVC_CALL_COUNT:
/*