BL31: Add SDEI dispatcher
The implementation currently supports only interrupt-based SDEI events, and
supports all interfaces as defined by SDEI specification version 1.0 [1].

Introduce the build option SDEI_SUPPORT to include SDEI dispatcher in BL31.

Update user guide and porting guide. SDEI documentation to follow.

[1] http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf

Change-Id: I758b733084e4ea3b27ac77d0259705565842241a
Co-authored-by: Yousuf A <yousuf.sait@arm.com>
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
parent 3d732e23e7
commit b7cb133e5c

13 changed files with 2452 additions and 1 deletion
bl31/bl31.mk | 12

@@ -36,6 +36,16 @@ ifeq (${EL3_EXCEPTION_HANDLING},1)
BL31_SOURCES += bl31/ehf.c
endif

ifeq (${SDEI_SUPPORT},1)
ifeq (${EL3_EXCEPTION_HANDLING},0)
  $(error EL3_EXCEPTION_HANDLING must be 1 for SDEI support)
endif
BL31_SOURCES += services/std_svc/sdei/sdei_event.c \
		services/std_svc/sdei/sdei_intr_mgmt.c \
		services/std_svc/sdei/sdei_main.c \
		services/std_svc/sdei/sdei_state.c
endif

BL31_LINKERFILE := bl31/bl31.ld.S

# Flag used to indicate if Crash reporting via console should be included
@@ -46,6 +56,8 @@ endif

$(eval $(call assert_boolean,CRASH_REPORTING))
$(eval $(call assert_boolean,EL3_EXCEPTION_HANDLING))
$(eval $(call assert_boolean,SDEI_SUPPORT))

$(eval $(call add_define,CRASH_REPORTING))
$(eval $(call add_define,EL3_EXCEPTION_HANDLING))
$(eval $(call add_define,SDEI_SUPPORT))
@@ -1904,6 +1904,74 @@ calculated by the linker then a link time assertion is raised. A compile time
assertion is raised if the value of the constant is not aligned to the cache
line boundary.

SDEI porting requirements
~~~~~~~~~~~~~~~~~~~~~~~~~

The SDEI dispatcher requires the platform to provide the following macros
and functions, some of which are optional and others mandatory.

Macros
......

Macro: PLAT_SDEI_NORMAL_PRI [mandatory]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This macro must be defined to the EL3 exception priority level associated with
Normal SDEI events on the platform. This must have a higher value (therefore a
lower priority) than ``PLAT_SDEI_CRITICAL_PRI``.

Macro: PLAT_SDEI_CRITICAL_PRI [mandatory]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This macro must be defined to the EL3 exception priority level associated with
Critical SDEI events on the platform. This must have a lower value (therefore a
higher priority) than ``PLAT_SDEI_NORMAL_PRI``.

It is recommended that SDEI exception priorities in general are assigned the
lowest of the Secure priorities. Among the SDEI exceptions, the Critical SDEI
priority must be higher than the Normal SDEI priority.
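As a minimal illustration (the values below are hypothetical and not mandated
by the dispatcher), a platform could define both priorities in its platform
definition header, keeping the Critical value numerically lower than the Normal
one:

::

    /* Illustrative values only; the actual choice is platform-specific */
    #define PLAT_SDEI_CRITICAL_PRI    0x60
    #define PLAT_SDEI_NORMAL_PRI      0x70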
Functions
.........

Function: int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode) [optional]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

::

   Argument: uintptr_t
   Argument: unsigned int
   Return: int

This function validates the address of client entry points provided for both
event registration and *Complete and Resume* SDEI calls. The first argument is
the address of the handler the SDEI client requested to register, and the
second is the Exception Level the client executes in. The function must return
``0`` for successful validation, or ``-1`` upon failure.

The default implementation always returns ``0``. On ARM platforms, this
function is implemented to translate the entry point to a physical address, and
further to ensure that the address is located in Non-secure DRAM.
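A minimal sketch of a platform override is shown below. It only range-checks
the address against a Non-secure DRAM window; ``PLAT_NS_DRAM_BASE`` and
``PLAT_NS_DRAM_SIZE`` are hypothetical macros used for illustration, and a real
port may instead translate the address through the client's translation regime,
as described above for ARM platforms.

::

    int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
    {
        /* Accept only entry points inside the (illustrative) NS DRAM window */
        if ((ep >= PLAT_NS_DRAM_BASE) &&
                (ep < (PLAT_NS_DRAM_BASE + PLAT_NS_DRAM_SIZE)))
            return 0;

        return -1;
    }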
Function: void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr) [optional]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

::

   Argument: uint64_t
   Argument: unsigned int
   Return: void

The SDEI specification requires that a PE comes out of reset with SDEI events
masked. The client is therefore expected to call ``PE_UNMASK`` to unmask SDEI
events on the PE. No SDEI events can be dispatched until then.

Should a PE receive an interrupt that was bound to an SDEI event while events
are masked on the PE, the dispatcher invokes the function
``plat_sdei_handle_masked_trigger``. The MPIDR of the PE that received the
interrupt and the interrupt ID are passed as parameters.

The default implementation only prints out a warning message.

Power State Coordination Interface (in BL31)
--------------------------------------------
@@ -534,6 +534,12 @@ Common build options
   optional. It is only needed if the platform makefile specifies that it
   is required in order to build the ``fwu_fip`` target.

-  ``SDEI_SUPPORT``: Setting this to ``1`` enables support for the Software
   Delegated Exception Interface (SDEI) in the BL31 image. This defaults to
   ``0``.

   When set to ``1``, the build option ``EL3_EXCEPTION_HANDLING`` must also be
   set to ``1``.
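A representative build invocation (illustrative only; the platform and any
other options are the user's choice) that pulls the dispatcher into BL31 would
be:

::

    make PLAT=fvp EL3_EXCEPTION_HANDLING=1 SDEI_SUPPORT=1 all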
-  ``SEPARATE_CODE_AND_RODATA``: Whether code and read-only data should be
   isolated on separate memory pages. This is a trade-off between security and
   memory usage. See "Isolating code and read-only data on separate memory
@@ -114,6 +114,16 @@ void bl1_plat_arch_setup(void);
void bl1_platform_setup(void);
struct meminfo *bl1_plat_sec_mem_layout(void);

/*******************************************************************************
 * Optional EL3 component functions in BL31
 ******************************************************************************/

/* SDEI platform functions */
#if SDEI_SUPPORT
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode);
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr);
#endif

/*
 * The following function is mandatory when the
 * firmware update feature is used.
include/services/sdei.h | 178 (new file)

@@ -0,0 +1,178 @@
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __SDEI_H__
#define __SDEI_H__

#include <spinlock.h>
#include <utils_def.h>

/* Range 0xC4000020 - 0xC400003F reserved for SDEI 64-bit SMC calls */
#define SDEI_VERSION				0xC4000020
#define SDEI_EVENT_REGISTER			0xC4000021
#define SDEI_EVENT_ENABLE			0xC4000022
#define SDEI_EVENT_DISABLE			0xC4000023
#define SDEI_EVENT_CONTEXT			0xC4000024
#define SDEI_EVENT_COMPLETE			0xC4000025
#define SDEI_EVENT_COMPLETE_AND_RESUME		0xC4000026

#define SDEI_EVENT_UNREGISTER			0xC4000027
#define SDEI_EVENT_STATUS			0xC4000028
#define SDEI_EVENT_GET_INFO			0xC4000029
#define SDEI_EVENT_ROUTING_SET			0xC400002A
#define SDEI_PE_MASK				0xC400002B
#define SDEI_PE_UNMASK				0xC400002C

#define SDEI_INTERRUPT_BIND			0xC400002D
#define SDEI_INTERRUPT_RELEASE			0xC400002E
#define SDEI_EVENT_SIGNAL			0xC400002F
#define SDEI_FEATURES				0xC4000030
#define SDEI_PRIVATE_RESET			0xC4000031
#define SDEI_SHARED_RESET			0xC4000032

/* SDEI_EVENT_REGISTER flags */
#define SDEI_REGF_RM_ANY			0
#define SDEI_REGF_RM_PE				1

/* SDEI_EVENT_COMPLETE status flags */
#define SDEI_EV_HANDLED				0
#define SDEI_EV_FAILED				1

/* SDEI event status values, as bit positions */
#define SDEI_STATF_REGISTERED			0
#define SDEI_STATF_ENABLED			1
#define SDEI_STATF_RUNNING			2

/* Internal: SDEI flag bit positions */
#define _SDEI_MAPF_DYNAMIC_SHIFT		1
#define _SDEI_MAPF_BOUND_SHIFT			2
#define _SDEI_MAPF_SIGNALABLE_SHIFT		3
#define _SDEI_MAPF_PRIVATE_SHIFT		4
#define _SDEI_MAPF_CRITICAL_SHIFT		5

/* SDEI event 0 */
#define SDEI_EVENT_0				0

/* Placeholder interrupt for dynamic mapping */
#define SDEI_DYN_IRQ				0

/* SDEI flags */

/*
 * These flags determine whether or not an event can be associated with an
 * interrupt. Static events are permanently associated with an interrupt, and
 * can't be changed at runtime. Association of dynamic events with interrupts
 * can be changed at run time using the SDEI_INTERRUPT_BIND and
 * SDEI_INTERRUPT_RELEASE calls.
 *
 * SDEI_MAPF_DYNAMIC only indicates run time configurability, whereas
 * SDEI_MAPF_BOUND indicates interrupt association. For example:
 *
 *  - Calling SDEI_INTERRUPT_BIND on a dynamic event will have both
 *    SDEI_MAPF_DYNAMIC and SDEI_MAPF_BOUND set.
 *
 *  - Statically-bound events will always have SDEI_MAPF_BOUND set, and neither
 *    SDEI_INTERRUPT_BIND nor SDEI_INTERRUPT_RELEASE can be called on them.
 *
 * See also the is_map_bound() macro.
 */
#define SDEI_MAPF_DYNAMIC	BIT(_SDEI_MAPF_DYNAMIC_SHIFT)
#define SDEI_MAPF_BOUND		BIT(_SDEI_MAPF_BOUND_SHIFT)

#define SDEI_MAPF_SIGNALABLE	BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)
#define SDEI_MAPF_PRIVATE	BIT(_SDEI_MAPF_PRIVATE_SHIFT)
#define SDEI_MAPF_CRITICAL	BIT(_SDEI_MAPF_CRITICAL_SHIFT)

/* Indices of private and shared mappings */
#define _SDEI_MAP_IDX_PRIV	0
#define _SDEI_MAP_IDX_SHRD	1
#define _SDEI_MAP_IDX_MAX	2

/* The macros below are used to identify SDEI calls from the SMC function ID */
#define SDEI_FID_MASK		U(0xffe0)
#define SDEI_FID_VALUE		U(0x20)
#define is_sdei_fid(_fid) \
	((((_fid) & SDEI_FID_MASK) == SDEI_FID_VALUE) && \
	 (((_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64))

#define SDEI_EVENT_MAP(_event, _intr, _flags) \
	{ \
		.ev_num = _event, \
		.intr = _intr, \
		.map_flags = _flags \
	}

#define SDEI_SHARED_EVENT(_event, _intr, _flags) \
	SDEI_EVENT_MAP(_event, _intr, _flags)

#define SDEI_PRIVATE_EVENT(_event, _intr, _flags) \
	SDEI_EVENT_MAP(_event, _intr, _flags | SDEI_MAPF_PRIVATE)

#define SDEI_DEFINE_EVENT_0(_intr) \
	SDEI_PRIVATE_EVENT(SDEI_EVENT_0, _intr, SDEI_MAPF_SIGNALABLE)

/*
 * Declare per-core private event entries and global shared event entries. Also
 * declare a global structure containing the private and shared mappings.
 *
 * This macro must be used in the same file as the platform SDEI mappings are
 * declared. Only then would ARRAY_SIZE() yield a meaningful value.
 */
#define REGISTER_SDEI_MAP(_private, _shared) \
	sdei_entry_t sdei_private_event_table \
		[PLATFORM_CORE_COUNT * ARRAY_SIZE(_private)]; \
	sdei_entry_t sdei_shared_event_table[ARRAY_SIZE(_shared)]; \
	const sdei_mapping_t sdei_global_mappings[] = { \
		[_SDEI_MAP_IDX_PRIV] = { \
			.map = _private, \
			.num_maps = ARRAY_SIZE(_private) \
		}, \
		[_SDEI_MAP_IDX_SHRD] = { \
			.map = _shared, \
			.num_maps = ARRAY_SIZE(_shared) \
		}, \
	}

typedef uint8_t sdei_state_t;

/* Runtime data of an SDEI event */
typedef struct sdei_entry {
	uint64_t ep;		/* Entry point */
	uint64_t arg;		/* Entry point argument */
	uint64_t affinity;	/* Affinity of shared event */
	unsigned int reg_flags;	/* Registration flags */

	/* Event handler states: registered, enabled, running */
	sdei_state_t state;
} sdei_entry_t;

/* Mapping of SDEI events to interrupts, and associated data */
typedef struct sdei_ev_map {
	int32_t ev_num;		/* Event number */
	unsigned int intr;	/* Physical interrupt number for a bound map */
	unsigned int map_flags;	/* Mapping flags, see SDEI_MAPF_* */
	unsigned int reg_count;	/* Registration count */
	spinlock_t lock;	/* Per-event lock */
} sdei_ev_map_t;

typedef struct sdei_mapping {
	sdei_ev_map_t *map;
	size_t num_maps;
} sdei_mapping_t;

/* Handler to be called to handle SDEI SMC calls */
uint64_t sdei_smc_handler(uint32_t smc_fid,
		uint64_t x1,
		uint64_t x2,
		uint64_t x3,
		uint64_t x4,
		void *cookie,
		void *handle,
		uint64_t flags);

void sdei_init(void);

#endif /* __SDEI_H__ */
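The mapping macros above are intended to be instantiated once by the platform.
A minimal sketch is shown below; the event numbers, the SGI chosen for event 0,
and the plat_* array names are assumptions for illustration, not values
required by this header.

	/* Hypothetical platform file declaring the SDEI mappings */
	static sdei_ev_map_t plat_private_sdei[] = {
		/* Event 0, signalled through an (assumed) SGI number 8 */
		SDEI_DEFINE_EVENT_0(8),
		/* One dynamically bindable private event */
		SDEI_PRIVATE_EVENT(100, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
	};

	static sdei_ev_map_t plat_shared_sdei[] = {
		/* One dynamically bindable shared event */
		SDEI_SHARED_EVENT(200, SDEI_DYN_IRQ, SDEI_MAPF_DYNAMIC),
	};

	/* Instantiates the per-PE private and the shared entry tables */
	REGISTER_SDEI_MAP(plat_private_sdei, plat_shared_sdei);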
@@ -114,6 +114,9 @@ RESET_TO_BL31 := 0
# For Chain of Trust
SAVE_KEYS := 0

# Software Delegated Exception support
SDEI_SUPPORT := 0

# Whether code and read-only data should be put on separate memory pages. The
# platform Makefile is free to override this value.
SEPARATE_CODE_AND_RODATA := 0
@@ -3,6 +3,8 @@
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <console.h>
#include <platform.h>

@@ -20,6 +22,11 @@
#pragma weak plat_get_syscnt_freq2
#endif /* ERROR_DEPRECATED */

#if SDEI_SUPPORT
#pragma weak plat_sdei_handle_masked_trigger
#pragma weak plat_sdei_validate_entry_point
#endif

void bl31_plat_enable_mmu(uint32_t flags)
{
	enable_mmu_el3(flags);

@@ -64,3 +71,22 @@ unsigned int plat_get_syscnt_freq2(void)
	return (unsigned int)freq;
}
#endif /* ERROR_DEPRECATED */

#if SDEI_SUPPORT
/*
 * Default function that handles SDEI interrupts received while events are
 * masked on this PE; it only prints a warning.
 */
void plat_sdei_handle_masked_trigger(uint64_t mpidr, unsigned int intr)
{
	WARN("Spurious SDEI interrupt %u on masked PE %lx\n", intr, mpidr);
}

/*
 * Default function to validate an SDEI entry point, which always returns
 * success. Platforms may override this with their own validation mechanism.
 */
int plat_sdei_validate_entry_point(uintptr_t ep, unsigned int client_mode)
{
	return 0;
}
#endif
services/std_svc/sdei/sdei_event.c | 98 (new file)

@@ -0,0 +1,98 @@
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <utils.h>
#include "sdei_private.h"

#define MAP_OFF(_map, _mapping)	((_map) - (_mapping)->map)

/*
 * Get the SDEI entry for the given mapping: on success, returns a pointer to
 * the SDEI entry. On error, returns NULL.
 *
 * Both shared and private maps are stored in one-dimensional arrays. Private
 * event entries are kept for each PE, forming in effect a 2D array.
 */
sdei_entry_t *get_event_entry(sdei_ev_map_t *map)
{
	const sdei_mapping_t *mapping;
	sdei_entry_t *cpu_priv_base;
	unsigned int idx, base_idx;

	if (is_event_private(map)) {
		/*
		 * For a private map, find the index of the mapping in the
		 * array.
		 */
		mapping = SDEI_PRIVATE_MAPPING();
		idx = MAP_OFF(map, mapping);

		/* Base of private mappings for this CPU */
		base_idx = plat_my_core_pos() * mapping->num_maps;
		cpu_priv_base = &sdei_private_event_table[base_idx];

		/*
		 * Return the address of the entry at the same index in this
		 * CPU's slice of the private event entry table.
		 */
		return &cpu_priv_base[idx];
	} else {
		mapping = SDEI_SHARED_MAPPING();
		idx = MAP_OFF(map, mapping);

		return &sdei_shared_event_table[idx];
	}
}

/*
 * Find the event mapping for a given interrupt number: on success, returns a
 * pointer to the event mapping. On error, returns NULL.
 */
sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	unsigned int i;

	/*
	 * Look for a match in the private or shared mappings, as requested.
	 * This is a linear search; if the mappings were required to be sorted,
	 * binary search could be considered for large maps.
	 */
	mapping = shared ? SDEI_SHARED_MAPPING() : SDEI_PRIVATE_MAPPING();
	iterate_mapping(mapping, i, map) {
		if (map->intr == intr_num)
			return map;
	}

	return NULL;
}

/*
 * Find the event mapping for a given event number: on success, returns a
 * pointer to the event mapping. On error, returns NULL.
 */
sdei_ev_map_t *find_event_map(int ev_num)
{
	const sdei_mapping_t *mapping;
	sdei_ev_map_t *map;
	unsigned int i, j;

	/*
	 * Iterate through the mappings to find a match. This is a linear
	 * search; if the mappings were required to be sorted, binary search
	 * could be considered for large maps.
	 */
	for_each_mapping_type(i, mapping) {
		iterate_mapping(mapping, j, map) {
			if (map->ev_num == ev_num)
				return map;
		}
	}

	return NULL;
}
services/std_svc/sdei/sdei_intr_mgmt.c | 590 (new file)

@@ -0,0 +1,590 @@
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <cassert.h>
#include <context_mgmt.h>
#include <debug.h>
#include <ehf.h>
#include <interrupt_mgmt.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <string.h>
#include "sdei_private.h"

#define PE_MASKED	1
#define PE_NOT_MASKED	0

/* x0-x17 GPREGS context */
#define SDEI_SAVED_GPREGS	18

/* Maximum preemption nesting levels: Critical priority and Normal priority */
#define MAX_EVENT_NESTING	2

/* Per-CPU SDEI state access macro */
#define sdei_get_this_pe_state()	(&sdei_cpu_state[plat_my_core_pos()])

/* Structure to store information about an outstanding dispatch */
typedef struct sdei_dispatch_context {
	sdei_ev_map_t *map;
	unsigned int sec_state;
	unsigned int intr_raw;
	uint64_t x[SDEI_SAVED_GPREGS];

	/* Exception state registers */
	uint64_t elr_el3;
	uint64_t spsr_el3;
} sdei_dispatch_context_t;

/* Per-CPU SDEI state data */
typedef struct sdei_cpu_state {
	sdei_dispatch_context_t dispatch_stack[MAX_EVENT_NESTING];
	unsigned short stack_top;	/* Empty ascending */
	unsigned int pe_masked:1;
	unsigned int pending_enables:1;
} sdei_cpu_state_t;

/* SDEI states for all cores in the system */
static sdei_cpu_state_t sdei_cpu_state[PLATFORM_CORE_COUNT];

unsigned int sdei_pe_mask(void)
{
	unsigned int ret;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	/*
	 * The return value indicates whether this call had any effect on the
	 * mask status of this PE.
	 */
	ret = (state->pe_masked ^ PE_MASKED);
	state->pe_masked = PE_MASKED;

	return ret;
}

void sdei_pe_unmask(void)
{
	int i;
	sdei_ev_map_t *map;
	sdei_entry_t *se;
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	uint64_t my_mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;

	/*
	 * If there are pending enables, iterate through the private mappings
	 * and enable those bound maps that are in enabled state. Also, iterate
	 * through shared mappings and enable interrupts of events that are
	 * targeted to this PE.
	 */
	if (state->pending_enables) {
		for_each_private_map(i, map) {
			se = get_event_entry(map);
			if (is_map_bound(map) && GET_EV_STATE(se, ENABLED))
				plat_ic_enable_interrupt(map->intr);
		}

		for_each_shared_map(i, map) {
			se = get_event_entry(map);

			sdei_map_lock(map);
			if (is_map_bound(map) &&
					GET_EV_STATE(se, ENABLED) &&
					(se->reg_flags == SDEI_REGF_RM_PE) &&
					(se->affinity == my_mpidr)) {
				plat_ic_enable_interrupt(map->intr);
			}
			sdei_map_unlock(map);
		}
	}

	state->pending_enables = 0;
	state->pe_masked = PE_NOT_MASKED;
}

/* Push a dispatch context to the dispatch stack */
static sdei_dispatch_context_t *push_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();
	sdei_dispatch_context_t *disp_ctx;

	/* Cannot have more than max events */
	assert(state->stack_top < MAX_EVENT_NESTING);

	disp_ctx = &state->dispatch_stack[state->stack_top];
	state->stack_top++;

	return disp_ctx;
}

/* Pop a dispatch context from the dispatch stack */
static sdei_dispatch_context_t *pop_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	state->stack_top--;

	return &state->dispatch_stack[state->stack_top];
}

/* Retrieve the context at the top of the dispatch stack */
static sdei_dispatch_context_t *get_outstanding_dispatch(void)
{
	sdei_cpu_state_t *state = sdei_get_this_pe_state();

	if (state->stack_top == 0)
		return NULL;

	assert(state->stack_top <= MAX_EVENT_NESTING);

	return &state->dispatch_stack[state->stack_top - 1];
}

static void save_event_ctx(sdei_ev_map_t *map, void *tgt_ctx, int sec_state,
		unsigned int intr_raw)
{
	sdei_dispatch_context_t *disp_ctx;
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	disp_ctx = push_dispatch();
	assert(disp_ctx);
	disp_ctx->sec_state = sec_state;
	disp_ctx->map = map;
	disp_ctx->intr_raw = intr_raw;

	/* Save general purpose and exception registers */
	memcpy(disp_ctx->x, tgt_gpregs, sizeof(disp_ctx->x));
	disp_ctx->spsr_el3 = read_ctx_reg(tgt_el3, CTX_SPSR_EL3);
	disp_ctx->elr_el3 = read_ctx_reg(tgt_el3, CTX_ELR_EL3);
}

static void restore_event_ctx(sdei_dispatch_context_t *disp_ctx, void *tgt_ctx)
{
	gp_regs_t *tgt_gpregs;
	el3_state_t *tgt_el3;

	assert(tgt_ctx);
	tgt_gpregs = get_gpregs_ctx(tgt_ctx);
	tgt_el3 = get_el3state_ctx(tgt_ctx);

	CASSERT(sizeof(disp_ctx->x) == (SDEI_SAVED_GPREGS * sizeof(uint64_t)),
			foo);

	/* Restore general purpose and exception registers */
	memcpy(tgt_gpregs, disp_ctx->x, sizeof(disp_ctx->x));
	write_ctx_reg(tgt_el3, CTX_SPSR_EL3, disp_ctx->spsr_el3);
	write_ctx_reg(tgt_el3, CTX_ELR_EL3, disp_ctx->elr_el3);
}

static void save_secure_context(void)
{
	cm_el1_sysregs_context_save(SECURE);
}

/* Restore Secure context and arrange to resume it at the next ERET */
static void restore_and_resume_secure_context(void)
{
	cm_el1_sysregs_context_restore(SECURE);
	cm_set_next_eret_context(SECURE);
}

/*
 * Restore Non-secure context and arrange to resume it at the next ERET. Return
 * a pointer to the Non-secure context.
 */
static cpu_context_t *restore_and_resume_ns_context(void)
{
	cpu_context_t *ns_ctx;

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);

	return ns_ctx;
}

/*
 * Populate the Non-secure context so that the next ERET will dispatch to the
 * SDEI client.
 */
static void setup_ns_dispatch(sdei_ev_map_t *map, sdei_entry_t *se,
		cpu_context_t *ctx, int sec_state_to_resume,
		unsigned int intr_raw)
{
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* Push the event and context */
	save_event_ctx(map, ctx, sec_state_to_resume, intr_raw);

	/*
	 * Set up handler arguments:
	 *
	 * - x0: Event number
	 * - x1: Handler argument supplied at the time of event registration
	 * - x2: Interrupted PC
	 * - x3: Interrupted SPSR
	 */
	SMC_SET_GP(ctx, CTX_GPREG_X0, map->ev_num);
	SMC_SET_GP(ctx, CTX_GPREG_X1, se->arg);
	SMC_SET_GP(ctx, CTX_GPREG_X2, read_ctx_reg(el3_ctx, CTX_ELR_EL3));
	SMC_SET_GP(ctx, CTX_GPREG_X3, read_ctx_reg(el3_ctx, CTX_SPSR_EL3));

	/*
	 * Prepare for ERET:
	 *
	 * - Set PC to the registered handler address
	 * - Set SPSR to jump to client EL with exceptions masked
	 */
	cm_set_elr_spsr_el3(NON_SECURE, (uintptr_t) se->ep,
			SPSR_64(sdei_client_el(), MODE_SP_ELX,
				DISABLE_ALL_EXCEPTIONS));
}

/* Handle a triggered SDEI interrupt while events were masked on this PE */
static void handle_masked_trigger(sdei_ev_map_t *map, sdei_entry_t *se,
		sdei_cpu_state_t *state, unsigned int intr_raw)
{
	uint64_t my_mpidr __unused = (read_mpidr_el1() & MPIDR_AFFINITY_MASK);
	int disable = 0;

	/* Nothing to do for event 0 */
	if (map->ev_num == SDEI_EVENT_0)
		return;

	/*
	 * For a private event, or for a shared event specifically routed to
	 * this CPU, we disable the interrupt, leave the interrupt pending, and
	 * do EOI.
	 */
	if (is_event_private(map)) {
		disable = 1;
	} else if (se->reg_flags == SDEI_REGF_RM_PE) {
		assert(se->affinity == my_mpidr);
		disable = 1;
	}

	if (disable) {
		plat_ic_disable_interrupt(map->intr);
		plat_ic_set_interrupt_pending(map->intr);
		plat_ic_end_of_interrupt(intr_raw);
		state->pending_enables = 1;

		return;
	}

	/*
	 * We just received a shared event with routing set to ANY PE. The
	 * interrupt can't be delegated on this PE as SDEI events are masked.
	 * However, because its routing mode is ANY, it is possible that the
	 * event can be delegated on any other PE that hasn't masked events.
	 * Therefore, we set the interrupt back pending so as to give other
	 * suitable PEs a chance of handling it.
	 */
	assert(plat_ic_is_spi(map->intr));
	plat_ic_set_interrupt_pending(map->intr);

	/*
	 * Leaving the same interrupt pending also means that the same interrupt
	 * can target this PE again as soon as this PE leaves EL3. Whether and
	 * how often that happens depends on the implementation of the GIC.
	 *
	 * We therefore call a platform handler to resolve this situation.
	 */
	plat_sdei_handle_masked_trigger(my_mpidr, map->intr);

	/* This PE is masked. We EOI the interrupt, as it can't be delegated */
	plat_ic_end_of_interrupt(intr_raw);
}

/* SDEI main interrupt handler */
int sdei_intr_handler(uint32_t intr_raw, uint32_t flags, void *handle,
		void *cookie)
{
	sdei_entry_t *se;
	cpu_context_t *ctx;
	sdei_ev_map_t *map;
	sdei_dispatch_context_t *disp_ctx;
	unsigned int sec_state;
	sdei_cpu_state_t *state;
	uint32_t intr;

	/*
	 * To handle an event, the following conditions must be true:
	 *
	 * 1. Event must be signalled
	 * 2. Event must be enabled
	 * 3. This PE must be a target PE for the event
	 * 4. PE must be unmasked for SDEI
	 * 5. If this is a normal event, no event must be running
	 * 6. If this is a critical event, no critical event must be running
	 *
	 * (1) and (2) are true when this function is running
	 * (3) is enforced in the GIC by selecting the appropriate routing option
	 * (4) is satisfied by the client calling PE_UNMASK
	 * (5) and (6) are enforced using interrupt priority, the RPR, in the GIC:
	 *   - Normal SDEI events belong to the Normal SDEI priority class
	 *   - Critical SDEI events belong to the Critical SDEI priority class
	 *
	 * The interrupt has already been acknowledged, and therefore is active,
	 * so no other PE can handle this event while we are at it.
	 *
	 * Find if this is an SDEI interrupt. There must be an event mapped to
	 * this interrupt.
	 */
	intr = plat_ic_get_interrupt_id(intr_raw);
	map = find_event_map_by_intr(intr, plat_ic_is_spi(intr));
	if (!map) {
		ERROR("No SDEI map for interrupt %u\n", intr);
		panic();
	}

	/*
	 * The received interrupt number must either correspond to event 0, or
	 * must be a bound interrupt.
	 */
	assert((map->ev_num == SDEI_EVENT_0) || is_map_bound(map));

	se = get_event_entry(map);
	state = sdei_get_this_pe_state();

	if (state->pe_masked == PE_MASKED) {
		/*
		 * Interrupts received while this PE was masked can't be
		 * dispatched.
		 */
		SDEI_LOG("interrupt %u on %lx while PE masked\n", map->intr,
				read_mpidr_el1());
		if (is_event_shared(map))
			sdei_map_lock(map);

		handle_masked_trigger(map, se, state, intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	/* Insert load barrier for signalled SDEI event */
	if (map->ev_num == SDEI_EVENT_0)
		dmbld();

	if (is_event_shared(map))
		sdei_map_lock(map);

	/* Assert that a shared event routed to this PE had been configured so */
	if (is_event_shared(map) && (se->reg_flags == SDEI_REGF_RM_PE)) {
		assert(se->affinity ==
				(read_mpidr_el1() & MPIDR_AFFINITY_MASK));
	}

	if (!can_sdei_state_trans(se, DO_DISPATCH)) {
		SDEI_LOG("SDEI event 0x%x can't be dispatched; state=0x%x\n",
				map->ev_num, se->state);

		/*
		 * If the event is registered, leave the interrupt pending so
		 * that it's delivered when the event is enabled.
		 */
		if (GET_EV_STATE(se, REGISTERED))
			plat_ic_set_interrupt_pending(map->intr);

		/*
		 * The interrupt was disabled or unregistered after the handler
		 * started to execute, which means now the interrupt is already
		 * disabled and we just need to EOI the interrupt.
		 */
		plat_ic_end_of_interrupt(intr_raw);

		if (is_event_shared(map))
			sdei_map_unlock(map);

		return 0;
	}

	disp_ctx = get_outstanding_dispatch();
	if (is_event_critical(map)) {
		/*
		 * If this event is Critical, and if there's an outstanding
		 * dispatch, assert the latter is a Normal dispatch. Critical
		 * events can preempt an outstanding Normal event dispatch.
		 */
		if (disp_ctx)
			assert(is_event_normal(disp_ctx->map));
	} else {
		/*
		 * If this event is Normal, assert that there are no outstanding
		 * dispatches. Normal events can't preempt any outstanding event
		 * dispatches.
		 */
		assert(disp_ctx == NULL);
	}

	sec_state = get_interrupt_src_ss(flags);

	if (is_event_shared(map))
		sdei_map_unlock(map);

	SDEI_LOG("ACK %lx, ev:%d ss:%d spsr:%lx ELR:%lx\n", read_mpidr_el1(),
			map->ev_num, sec_state, read_spsr_el3(),
			read_elr_el3());

	ctx = handle;

	/*
	 * Check if we interrupted the Secure state. If so, perform a context
	 * switch so that we can delegate to NS.
	 */
	if (sec_state == SECURE) {
		save_secure_context();
		ctx = restore_and_resume_ns_context();
	}

	setup_ns_dispatch(map, se, ctx, sec_state, intr_raw);

	/*
	 * End of interrupt is done in sdei_event_complete, when the client
	 * signals completion.
	 */
	return 0;
}

int sdei_event_complete(int resume, uint64_t pc)
{
	sdei_dispatch_context_t *disp_ctx;
	sdei_entry_t *se;
	sdei_ev_map_t *map;
	cpu_context_t *ctx;
	sdei_action_t act;
	unsigned int client_el = sdei_client_el();

	/* Return error if called without an active event */
	disp_ctx = pop_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	/* Validate resumption point */
	if (resume && (plat_sdei_validate_entry_point(pc, client_el) != 0))
		return SDEI_EDENY;

	map = disp_ctx->map;
	assert(map);

	se = get_event_entry(map);

	SDEI_LOG("EOI:%lx, %d spsr:%lx elr:%lx\n", read_mpidr_el1(),
			map->ev_num, read_spsr_el3(), read_elr_el3());

	if (is_event_shared(map))
		sdei_map_lock(map);

	act = resume ? DO_COMPLETE_RESUME : DO_COMPLETE;
	if (!can_sdei_state_trans(se, act)) {
		if (is_event_shared(map))
			sdei_map_unlock(map);
		return SDEI_EDENY;
	}

	/*
	 * Restore the Non-secure context to how it was originally interrupted.
	 * Once done, it's up-to-date with the saved copy.
	 */
	ctx = cm_get_context(NON_SECURE);
	restore_event_ctx(disp_ctx, ctx);

	if (resume) {
		/*
		 * Complete-and-resume call. Prepare the Non-secure context
		 * (currently active) for complete and resume.
		 */
		cm_set_elr_spsr_el3(NON_SECURE, pc, SPSR_64(client_el,
					MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));

		/*
		 * Make it look as if a synchronous exception were taken at the
		 * supplied Non-secure resumption point. Populate SPSR and
		 * ELR_ELx so that an ERET from there works as expected.
		 *
		 * The assumption is that the client, if necessary, would have
		 * saved any live content in these registers before making this
		 * call.
		 */
		if (client_el == MODE_EL2) {
			write_elr_el2(disp_ctx->elr_el3);
			write_spsr_el2(disp_ctx->spsr_el3);
		} else {
			/* EL1 */
			write_elr_el1(disp_ctx->elr_el3);
			write_spsr_el1(disp_ctx->spsr_el3);
		}
	}

	/*
	 * If the cause of dispatch originally interrupted the Secure world, and
	 * if the Non-secure world wasn't allowed to preempt Secure execution,
	 * resume Secure.
	 *
	 * No need to save the Non-secure context ahead of a world switch: the
	 * Non-secure context was fully saved before dispatch, and has been
	 * returned to its pre-dispatch state.
	 */
	if ((disp_ctx->sec_state == SECURE) &&
			(ehf_is_ns_preemption_allowed() == 0)) {
		restore_and_resume_secure_context();
	}

	if ((map->ev_num == SDEI_EVENT_0) || is_map_bound(map)) {
		/*
		 * The event was dispatched after receiving an SDEI interrupt.
		 * With the event handling completed, EOI the corresponding
		 * interrupt.
		 */
		plat_ic_end_of_interrupt(disp_ctx->intr_raw);
	}

	if (is_event_shared(map))
		sdei_map_unlock(map);

	return 0;
}

int sdei_event_context(void *handle, unsigned int param)
{
	sdei_dispatch_context_t *disp_ctx;

	if (param >= SDEI_SAVED_GPREGS)
		return SDEI_EINVAL;

	/* Get the outstanding dispatch on this CPU */
	disp_ctx = get_outstanding_dispatch();
	if (!disp_ctx)
		return SDEI_EDENY;

	assert(disp_ctx->map);

	if (!can_sdei_state_trans(get_event_entry(disp_ctx->map), DO_CONTEXT))
		return SDEI_EDENY;

	/*
	 * No locking is required for the Running status as this is the only CPU
	 * which can complete the event.
	 */

	return disp_ctx->x[param];
}
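For orientation, the sketch below shows what a Non-secure client handler sees
when the dispatcher ERETs to it: the event number, the registration argument,
and the interrupted PC and SPSR arrive in x0-x3, as set up in
setup_ns_dispatch() above. The handler shape and the smc() helper used to issue
the completion call are assumptions for illustration; they are not part of this
patch.

	/* Hypothetical Non-secure client handler (illustration only) */
	void client_sdei_handler(uint64_t ev_num, uint64_t arg,
			uint64_t interrupted_pc, uint64_t interrupted_spsr)
	{
		/* ... handle the event in the client ... */

		/* Complete the event and resume at the interrupted PC */
		smc(SDEI_EVENT_COMPLETE_AND_RESUME, interrupted_pc,
				0, 0, 0, 0, 0, 0);
	}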
services/std_svc/sdei/sdei_main.c | 1064 (new file): diff suppressed because it is too large.
services/std_svc/sdei/sdei_private.h | 234 (new file)

@@ -0,0 +1,234 @@
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef __SDEI_PRIVATE_H__
#define __SDEI_PRIVATE_H__

#include <arch_helpers.h>
#include <debug.h>
#include <errno.h>
#include <interrupt_mgmt.h>
#include <platform.h>
#include <sdei.h>
#include <spinlock.h>
#include <stdbool.h>
#include <types.h>
#include <utils_def.h>

#ifdef AARCH32
# error SDEI is implemented only for AArch64 systems
#endif

#ifndef PLAT_SDEI_CRITICAL_PRI
# error Platform must define SDEI critical priority value
#endif

#ifndef PLAT_SDEI_NORMAL_PRI
# error Platform must define SDEI normal priority value
#endif

/* Output SDEI logs as verbose */
#define SDEI_LOG(...)	VERBOSE("SDEI: " __VA_ARGS__)

/* SDEI handler unregistered state. This is the default state. */
#define SDEI_STATE_UNREGISTERED		0

/* SDEI event status values, as bit positions */
#define SDEI_STATF_REGISTERED		0
#define SDEI_STATF_ENABLED		1
#define SDEI_STATF_RUNNING		2

/* SDEI SMC error codes */
#define SDEI_EINVAL	(-2)
#define SDEI_EDENY	(-3)
#define SDEI_EPEND	(-5)
#define SDEI_ENOMEM	(-10)

/*
 * The 'info' parameter to the SDEI_EVENT_GET_INFO SMC.
 *
 * Note that the SDEI v1.0 specification mistakenly enumerates
 * SDEI_INFO_EV_SIGNALED as SDEI_INFO_SIGNALED. This will be corrected in a
 * future version.
 */
#define SDEI_INFO_EV_TYPE		0
#define SDEI_INFO_EV_NOT_SIGNALED	1
#define SDEI_INFO_EV_PRIORITY		2
#define SDEI_INFO_EV_ROUTING_MODE	3
#define SDEI_INFO_EV_ROUTING_AFF	4

#define SDEI_PRIVATE_MAPPING()	(&sdei_global_mappings[_SDEI_MAP_IDX_PRIV])
#define SDEI_SHARED_MAPPING()	(&sdei_global_mappings[_SDEI_MAP_IDX_SHRD])

#define for_each_mapping_type(_i, _mapping) \
	for (_i = 0, _mapping = &sdei_global_mappings[_i]; \
			_i < _SDEI_MAP_IDX_MAX; \
			_i++, _mapping = &sdei_global_mappings[_i])

#define iterate_mapping(_mapping, _i, _map) \
	for (_map = (_mapping)->map, _i = 0; \
			_i < (_mapping)->num_maps; \
			_i++, _map++)

#define for_each_private_map(_i, _map) \
	iterate_mapping(SDEI_PRIVATE_MAPPING(), _i, _map)

#define for_each_shared_map(_i, _map) \
	iterate_mapping(SDEI_SHARED_MAPPING(), _i, _map)

/* SDEI_FEATURES */
#define SDEI_FEATURE_BIND_SLOTS		0
#define BIND_SLOTS_MASK			0xffff
#define FEATURES_SHARED_SLOTS_SHIFT	16
#define FEATURES_PRIVATE_SLOTS_SHIFT	0
#define FEATURE_BIND_SLOTS(_priv, _shrd) \
	((((_priv) & BIND_SLOTS_MASK) << FEATURES_PRIVATE_SLOTS_SHIFT) | \
	 (((_shrd) & BIND_SLOTS_MASK) << FEATURES_SHARED_SLOTS_SHIFT))

#define GET_EV_STATE(_e, _s)	get_ev_state_bit(_e, SDEI_STATF_##_s)
#define SET_EV_STATE(_e, _s)	((_e)->state |= BIT(SDEI_STATF_##_s))

static inline int is_event_private(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_PRIVATE_SHIFT)) != 0);
}

static inline int is_event_shared(sdei_ev_map_t *map)
{
	return !is_event_private(map);
}

static inline int is_event_critical(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_CRITICAL_SHIFT)) != 0);
}

static inline int is_event_normal(sdei_ev_map_t *map)
{
	return !is_event_critical(map);
}

static inline int is_event_signalable(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_SIGNALABLE_SHIFT)) != 0);
}

static inline int is_map_dynamic(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_DYNAMIC_SHIFT)) != 0);
}

/*
 * Checks whether an event is associated with an interrupt. Static events always
 * return true, and dynamic events return whether SDEI_INTERRUPT_BIND had been
 * called on them. This can be used on both static and dynamic events to check
 * for an associated interrupt.
 */
static inline int is_map_bound(sdei_ev_map_t *map)
{
	return ((map->map_flags & BIT(_SDEI_MAPF_BOUND_SHIFT)) != 0);
}

static inline void set_map_bound(sdei_ev_map_t *map)
{
	map->map_flags |= BIT(_SDEI_MAPF_BOUND_SHIFT);
}

static inline void clr_map_bound(sdei_ev_map_t *map)
{
	map->map_flags &= ~(BIT(_SDEI_MAPF_BOUND_SHIFT));
}

static inline int is_secure_sgi(unsigned int intr)
{
	return (plat_ic_is_sgi(intr) &&
			(plat_ic_get_interrupt_type(intr) == INTR_TYPE_EL3));
}

/*
 * Determine the EL of the client. If SCR_EL3.HCE is set (the client runs with
 * EL2 enabled), deem EL2; otherwise, deem EL1.
 */
static inline unsigned int sdei_client_el(void)
{
	return read_scr_el3() & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
}

static inline unsigned int sdei_event_priority(sdei_ev_map_t *map)
{
	return is_event_critical(map) ? PLAT_SDEI_CRITICAL_PRI :
		PLAT_SDEI_NORMAL_PRI;
}

static inline int get_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
	return ((se->state & BIT(bit_no)) != 0);
}

static inline void clr_ev_state_bit(sdei_entry_t *se, unsigned int bit_no)
{
	se->state &= ~BIT(bit_no);
}

/* SDEI actions for state transition */
typedef enum {
	/*
	 * Actions resulting from client requests. These directly map to SMC
	 * calls. Note that the state table columns are listed in this order
	 * too.
	 */
	DO_REGISTER = 0,
	DO_RELEASE = 1,
	DO_ENABLE = 2,
	DO_DISABLE = 3,
	DO_UNREGISTER = 4,
	DO_ROUTING = 5,
	DO_CONTEXT = 6,
	DO_COMPLETE = 7,
	DO_COMPLETE_RESUME = 8,

	/* Action for event dispatch */
	DO_DISPATCH = 9,

	DO_MAX,
} sdei_action_t;

typedef enum {
	SDEI_NORMAL,
	SDEI_CRITICAL
} sdei_class_t;

static inline void sdei_map_lock(sdei_ev_map_t *map)
{
	spin_lock(&map->lock);
}

static inline void sdei_map_unlock(sdei_ev_map_t *map)
{
	spin_unlock(&map->lock);
}

extern const sdei_mapping_t sdei_global_mappings[];
extern sdei_entry_t sdei_private_event_table[];
extern sdei_entry_t sdei_shared_event_table[];

void init_sdei_state(void);

sdei_ev_map_t *find_event_map_by_intr(int intr_num, int shared);
sdei_ev_map_t *find_event_map(int ev_num);
sdei_entry_t *get_event_entry(sdei_ev_map_t *map);

int sdei_event_context(void *handle, unsigned int param);
int sdei_event_complete(int resume, uint64_t arg);

void sdei_pe_unmask(void);
unsigned int sdei_pe_mask(void);

int sdei_intr_handler(uint32_t intr, uint32_t flags, void *handle,
		void *cookie);
bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act);

#endif /* __SDEI_PRIVATE_H__ */
services/std_svc/sdei/sdei_state.c | 150 (new file)

@@ -0,0 +1,150 @@
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cassert.h>
#include <stdbool.h>
#include "sdei_private.h"

/* Aliases for SDEI handler states: 'R'unning, 'E'nabled, and re'G'istered */
#define r_		0
#define R_		(1u << SDEI_STATF_RUNNING)

#define e_		0
#define E_		(1u << SDEI_STATF_ENABLED)

#define g_		0
#define G_		(1u << SDEI_STATF_REGISTERED)

/* All possible composite handler states */
#define reg_		(r_ | e_ | g_)
#define reG_		(r_ | e_ | G_)
#define rEg_		(r_ | E_ | g_)
#define rEG_		(r_ | E_ | G_)
#define Reg_		(R_ | e_ | g_)
#define ReG_		(R_ | e_ | G_)
#define REg_		(R_ | E_ | g_)
#define REG_		(R_ | E_ | G_)

#define MAX_STATES	(REG_ + 1)

/* Invalid state */
#define SDEI_STATE_INVALID	((sdei_state_t) (-1))

/* No change in state */
#define SDEI_STATE_NOP		((sdei_state_t) (-2))

#define X___		SDEI_STATE_INVALID
#define NOP_		SDEI_STATE_NOP

/* Ensure special states don't overlap with valid ones */
CASSERT(X___ > REG_, sdei_state_overlap_invalid);
CASSERT(NOP_ > REG_, sdei_state_overlap_nop);

/*
 * SDEI handler state machine: refer to sections 6.1 and 6.1.2 of the SDEI v1.0
 * specification:
 *
 * http://infocenter.arm.com/help/topic/com.arm.doc.den0054a/ARM_DEN0054A_Software_Delegated_Exception_Interface.pdf
 *
 * Not all calls contribute to a handler state transition. This table is also
 * used to validate whether a call is permissible at a given handler state:
 *
 *  - X___ denotes a forbidden transition;
 *  - NOP_ denotes a permitted transition, but there's no change in state;
 *  - Otherwise, XXX_ gives the new state.
 *
 * DISP[atch] is a transition added for the implementation, but is not mentioned
 * in the spec.
 *
 * Calls that the spec says can be made at any time do not appear in this
 * table.
 */

static const sdei_state_t sdei_state_table[MAX_STATES][DO_MAX] = {
/*
 *	Action:	REG	REL	ENA	DISA	UREG	ROUT	CTX	COMP	COMPR	DISP
 *	Notes:		[3]			[1]	[3]	[3][4]			[2]
 */
	/* Handler unregistered, disabled, and not running. This is the default state. */
/* 0 */	[reg_] = {	reG_,	NOP_,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___, },

	/* Handler unregistered and running */
/* 4 */	[Reg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	NOP_,	reg_,	reg_,	X___, },

	/* Handler registered */
/* 1 */	[reG_] = {	X___,	X___,	rEG_,	NOP_,	reg_,	NOP_,	X___,	X___,	X___,	X___, },

	/* Handler registered and running */
/* 5 */	[ReG_] = {	X___,	X___,	REG_,	NOP_,	Reg_,	X___,	NOP_,	reG_,	reG_,	X___, },

	/* Handler registered and enabled */
/* 3 */	[rEG_] = {	X___,	X___,	NOP_,	reG_,	reg_,	X___,	X___,	X___,	X___,	REG_, },

	/* Handler registered, enabled, and running */
/* 7 */	[REG_] = {	X___,	X___,	NOP_,	ReG_,	Reg_,	X___,	NOP_,	rEG_,	rEG_,	X___, },

	/*
	 * Invalid states: no valid transition would leave the handler in these
	 * states; and no transition from these states is possible either.
	 */

	/*
	 * Handler can't be enabled without being registered. I.e., XEg is
	 * impossible.
	 */
/* 2 */	[rEg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___, },
/* 6 */	[REg_] = {	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___,	X___, },
};

/*
 * [1] Unregister will always also disable the event, so the new state will have
 *     Xeg.
 * [2] An event is considered for dispatch only when it's both registered and
 *     enabled.
 * [3] Never causes a change in state.
 * [4] Only allowed when running.
 */

/*
 * Given an action, transition the state of an event by looking up the state
 * table above:
 *
 *  - Return false for an invalid transition;
 *  - Return true for a valid transition that causes no change in state;
 *  - Otherwise, update the state and return true.
 *
 * This function assumes that the caller holds the necessary locks. If the
 * transition has constraints other than what the state table describes, the
 * caller is expected to restore the previous state. See sdei_event_register()
 * for an example.
 */
bool can_sdei_state_trans(sdei_entry_t *se, sdei_action_t act)
{
	sdei_state_t next;

	assert(act < DO_MAX);
	if (se->state >= MAX_STATES) {
		WARN(" event state invalid: %x\n", se->state);
		return false;
	}

	next = sdei_state_table[se->state][act];
	switch (next) {
	case SDEI_STATE_INVALID:
		return false;

	case SDEI_STATE_NOP:
		return true;

	default:
		/* Valid transition. Update state. */
		SDEI_LOG(" event state 0x%x => 0x%x\n", se->state, next);
		se->state = next;

		return true;
	}
}
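As an illustrative walk through the table (not part of the patch), starting
from the default composite state reg_, a client's REGISTER then ENABLE calls
move a handler to reG_ and then rEG_; a dispatch moves it to REG_; and
completion returns it to rEG_, ready for the next event:

	sdei_entry_t se = { .state = 0 };	/* reg_: unregistered */

	can_sdei_state_trans(&se, DO_REGISTER);	/* state becomes reG_ */
	can_sdei_state_trans(&se, DO_ENABLE);	/* state becomes rEG_ */
	can_sdei_state_trans(&se, DO_DISPATCH);	/* state becomes REG_ */
	can_sdei_state_trans(&se, DO_COMPLETE);	/* state becomes rEG_ */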
@@ -11,6 +11,7 @@
#include <psci.h>
#include <runtime_instr.h>
#include <runtime_svc.h>
#include <sdei.h>
#include <smcc_helpers.h>
#include <spm_svc.h>
#include <std_svc.h>
@@ -45,6 +46,11 @@ static int32_t std_svc_setup(void)
	}
#endif

#if SDEI_SUPPORT
	/* SDEI initialisation */
	sdei_init();
#endif

	return ret;
}

@@ -92,7 +98,6 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
		SMC_RET1(handle, ret);
	}

#if ENABLE_SPM
	/*
	 * Dispatch SPM calls to SPM SMC handler and return its return
@@ -104,6 +109,13 @@ uintptr_t std_svc_smc_handler(uint32_t smc_fid,
	}
#endif

#if SDEI_SUPPORT
	if (is_sdei_fid(smc_fid)) {
		return sdei_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle,
				flags);
	}
#endif

	switch (smc_fid) {
	case ARM_STD_SVC_CALL_COUNT:
		/*