arm-trusted-firmware/bl31/aarch64/ea_delegate.S
Manish Pandey d04c04a4e8 feat(el3-runtime): modify vector entry paths
Vector entries in EL3 from lower ELs first check for any pending
async EAs from the lower EL before handling the original exception.
This happens when there is an error (EA) in the system which has not
yet been signaled to the PE while executing at a lower EL. During entry
into EL3 the errors (EA) are synchronized, causing an async EA to
become pending at EL3.

On detecting the pending EA (via ISR_EL1.A), EL3 either reflects it back
to the lower EL (KFH) or handles it in EL3 (FFH), based on the EA routing
model.

In Firmware First handling mode (FFH), EL3 handles the pending EA first
before returning to handle the original exception.

In Kernel First handling mode (KFH), EL3 returns to the lower EL without
handling the original exception; on return to the lower EL, the EA becomes
pending there. In KFH mode there is a risk of bouncing back and forth
between EL3 and the lower EL if the EA is masked at the lower EL or the
priority of the EA is lower than that of the original exception. This is a
limitation of the current architecture, but it can be solved in future if
EL3 gains the capability to inject a virtual SError.

Signed-off-by: Manish Pandey <manish.pandey2@arm.com>
Change-Id: I3a2a31de7cf454d9d690b1ef769432a5b24f6c11
2023-11-01 11:11:21 +00:00

/*
* Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert_macros.S>
#include <asm_macros.S>
#include <bl31/ea_handle.h>
#include <context.h>
#include <lib/extensions/ras_arch.h>
#include <cpu_macros.S>
.globl handle_lower_el_sync_ea
.globl handle_lower_el_async_ea
.globl handle_pending_async_ea
.globl reflect_pending_async_ea_to_lower_el
/*
* This function forms the tail end of Synchronous Exception entry from lower
* EL, and expects to handle Synchronous External Aborts from lower EL and CPU
* Implementation Defined Exceptions. If any other kind of exception is detected,
* this function reports an unhandled exception.
*
* It delegates the handling of the EA to the platform handler, and upon
* successfully handling the EA, exits EL3; otherwise it panics.
*
* This function assumes x30 has been saved.
*/
func handle_lower_el_sync_ea
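/* Read ESR_EL3 and extract the Exception Class (EC) field */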
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
/* Check for I/D aborts from lower EL */
cmp x30, #EC_IABORT_LOWER_EL
b.eq 1f
cmp x30, #EC_DABORT_LOWER_EL
b.eq 1f
/* Save GP registers */
stp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
stp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
stp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
/* Get the cpu_ops pointer */
bl get_cpu_ops_ptr
/* Get the cpu_ops exception handler */
ldr x0, [x0, #CPU_E_HANDLER_FUNC]
/*
* If the reserved function pointer is NULL, this CPU does not have an
* implementation defined exception handler function
*/
cbz x0, 2f
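/* Pass the Exception Class in x1 and call the CPU-specific handler */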
mrs x1, esr_el3
ubfx x1, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
blr x0
b 2f
1:
/*
* Save general purpose and ARMv8.3-PAuth registers (if enabled).
* Also save PMCR_EL0 and set the PSTATE to a known state.
*/
bl prepare_el3_entry
#if ENABLE_PAUTH
/* Load and program APIAKey firmware key */
bl pauth_load_bl31_apiakey
#endif
/* Setup exception class and syndrome arguments for platform handler */
mov x0, #ERROR_EA_SYNC
mrs x1, esr_el3
bl delegate_sync_ea
/* el3_exit assumes SP_EL0 on entry */
msr spsel, #MODE_SP_EL0
b el3_exit
2:
ldp x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
ldp x2, x3, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X2]
ldp x4, x5, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X4]
/* Synchronous exceptions other than the above are assumed to be EA */
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
no_ret report_unhandled_exception
endfunc handle_lower_el_sync_ea
/*
* This function handles SErrors from lower ELs.
*
* It delegates the handling of the EA to the platform handler, and upon
* successfully handling the EA, exits EL3; otherwise it panics.
*
* This function assumes x30 has been saved.
*/
func handle_lower_el_async_ea
/*
* Save general purpose and ARMv8.3-PAuth registers (if enabled).
* Also save PMCR_EL0 and set the PSTATE to a known state.
*/
bl prepare_el3_entry
#if ENABLE_PAUTH
/* Load and program APIAKey firmware key */
bl pauth_load_bl31_apiakey
#endif
/* Setup exception class and syndrome arguments for platform handler */
mov x0, #ERROR_EA_ASYNC
mrs x1, esr_el3
bl delegate_async_ea
/* el3_exit assumes SP_EL0 on entry */
msr spsel, #MODE_SP_EL0
b el3_exit
endfunc handle_lower_el_async_ea
/*
* NOTE 1 : Synchronized async EA handling
*
* Comment here applicable to following two functions
* - handle_pending_async_ea
* - reflect_pending_async_ea_to_lower_el
*
* Must be called from exception vector directly.
*
* This special handling is required to cater for async EAs from a lower EL
* which are synchronized at EL3 entry.
*
* This scenario may arise when there is an error (EA) in the system which has
* not yet been signaled to the PE while executing in a lower EL. During entry
* into EL3, the errors are synchronized either implicitly or explicitly,
* causing an async EA to become pending at EL3.
*
* On detecting the pending EA (via ISR_EL1.A), based on the EA routing model,
* either handle it in EL3 using "handle_pending_async_ea" (FFH) or return to
* the lower EL using "reflect_pending_async_ea_to_lower_el" (KFH).
*/
/*
* Refer to NOTE 1 : Firmware First Handling (FFH)
* Called when FFH is enabled and the outgoing world is Non-Secure (scr_el3.ea = 1).
*
* This function assumes x30 has been saved.
*/
#if HANDLE_EA_EL3_FIRST_NS
func handle_pending_async_ea
/*
* Prepare for nested handling of EA. Stash sysregs clobbered by the nested
* exception and its handler.
*/
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
mrs x30, esr_el3
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
mrs x30, spsr_el3
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
mrs x30, elr_el3
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
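/* Set a flag to indicate that nested EA handling is in progress */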
mov x30, #1
str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
/*
* Restore the original x30 saved as part of entering EL3. This is not
* required by the current function itself, but by the EL3 SError vector
* entry once the PSTATE.A bit is unmasked: we restore x30 here so that
* the SError vector entry stashes the same value again.
*/
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
/*
* After clearing the PSTATE.A bit, the pending SError will be taken at
* the current EL. An explicit synchronization event ensures that the
* newly unmasked interrupt is taken immediately.
*/
unmask_async_ea
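/* Any pending SError has now been taken via the EL3 SError vector entry */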
/* Restore the original exception information along with zeroing the storage */
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
msr elr_el3, x30
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
msr spsr_el3, x30
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_SPSR_EL3]
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
msr esr_el3, x30
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ESR_EL3]
/*
* If the original exception corresponds to an SError from a lower EL, eret
* back to the lower EL; otherwise return to the vector table for original
* exception handling.
*/
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x30, #EC_SERROR
ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_GPREG_LR]
b.eq 1f
ret
1:
exception_return
endfunc handle_pending_async_ea
#endif /* HANDLE_EA_EL3_FIRST_NS */
/*
* Refer to NOTE 1 : Kernel First handling (KFH)
* Called in the following scenarios:
* - Always, if the outgoing world is either Secure or Realm.
* - In KFH mode, if the outgoing world is Non-secure.
*
* This function assumes x30 has been saved.
*/
func reflect_pending_async_ea_to_lower_el
/*
* As the original exception was not handled, we need to ensure that we return
* to the instruction which caused the exception. To achieve that, eret
* to "elr-4" (label "subtract_elr_el3") for SMC, or simply eret otherwise
* (label "skip_smc_check").
*
* LIMITATION: It could be that the async EA is masked at the target exception
* level, or that the priority of the async EA relative to the EL3/secure
* interrupt is lower, which causes a back and forth between the lower EL and
* EL3. In that case, we track the loop count in "CTX_NESTED_EA_FLAG" and
* leverage the previous ELR in "CTX_SAVED_ELR_EL3" to detect the cycle, and
* then panic to indicate a problem here (label "check_loop_ctr").
* However, setting SCR_EL3.IESB = 1 should give priority to SError handling,
* as per the AArch64.TakeException pseudocode in the Arm ARM.
*
* TODO: In future, if EL3 gains the capability to inject a virtual SError
* into lower ELs, we can remove the el3_panic, handle the original exception
* first, and inject an SError into the lower EL before ereting back.
*/
stp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
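/* Compare the current ELR with the previously reflected ELR to detect a replay loop */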
ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
mrs x28, elr_el3
cmp x29, x28
b.eq check_loop_ctr
str x28, [sp, #CTX_EL3STATE_OFFSET + CTX_SAVED_ELR_EL3]
/* Zero the loop counter */
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
b skip_loop_ctr
check_loop_ctr:
ldr x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
add x29, x29, #1
str x29, [sp, #CTX_EL3STATE_OFFSET + CTX_NESTED_EA_FLAG]
cmp x29, #ASYNC_EA_REPLAY_COUNTER
b.ge el3_panic
skip_loop_ctr:
/*
* Logic to distinguish whether we came from an SMC or any other exception.
* Use offsets within the vector entry to determine which exception we are
* handling. Each 0x200-sized vector entry block uses addresses "0x0-0x80"
* for the sync exception and "0x80-0x200" for async exceptions.
* Use the vector base address (vbar_el3) and the exception address (LR) to
* calculate whether the address we came from falls in any of
* "0x0-0x80", "0x200-0x280", "0x400-0x480" or "0x600-0x680".
*/
mrs x29, vbar_el3
sub x30, x30, x29
and x30, x30, #0x1ff
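/* x30 now holds the offset of the exception entry within its 0x200-sized block */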
cmp x30, #0x80
b.ge skip_smc_check
/* It is a synchronous exception. Now check whether it is an SMC or not. */
mrs x30, esr_el3
ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x30, #EC_AARCH32_SMC
b.eq subtract_elr_el3
cmp x30, #EC_AARCH64_SMC
b.eq subtract_elr_el3
b skip_smc_check
subtract_elr_el3:
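/* Wind ELR back by one instruction so that the SMC is re-executed on return */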
sub x28, x28, #4
skip_smc_check:
msr elr_el3, x28
ldp x28, x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X28]
ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
exception_return
endfunc reflect_pending_async_ea_to_lower_el
/*
* Prelude for Synchronous External Abort handling. This function assumes that
* all GP registers have been saved by the caller.
*
* x0: EA reason
* x1: EA syndrome
*/
func delegate_sync_ea
#if RAS_FFH_SUPPORT
/*
* Check for Uncontainable error type. If so, route to the platform
* fatal error handler rather than the generic EA one.
*/
ubfx x2, x1, #EABORT_SET_SHIFT, #EABORT_SET_WIDTH
cmp x2, #ERROR_STATUS_SET_UC
b.ne 1f
/* Check fault status code */
ubfx x3, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
cmp x3, #SYNC_EA_FSC
b.ne 1f
no_ret plat_handle_uncontainable_ea
1:
#endif
b ea_proceed
endfunc delegate_sync_ea
/*
* Prelude for Asynchronous External Abort handling. This function assumes that
* all GP registers have been saved by the caller.
*
* x0: EA reason
* x1: EA syndrome
*/
func delegate_async_ea
#if RAS_FFH_SUPPORT
/*
* Check the Exception Class to ensure SError, as this function should
* only be invoked for SErrors. If that is not the case, which implies
* either a HW error or a programming error, panic.
*/
ubfx x2, x1, #ESR_EC_SHIFT, #ESR_EC_LENGTH
cmp x2, #EC_SERROR
b.ne el3_panic
/*
* Check for Implementation Defined Syndrome. If so, skip checking
* Uncontainable error type from the syndrome as the format is unknown.
*/
tbnz x1, #SERROR_IDS_BIT, 1f
/* AET only valid when DFSC is 0x11 */
ubfx x2, x1, #EABORT_DFSC_SHIFT, #EABORT_DFSC_WIDTH
cmp x2, #DFSC_SERROR
b.ne 1f
/*
* Check for Uncontainable error type. If so, route to the platform
* fatal error handler rather than the generic EA one.
*/
ubfx x3, x1, #EABORT_AET_SHIFT, #EABORT_AET_WIDTH
cmp x3, #ERROR_STATUS_UET_UC
b.ne 1f
no_ret plat_handle_uncontainable_ea
1:
#endif
b ea_proceed
endfunc delegate_async_ea
/*
* Delegate External Abort handling to platform's EA handler. This function
* assumes that all GP registers have been saved by the caller.
*
* x0: EA reason
* x1: EA syndrome
*/
func ea_proceed
/*
* If the ESR stashed in the context earlier is not zero, we were already
* processing an EA, and this is a double fault.
*/
ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
cbz x5, 1f
no_ret plat_handle_double_fault
1:
/* Save EL3 state */
mrs x2, spsr_el3
mrs x3, elr_el3
stp x2, x3, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
/*
* Save SCR and ESR: handling might involve lower ELs, and returning to
* EL3 from there would trample the original ESR.
*/
mrs x4, scr_el3
mrs x5, esr_el3
stp x4, x5, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
/*
* Setup rest of arguments, and call platform External Abort handler.
*
* x0: EA reason (already in place)
* x1: Exception syndrome (already in place).
* x2: Cookie (unused for now).
* x3: Context pointer.
* x4: Flags (security state from SCR for now).
*/
mov x2, xzr
mov x3, sp
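/* Extract the NS bit from SCR_EL3 (already in x4) as the security state flag */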
ubfx x4, x4, #0, #1
/* Switch to runtime stack */
ldr x5, [sp, #CTX_EL3STATE_OFFSET + CTX_RUNTIME_SP]
msr spsel, #MODE_SP_EL0
mov sp, x5
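/* Preserve the return address in x29 across the handler call */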
mov x29, x30
#if ENABLE_ASSERTIONS
/* Stash the stack pointer */
mov x28, sp
#endif
bl plat_ea_handler
#if ENABLE_ASSERTIONS
/*
* Error handling flows might involve long jumps; so upon returning from
* the platform error handler, validate that we've completely
* unwound the stack.
*/
mov x27, sp
cmp x28, x27
ASM_ASSERT(eq)
#endif
/* Make SP point to context */
msr spsel, #MODE_SP_ELX
/* Restore EL3 state (SPSR_EL3 and ELR_EL3) */
ldp x1, x2, [sp, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3]
msr spsr_el3, x1
msr elr_el3, x2
/* Restore ESR_EL3 and SCR_EL3 */
ldp x3, x4, [sp, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3]
msr scr_el3, x3
msr esr_el3, x4
#if ENABLE_ASSERTIONS
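/* The syndrome of a valid EA is never zero */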
cmp x4, xzr
ASM_ASSERT(ne)
#endif
/* Clear ESR storage */
str xzr, [sp, #CTX_EL3STATE_OFFSET + CTX_ESR_EL3]
ret x29
endfunc ea_proceed