From d87c0e277fcc6b07112c0ee6d1aecbc2200055a5 Mon Sep 17 00:00:00 2001
From: Manish Pandey
Date: Wed, 11 Jan 2023 21:41:07 +0000
Subject: [PATCH 1/2] refactor(el3_runtime): introduce save_x30 macro

Most of the macros/routines in the vector entries need a free scratch
register. Introduce a macro "save_x30" and call it right at the
beginning of the vector entries where x30 is used. This is more
explicit and less error-prone.

Signed-off-by: Manish Pandey
Change-Id: I617f3d41a120739e5e3fe1c421c79ceb70c1188e
---
 bl31/aarch64/runtime_exceptions.S        | 30 ++++++++++++++----------
 include/arch/aarch64/el2_common_macros.S |  9 ++++---
 include/arch/aarch64/el3_common_macros.S |  9 ++++---
 3 files changed, 26 insertions(+), 22 deletions(-)

diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 0c608597c..7cdde552b 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -39,6 +39,14 @@
 .globl fiq_aarch32
 .globl serror_aarch32
 
+ /*
+ * Save LR and make x30 available, as most of the routines in the
+ * vector entries need a free register.
+ */
+ .macro save_x30
+ str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ .endm
+
 /*
 * Macro that prepares entry to EL3 upon taking an exception.
 *
@@ -58,12 +66,6 @@
 /* Unmask the SError interrupt */
 msr daifclr, #DAIF_ABT_BIT
 
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 /* Check for SErrors synchronized by the ESB instruction */
 mrs x30, DISR_EL1
 tbz x30, #DISR_A_BIT, 1f
@@ -108,11 +110,7 @@
 /* Use ISB for the above unmask operation to take effect immediately */
 isb
 
- /*
- * Refer Note 1.
- * No need to restore X30 as macros following this modify x30 anyway.
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ /* Refer Note 1. */
 mov x30, #1
 str x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
 dmb sy
@@ -316,7 +314,7 @@ vector_entry serror_sp_elx
 * triggered due to explicit synchronization in EL3. Refer Note 1.
 */
 /* Assumes SP_EL3 on entry */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+ save_x30
 ldr x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
 cbnz x30, 1f
 
@@ -338,24 +336,28 @@ vector_entry sync_exception_aarch64
 * to a valid cpu context where the general purpose and system register
 * state can be saved.
 */
+ save_x30
 apply_at_speculative_wa
 check_and_unmask_ea
 handle_sync_exception
 end_vector_entry sync_exception_aarch32
 
 vector_entry irq_aarch32
+ save_x30
 apply_at_speculative_wa
 check_and_unmask_ea
 handle_interrupt_exception irq_aarch32
 end_vector_entry irq_aarch32
 
 vector_entry fiq_aarch32
+ save_x30
 apply_at_speculative_wa
 check_and_unmask_ea
 handle_interrupt_exception fiq_aarch32
 end_vector_entry fiq_aarch32
 
 vector_entry serror_aarch32
+ save_x30
 apply_at_speculative_wa
 #if RAS_EXTENSION
 msr daifclr, #DAIF_ABT_BIT
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index 7bf480698..b3b85e67e 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -384,13 +384,12 @@
 .macro apply_at_speculative_wa
 #if ERRATA_SPECULATIVE_AT
 /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching and also, save x29 which will be used in the called
- * function
+ * This macro expects that x30 has already been saved.
+ * Also, save x29, which will be used in the called function.
 */
- stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 bl save_and_update_ptw_el1_sys_regs
- ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 #endif
 .endm
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index de2b931af..40ff05668 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -532,13 +532,12 @@
 .macro apply_at_speculative_wa
 #if ERRATA_SPECULATIVE_AT
 /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching and also, save x29 which will be used in the called
- * function
+ * This macro expects that x30 has already been saved.
+ * Also, save x29, which will be used in the called function.
 */
- stp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ str x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 bl save_and_update_ptw_el1_sys_regs
- ldp x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+ ldr x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 #endif
 .endm

From 6f7de9a87148a6024af4a1ced7f26e5bff55683b Mon Sep 17 00:00:00 2001
From: Manish Pandey
Date: Wed, 11 Jan 2023 21:53:02 +0000
Subject: [PATCH 2/2] refactor(el3_runtime): unify handle/enter_lower_el_async_ea

handle_lower_el_async_ea and enter_lower_el_async_ea are the same
except for saving the x30 register. Since the previous patch saves x30
(and thereby frees it) before these functions are called, we no longer
need both of them.

This patch also unifies the naming convention, so we now have three
handlers:
 - handle_lower_el_ea_esb
 - handle_lower_el_sync_ea
 - handle_lower_el_async_ea

Signed-off-by: Manish Pandey
Change-Id: I63b584cf059bac80195aa334981d50fa6272cf49
---
 bl31/aarch64/ea_delegate.S        | 39 +++++++++++--------------------
 bl31/aarch64/runtime_exceptions.S | 10 ++++----
 2 files changed, 19 insertions(+), 30 deletions(-)

diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
index dbb32344d..83e4582bd 100644
--- a/bl31/aarch64/ea_delegate.S
+++ b/bl31/aarch64/ea_delegate.S
@@ -16,9 +16,8 @@
 #include
 
 .globl handle_lower_el_ea_esb
- .globl handle_lower_el_async_ea
- .globl enter_lower_el_sync_ea
- .globl enter_lower_el_async_ea
+ .globl handle_lower_el_sync_ea
+ .globl handle_lower_el_async_ea
 
 
 /*
@@ -42,17 +41,12 @@ endfunc handle_lower_el_ea_esb
 * Implementation Defined Exceptions. If any other kind of exception is detected,
 * then this function reports unhandled exception.
 *
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes that x30 has been saved.
 */
-func enter_lower_el_sync_ea
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching.
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
+func handle_lower_el_sync_ea
 mrs x30, esr_el3
 ubfx x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
 
@@ -114,24 +108,19 @@
 
 /* Synchronous exceptions other than the above are assumed to be EA */
 ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 no_ret report_unhandled_exception
-endfunc enter_lower_el_sync_ea
+endfunc handle_lower_el_sync_ea
 
 
 /*
 * This function handles SErrors from lower ELs.
 *
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes that x30 has been saved.
 */
-func enter_lower_el_async_ea
- /*
- * Explicitly save x30 so as to free up a register and to enable
- * branching
- */
- str x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+func handle_lower_el_async_ea
 
-handle_lower_el_async_ea:
 /*
 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
@@ -153,7 +142,7 @@ handle_lower_el_async_ea:
 /* el3_exit assumes SP_EL0 on entry */
 msr spsel, #MODE_SP_EL0
 b el3_exit
-endfunc enter_lower_el_async_ea
+endfunc handle_lower_el_async_ea
 
 
 /*
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 7cdde552b..4cbcddcfe 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -151,7 +151,7 @@
 
 /* Synchronous exceptions other than the above are assumed to be EA */
 ldr x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
- b enter_lower_el_sync_ea
+ b handle_lower_el_sync_ea
 .endm
 
 
@@ -361,11 +361,11 @@ vector_entry serror_aarch64
 apply_at_speculative_wa
 #if RAS_EXTENSION
 msr daifclr, #DAIF_ABT_BIT
- b enter_lower_el_async_ea
 #else
 check_and_unmask_ea
- b handle_lower_el_async_ea
 #endif
+ b handle_lower_el_async_ea
+
 end_vector_entry serror_aarch64
 
 /* ---------------------------------------------------------------------
@@ -404,11 +404,11 @@ vector_entry serror_aarch32
 apply_at_speculative_wa
 #if RAS_EXTENSION
 msr daifclr, #DAIF_ABT_BIT
- b enter_lower_el_async_ea
 #else
 check_and_unmask_ea
- b handle_lower_el_async_ea
 #endif
+ b handle_lower_el_async_ea
+
 end_vector_entry serror_aarch32
 
 #ifdef MONITOR_TRAPS
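
Note on the pattern above: the reason each vector entry must run save_x30
before any of the macros that follow is that the AArch64 "bl" instruction
writes its return address into x30 (LR), so the first branch-and-link taken
from a vector entry would silently destroy the interrupted context's LR
unless it had already been stashed in the context frame. A minimal sketch of
the convention the two patches converge on, assuming TF-A's
CTX_GPREGS_OFFSET and CTX_GPREG_LR context-frame offsets; "some_ea_helper"
is a hypothetical routine used purely for illustration and is not part of
these patches:

	save_x30			/* stash the lower EL's LR in the context frame */
	bl	some_ea_helper		/* hypothetical call; bl clobbers x30 with its return address */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]	/* reload the saved LR */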