diff --git a/bl31/aarch64/ea_delegate.S b/bl31/aarch64/ea_delegate.S
index dbb32344d..83e4582bd 100644
--- a/bl31/aarch64/ea_delegate.S
+++ b/bl31/aarch64/ea_delegate.S
@@ -16,9 +16,8 @@
 #include
 
 	.globl	handle_lower_el_ea_esb
-	.globl	handle_lower_el_async_ea
-	.globl	enter_lower_el_sync_ea
-	.globl	enter_lower_el_async_ea
+	.globl	handle_lower_el_sync_ea
+	.globl	handle_lower_el_async_ea
 
 
 /*
@@ -42,17 +41,12 @@ endfunc handle_lower_el_ea_esb
  * Implementation Defined Exceptions. If any other kind of exception is detected,
  * then this function reports unhandled exception.
  *
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes that x30 has been saved.
  */
-func enter_lower_el_sync_ea
-	/*
-	 * Explicitly save x30 so as to free up a register and to enable
-	 * branching.
-	 */
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
+func handle_lower_el_sync_ea
 	mrs	x30, esr_el3
 	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
 
@@ -114,24 +108,19 @@ func enter_lower_el_sync_ea
 	/* Synchronous exceptions other than the above are assumed to be EA */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
 	no_ret	report_unhandled_exception
-endfunc enter_lower_el_sync_ea
+endfunc handle_lower_el_sync_ea
 
 
 /*
  * This function handles SErrors from lower ELs.
  *
- * Since it's part of exception vector, this function doesn't expect any GP
- * registers to have been saved. It delegates the handling of the EA to platform
- * handler, and upon successfully handling the EA, exits EL3; otherwise panics.
+ * It delegates the handling of the EA to the platform handler and, upon
+ * successfully handling the EA, exits EL3; otherwise it panics.
+ *
+ * This function assumes that x30 has been saved.
  */
-func enter_lower_el_async_ea
-	/*
-	 * Explicitly save x30 so as to free up a register and to enable
-	 * branching
-	 */
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+func handle_lower_el_async_ea
 
-handle_lower_el_async_ea:
 	/*
 	 * Save general purpose and ARMv8.3-PAuth registers (if enabled).
 	 * If Secure Cycle Counter is not disabled in MDCR_EL3 when
@@ -153,7 +142,7 @@ handle_lower_el_async_ea:
 	/* el3_exit assumes SP_EL0 on entry */
 	msr	spsel, #MODE_SP_EL0
 	b	el3_exit
-endfunc enter_lower_el_async_ea
+endfunc handle_lower_el_async_ea
 
 
 /*
diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S
index 0c608597c..4cbcddcfe 100644
--- a/bl31/aarch64/runtime_exceptions.S
+++ b/bl31/aarch64/runtime_exceptions.S
@@ -39,6 +39,14 @@
 	.globl	fiq_aarch32
 	.globl	serror_aarch32
 
+	/*
+	 * Save LR and make x30 available, as most of the routines in the
+	 * vector entries need a free register.
+	 */
+	.macro save_x30
+	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	.endm
+
 	/*
 	 * Macro that prepares entry to EL3 upon taking an exception.
 	 *
@@ -58,12 +66,6 @@
 	/* Unmask the SError interrupt */
 	msr	daifclr, #DAIF_ABT_BIT
 
-	/*
-	 * Explicitly save x30 so as to free up a register and to enable
-	 * branching
-	 */
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-
 	/* Check for SErrors synchronized by the ESB instruction */
 	mrs	x30, DISR_EL1
 	tbz	x30, #DISR_A_BIT, 1f
@@ -108,11 +110,7 @@
 	/* Use ISB for the above unmask operation to take effect immediately */
 	isb
 
-	/*
-	 * Refer Note 1.
-	 * No need to restore X30 as macros following this modify x30 anyway.
-	 */
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	/* Refer Note 1. */
 	mov	x30, #1
 	str	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
 	dmb	sy
@@ -153,7 +151,7 @@
 
 	/* Synchronous exceptions other than the above are assumed to be EA */
 	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
-	b	enter_lower_el_sync_ea
+	b	handle_lower_el_sync_ea
 	.endm
 
 
@@ -316,7 +314,7 @@ vector_entry serror_sp_elx
 	 * triggered due to explicit synchronization in EL3. Refer Note 1.
 	 */
 	/* Assumes SP_EL3 on entry */
-	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
+	save_x30
 	ldr	x30, [sp, #CTX_EL3STATE_OFFSET + CTX_IS_IN_EL3]
 	cbnz	x30, 1f
 
@@ -338,32 +336,36 @@ vector_entry sync_exception_aarch64
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_sync_exception
 end_vector_entry sync_exception_aarch64
 
 vector_entry irq_aarch64
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch64
 end_vector_entry irq_aarch64
 
 vector_entry fiq_aarch64
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch64
 end_vector_entry fiq_aarch64
 
 vector_entry serror_aarch64
+	save_x30
 	apply_at_speculative_wa
 #if RAS_EXTENSION
 	msr	daifclr, #DAIF_ABT_BIT
-	b	enter_lower_el_async_ea
 #else
 	check_and_unmask_ea
-	b	handle_lower_el_async_ea
 #endif
+	b	handle_lower_el_async_ea
+
 end_vector_entry serror_aarch64
 
 /* ---------------------------------------------------------------------
@@ -377,32 +379,36 @@ vector_entry sync_exception_aarch32
 	 * to a valid cpu context where the general purpose and system register
 	 * state can be saved.
 	 */
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_sync_exception
 end_vector_entry sync_exception_aarch32
 
 vector_entry irq_aarch32
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_interrupt_exception irq_aarch32
 end_vector_entry irq_aarch32
 
 vector_entry fiq_aarch32
+	save_x30
 	apply_at_speculative_wa
 	check_and_unmask_ea
 	handle_interrupt_exception fiq_aarch32
 end_vector_entry fiq_aarch32
 
 vector_entry serror_aarch32
+	save_x30
 	apply_at_speculative_wa
 #if RAS_EXTENSION
 	msr	daifclr, #DAIF_ABT_BIT
-	b	enter_lower_el_async_ea
 #else
 	check_and_unmask_ea
-	b	handle_lower_el_async_ea
 #endif
+	b	handle_lower_el_async_ea
+
 end_vector_entry serror_aarch32
 
 #ifdef MONITOR_TRAPS
diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S
index 7bf480698..b3b85e67e 100644
--- a/include/arch/aarch64/el2_common_macros.S
+++ b/include/arch/aarch64/el2_common_macros.S
@@ -384,13 +384,12 @@
 	.macro apply_at_speculative_wa
 #if ERRATA_SPECULATIVE_AT
 	/*
-	 * Explicitly save x30 so as to free up a register and to enable
-	 * branching and also, save x29 which will be used in the called
-	 * function
+	 * This macro expects that x30 has been saved.
+	 * Also save x29, which will be used in the called function.
 	 */
-	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 	bl	save_and_update_ptw_el1_sys_regs
-	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 #endif
 	.endm
 
diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S
index de2b931af..40ff05668 100644
--- a/include/arch/aarch64/el3_common_macros.S
+++ b/include/arch/aarch64/el3_common_macros.S
@@ -532,13 +532,12 @@
 	.macro apply_at_speculative_wa
 #if ERRATA_SPECULATIVE_AT
 	/*
-	 * Explicitly save x30 so as to free up a register and to enable
-	 * branching and also, save x29 which will be used in the called
-	 * function
+	 * This macro expects that x30 has been saved.
+	 * Also save x29, which will be used in the called function.
 	 */
-	stp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	str	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 	bl	save_and_update_ptw_el1_sys_regs
-	ldp	x29, x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
+	ldr	x29, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X29]
 #endif
 	.endm
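Note: taken together, the hunks above establish a single convention: every
vector entry saves x30 exactly once, up front, via save_x30, and every routine
reached from a vector entry may then clobber x30 freely and reload the saved
LR from the context frame when it is needed. A minimal sketch of that
contract, assembled from the patch itself (the handler body is abridged to the
two ends of the pattern; the elided dispatch is illustrative only):

	/* Vector-entry side: save LR once, before any branching. */
	.macro save_x30
	str	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	.endm

vector_entry sync_exception_aarch64
	save_x30			/* x30 is free from here on */
	apply_at_speculative_wa		/* may bl, clobbering x30 */
	check_and_unmask_ea
	handle_sync_exception
end_vector_entry sync_exception_aarch64

	/* Handler side: use x30 as scratch, reload the saved LR when needed. */
func handle_lower_el_sync_ea
	mrs	x30, esr_el3		/* x30 as a work register */
	ubfx	x30, x30, #ESR_EC_SHIFT, #ESR_EC_LENGTH
	/* ... dispatch on the exception class ... */
	ldr	x30, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_LR]
	no_ret	report_unhandled_exception
endfunc handle_lower_el_sync_ea

This is also why apply_at_speculative_wa can drop from stp/ldp of x29, x30 to
str/ldr of x29 alone: the bl to save_and_update_ptw_el1_sys_regs still
clobbers x30, but the live LR value already sits in CTX_GPREG_LR, saved by the
vector entry.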