diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst index 975c1f2fa..cf8cbc760 100644 --- a/docs/design/firmware-design.rst +++ b/docs/design/firmware-design.rst @@ -1481,7 +1481,9 @@ the returned ``cpu_ops`` is then invoked which executes the required reset handling for that CPU and also any errata workarounds enabled by the platform. It should be defined using the ``cpu_reset_func_{start,end}`` macros and its -body may only clobber x0 to x14 with x14 being the cpu_rev parameter. +body may only clobber x0 to x14 with x14 being the cpu_rev parameter. The cpu +file should also include a call to ``cpu_reset_prologue`` at the start of the +file for errata to work correctly. CPU specific power down sequence ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S index a878a5f60..31f8811a5 100644 --- a/include/lib/cpus/aarch32/cpu_macros.S +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -172,11 +172,6 @@ \_cpu\()_errata_list_start: .endif - /* unused on AArch32, maintain for portability */ - .word 0 - /* TODO(errata ABI): this prevents all checker functions from - * being optimised away. Can be done away with unless the ABI - * needs them */ .ifnb \_special .word check_errata_\_special .elseif \_cve @@ -188,9 +183,7 @@ .word \_id .hword \_cve .byte \_chosen - /* TODO(errata ABI): mitigated field for known but unmitigated - * errata*/ - .byte 0x1 + .byte 0x0 /* alignment */ .popsection .endm diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index c8f4bde25..f3df59568 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -238,50 +238,22 @@ * _apply_at_reset: * Whether the erratum should be automatically applied at reset */ -.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req +.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req +#if REPORT_ERRATA || ERRATA_ABI_SUPPORT .pushsection .rodata.errata_entries .align 3 .ifndef \_cpu\()_errata_list_start \_cpu\()_errata_list_start: .endif - /* check if unused and compile out if no references */ - .if \_apply_at_reset && \_chosen - .quad erratum_\_cpu\()_\_id\()_wa - .else - .quad 0 - .endif - /* TODO(errata ABI): this prevents all checker functions from - * being optimised away. 
Can be done away with unless the ABI - * needs them */ .quad check_erratum_\_cpu\()_\_id /* Will fit CVEs with up to 10 character in the ID field */ .word \_id .hword \_cve .byte \_chosen - /* TODO(errata ABI): mitigated field for known but unmitigated - * errata */ - .byte 0x1 + .byte 0x0 /* alignment */ .popsection -.endm - -.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req - add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset - - func erratum_\_cpu\()_\_id\()_wa - mov x8, x30 - - /* save rev_var for workarounds that might need it but don't - * restore to x0 because few will care */ - mov x7, x0 - bl check_erratum_\_cpu\()_\_id - cbz x0, erratum_\_cpu\()_\_id\()_skip -.endm - -.macro _workaround_end _cpu:req, _id:req - erratum_\_cpu\()_\_id\()_skip: - ret x8 - endfunc erratum_\_cpu\()_\_id\()_wa +#endif .endm /******************************************************************************* @@ -311,7 +283,22 @@ * _wa clobbers: x0-x8 (PCS compliant) */ .macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req - _workaround_start \_cpu, \_cve, \_id, \_chosen, 1 + add_erratum_entry \_cpu, \_cve, \_id, \_chosen + + .if \_chosen + /* put errata directly into the reset function */ + .pushsection .text.asm.\_cpu\()_reset_func, "ax" + .else + /* or something else that will get garbage collected by the + * linker */ + .pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax" + .endif + /* revision is stored in x14, get it */ + mov x0, x14 + bl check_erratum_\_cpu\()_\_id + /* save rev_var for workarounds that might need it */ + mov x7, x14 + cbz x0, erratum_\_cpu\()_\_id\()_skip_reset .endm /* @@ -322,6 +309,10 @@ * for errata applied in generic code */ .macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr + add_erratum_entry \_cpu, \_cve, \_id, \_chosen + + func erratum_\_cpu\()_\_id\()_wa + mov x8, x30 /* * Let errata specify if they need MIDR checking. Sadly, storing the * MIDR in an .equ to retrieve automatically blows up as it stores some @@ -329,11 +320,15 @@ */ .ifnb \_midr jump_if_cpu_midr \_midr, 1f - b erratum_\_cpu\()_\_id\()_skip + b erratum_\_cpu\()_\_id\()_skip_runtime 1: .endif - _workaround_start \_cpu, \_cve, \_id, \_chosen, 0 + /* save rev_var for workarounds that might need it but don't + * restore to x0 because few will care */ + mov x7, x0 + bl check_erratum_\_cpu\()_\_id + cbz x0, erratum_\_cpu\()_\_id\()_skip_runtime .endm /* @@ -341,7 +336,8 @@ * is kept here so the same #define can be used as that macro */ .macro workaround_reset_end _cpu:req, _cve:req, _id:req - _workaround_end \_cpu, \_id + erratum_\_cpu\()_\_id\()_skip_reset: + .popsection .endm /* @@ -361,7 +357,9 @@ .ifb \_no_isb isb .endif - _workaround_end \_cpu, \_id + erratum_\_cpu\()_\_id\()_skip_runtime: + ret x8 + endfunc erratum_\_cpu\()_\_id\()_wa .endm /******************************************************************************* @@ -598,7 +596,21 @@ ******************************************************************************/ /* - * Wrapper to automatically apply all reset-time errata. Will end with an isb. + * Helper to register a cpu with the errata framework. Begins the definition of + * the reset function. + * + * _cpu: + * Name of cpu as given to declare_cpu_ops + */ +.macro cpu_reset_prologue _cpu:req + func \_cpu\()_reset_func + mov x15, x30 + get_rev_var x14, x0 +.endm + +/* + * Wrapper of the reset function to automatically apply all reset-time errata. + * Will end with an isb. 
* * _cpu: * Name of cpu as given to declare_cpu_ops @@ -608,38 +620,9 @@ * argument x14 - cpu_rev_var */ .macro cpu_reset_func_start _cpu:req - func \_cpu\()_reset_func - mov x15, x30 - get_rev_var x14, x0 - - /* short circuit the location to avoid searching the list */ - adrp x12, \_cpu\()_errata_list_start - add x12, x12, :lo12:\_cpu\()_errata_list_start - adrp x13, \_cpu\()_errata_list_end - add x13, x13, :lo12:\_cpu\()_errata_list_end - - errata_begin: - /* if head catches up with end of list, exit */ - cmp x12, x13 - b.eq errata_end - - ldr x10, [x12, #ERRATUM_WA_FUNC] - /* TODO(errata ABI): check mitigated and checker function fields - * for 0 */ - ldrb w11, [x12, #ERRATUM_CHOSEN] - - /* skip if not chosen */ - cbz x11, 1f - /* skip if runtime erratum */ - cbz x10, 1f - - /* put cpu revision in x0 and call workaround */ - mov x0, x14 - blr x10 - 1: - add x12, x12, #ERRATUM_ENTRY_SIZE - b errata_begin - errata_end: + /* the func/endfunc macros will change sections. So change the section + * back to the reset function's */ + .section .text.asm.\_cpu\()_reset_func, "ax" .endm .macro cpu_reset_func_end _cpu:req diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h index b9166f713..10b949f6c 100644 --- a/include/lib/cpus/errata.h +++ b/include/lib/cpus/errata.h @@ -9,20 +9,18 @@ #include -#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE #define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE #define ERRATUM_ID_SIZE 4 #define ERRATUM_CVE_SIZE 2 #define ERRATUM_CHOSEN_SIZE 1 -#define ERRATUM_MITIGATED_SIZE 1 +#define ERRATUM_ALIGNMENT_SIZE 1 -#define ERRATUM_WA_FUNC 0 -#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE +#define ERRATUM_CHECK_FUNC 0 #define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE #define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE #define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE -#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE -#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE +#define ERRATUM_ALIGNMENT ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE +#define ERRATUM_ENTRY_SIZE ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE /* Errata status */ #define ERRATA_NOT_APPLIES 0 @@ -39,15 +37,13 @@ void print_errata_status(void); * uintptr_t will reflect the change and the alignment will be correct in both. */ struct erratum_entry { - uintptr_t (*wa_func)(uint64_t cpu_rev); uintptr_t (*check_func)(uint64_t cpu_rev); /* Will fit CVEs with up to 10 character in the ID field */ uint32_t id; /* Denote CVEs with their year or errata with 0 */ uint16_t cve; uint8_t chosen; - /* TODO(errata ABI): placeholder for the mitigated field */ - uint8_t _mitigated; + uint8_t _alignment; } __packed; CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE, diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S index 9002da65b..9843943f5 100644 --- a/lib/cpus/aarch64/aem_generic.S +++ b/lib/cpus/aarch64/aem_generic.S @@ -8,6 +8,8 @@ #include #include +cpu_reset_prologue aem_generic + func aem_generic_core_pwr_dwn /* --------------------------------------------- * Disable the Data Cache. diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S index c3d8c8dd5..40e620036 100644 --- a/lib/cpus/aarch64/cortex_a35.S +++ b/lib/cpus/aarch64/cortex_a35.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,7 @@ #include #include +cpu_reset_prologue cortex_a35 /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S index b399bbc5f..cbeeb2b7e 100644 --- a/lib/cpus/aarch64/cortex_a510.S +++ b/lib/cpus/aarch64/cortex_a510.S @@ -22,6 +22,8 @@ #error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a510 + workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240 /* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */ sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \ diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S index d9e654b0f..7d63dbbbf 100644 --- a/lib/cpus/aarch64/cortex_a520.S +++ b/lib/cpus/aarch64/cortex_a520.S @@ -24,6 +24,8 @@ #error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a520 + workaround_reset_start cortex_a520, ERRATUM(2630792), ERRATA_A520_2630792 sysreg_bit_set CORTEX_A520_CPUACTLR_EL1, BIT(38) workaround_reset_end cortex_a520, ERRATUM(2630792) diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S index 4a5b31814..dbfff8795 100644 --- a/lib/cpus/aarch64/cortex_a53.S +++ b/lib/cpus/aarch64/cortex_a53.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -12,6 +12,8 @@ #include #include +cpu_reset_prologue cortex_a53 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -36,12 +38,12 @@ endfunc cortex_a53_disable_smp /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319 mrs x1, CORTEX_A53_L2ACTLR_EL1 @@ -55,7 +57,7 @@ check_erratum_ls cortex_a53, ERRATUM(826319), CPU_REV(0, 2) /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN check_erratum_custom_start cortex_a53, ERRATUM(835769) cmp x0, CPU_REV(0, 4) @@ -78,7 +80,7 @@ exit_check_errata_835769: check_erratum_custom_end cortex_a53, ERRATUM(835769) /* workaround at build time */ -add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET 
+add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769 /* * Disable the cache non-temporal hint. @@ -114,7 +116,7 @@ exit_check_errata_843419: check_erratum_custom_end cortex_a53, ERRATUM(843419) /* workaround at build time */ -add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419 /* * Earlier revisions of the core are affected as well, but don't @@ -131,7 +133,7 @@ check_erratum_hs cortex_a53, ERRATUM(855873), CPU_REV(0, 3) check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924 cpu_reset_func_start cortex_a53 /* Enable the SMP bit. */ diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S index 2267d667b..cf91431a7 100644 --- a/lib/cpus/aarch64/cortex_a55.S +++ b/lib/cpus/aarch64/cortex_a55.S @@ -20,6 +20,8 @@ .globl cortex_a55_reset_func .globl cortex_a55_core_pwr_dwn +cpu_reset_prologue cortex_a55 + workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953 errata_dsu_798953_wa_impl workaround_reset_end cortex_a55, ERRATUM(798953) @@ -111,7 +113,7 @@ check_erratum_ls cortex_a55, ERRATUM(1221012), CPU_REV(1, 0) check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET +add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923 cpu_reset_func_start cortex_a55 cpu_reset_func_end cortex_a55 diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index aac9c513c..adacc5cb2 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -13,6 +13,8 @@ #include #include +cpu_reset_prologue cortex_a57 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -81,7 +83,7 @@ check_erratum_ls cortex_a57, ERRATUM(806969), CPU_REV(0, 0) /* erratum always worked around, but report it correctly */ check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0) -add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420 sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI @@ -150,7 +152,7 @@ check_erratum_ls cortex_a57, ERRATUM(859972), CPU_REV(1, 3) check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET +add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537 workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715 #if IMAGE_BL31 diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S index 064e6f0ec..3c32adb4e 100644 --- a/lib/cpus/aarch64/cortex_a65.S +++ b/lib/cpus/aarch64/cortex_a65.S @@ -23,6 +23,8 @@ #error "Cortex-A65 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a65 + workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end cortex_a65, ERRATUM(936184) diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S index d2f9e49f1..f1a63b06c 100644 --- a/lib/cpus/aarch64/cortex_a65ae.S +++ b/lib/cpus/aarch64/cortex_a65ae.S @@ -23,6 +23,8 @@ #error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a65ae + workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end cortex_a65ae, ERRATUM(936184) diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S index 17163a150..cb24aa116 100644 --- a/lib/cpus/aarch64/cortex_a710.S +++ b/lib/cpus/aarch64/cortex_a710.S @@ -29,6 +29,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a710 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46) @@ -223,7 +225,7 @@ workaround_reset_end cortex_a710, CVE(2022, 23960) check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772, NO_APPLY_AT_RESET +add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772 check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1) diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S index fbc73eda3..e50764d02 100644 --- a/lib/cpus/aarch64/cortex_a715.S +++ b/lib/cpus/aarch64/cortex_a715.S @@ -28,6 +28,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a715 + workaround_reset_start cortex_a715, ERRATUM(2331818), ERRATA_A715_2331818 sysreg_bit_set CORTEX_A715_CPUACTLR2_EL1, BIT(20) workaround_reset_end cortex_a715, ERRATUM(2331818) @@ -129,7 +131,7 @@ workaround_reset_end cortex_a715, CVE(2022, 23960) check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560, NO_APPLY_AT_RESET +add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560 check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3) diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index c300ea7cf..fee28ee79 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -15,6 +15,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a72 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -92,7 +94,7 @@ check_erratum_ls cortex_a72, ERRATUM(859971), CPU_REV(0, 3) /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367 /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET +add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367 workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715 #if IMAGE_BL31 diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S index ab2c12f3f..2991f93b9 100644 --- a/lib/cpus/aarch64/cortex_a720.S +++ b/lib/cpus/aarch64/cortex_a720.S @@ -22,6 +22,8 @@ #error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a720 + .global check_erratum_cortex_a720_3699561 #if WORKAROUND_CVE_2022_23960 @@ -74,7 +76,7 @@ workaround_reset_end cortex_a720, CVE(2022, 23960) check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561, NO_APPLY_AT_RESET +add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561 check_erratum_ls cortex_a720, ERRATUM(3699561), CPU_REV(0, 2) diff --git a/lib/cpus/aarch64/cortex_a720_ae.S b/lib/cpus/aarch64/cortex_a720_ae.S index 57a5030d8..c72a29eb8 100644 --- a/lib/cpus/aarch64/cortex_a720_ae.S +++ b/lib/cpus/aarch64/cortex_a720_ae.S @@ -21,9 +21,11 @@ #error "Cortex-A720AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a720_ae + .global check_erratum_cortex_a720_ae_3699562 -add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562, NO_APPLY_AT_RESET +add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562 check_erratum_ls cortex_a720_ae, ERRATUM(3699562), CPU_REV(0, 0) diff --git a/lib/cpus/aarch64/cortex_a725.S b/lib/cpus/aarch64/cortex_a725.S index c4d603441..a8c0db246 100644 --- a/lib/cpus/aarch64/cortex_a725.S +++ b/lib/cpus/aarch64/cortex_a725.S @@ -21,9 +21,11 @@ #error "Cortex-A725 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a725 + .global check_erratum_cortex_a725_3699564 -add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564, NO_APPLY_AT_RESET +add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564 check_erratum_ls cortex_a725, ERRATUM(3699564), CPU_REV(0, 1) diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index 2130ceb1f..d1fc6d405 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -10,6 +10,8 @@ #include #include +cpu_reset_prologue cortex_a73 + /* --------------------------------------------- * Disable L1 data cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 336e00e5c..13599ca96 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -18,6 +18,8 @@ #error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled" #endif +cpu_reset_prologue cortex_a75 + workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081 sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT workaround_reset_end cortex_a75, ERRATUM(764081) diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S index 589edceda..822ef05a0 100644 --- a/lib/cpus/aarch64/cortex_a76.S +++ b/lib/cpus/aarch64/cortex_a76.S @@ -30,6 +30,8 @@ #define ESR_EL3_A64_SMC0 0x5e000000 #define ESR_EL3_A32_SMC0 0x4e000000 +cpu_reset_prologue cortex_a76 + #if DYNAMIC_WORKAROUND_CVE_2018_3639 /* * This macro applies the mitigation for CVE-2018-3639. @@ -428,7 +430,7 @@ check_erratum_custom_end cortex_a76, ERRATUM(1165522) check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET +add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953 errata_dsu_798953_wa_impl diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S index 2fe3dbcf7..54af9a09e 100644 --- a/lib/cpus/aarch64/cortex_a76ae.S +++ b/lib/cpus/aarch64/cortex_a76ae.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a76ae + #if WORKAROUND_CVE_2022_23960 wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae #endif /* WORKAROUND_CVE_2022_23960 */ diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S index 1759b7760..7fb964d17 100644 --- a/lib/cpus/aarch64/cortex_a77.S +++ b/lib/cpus/aarch64/cortex_a77.S @@ -17,6 +17,8 @@ #error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled" #endif +cpu_reset_prologue cortex_a77 + /* 64-bit only core */ #if CTX_INCLUDE_AARCH32_REGS == 1 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S index 9f2ffdf8f..a66214bca 100644 --- a/lib/cpus/aarch64/cortex_a78.S +++ b/lib/cpus/aarch64/cortex_a78.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -24,6 +24,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78 + /* Disable hardware page aggregation.Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S index 7fa1f9bcd..c537967a4 100644 --- a/lib/cpus/aarch64/cortex_a78_ae.S +++ b/lib/cpus/aarch64/cortex_a78_ae.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause @@ -22,6 +22,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78_ae + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S index 3f6944a7a..aba7d2593 100644 --- a/lib/cpus/aarch64/cortex_a78c.S +++ b/lib/cpus/aarch64/cortex_a78c.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, Arm Limited. All rights reserved. + * Copyright (c) 2021-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78c + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_alto.S b/lib/cpus/aarch64/cortex_alto.S index 1422563ae..97192a6a0 100644 --- a/lib/cpus/aarch64/cortex_alto.S +++ b/lib/cpus/aarch64/cortex_alto.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Arm Limited. All rights reserved. + * Copyright (c) 2024-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -25,6 +25,8 @@ #error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue cortex_alto + cpu_reset_func_start cortex_alto /* Disable speculative loads */ msr SSBS, xzr diff --git a/lib/cpus/aarch64/cortex_arcadia.S b/lib/cpus/aarch64/cortex_arcadia.S index c97d87dbe..ae8eb91d1 100644 --- a/lib/cpus/aarch64/cortex_arcadia.S +++ b/lib/cpus/aarch64/cortex_arcadia.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Arm Limited. All rights reserved. + * Copyright (c) 2024-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Cortex-ARCADIA supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_arcadia + cpu_reset_func_start cortex_arcadia /* Disable speculative loads */ msr SSBS, xzr diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S index 709bb129b..cdf62841a 100644 --- a/lib/cpus/aarch64/cortex_gelas.S +++ b/lib/cpus/aarch64/cortex_gelas.S @@ -29,6 +29,8 @@ #error "Gelas needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue cortex_gelas + cpu_reset_func_start cortex_gelas /* ---------------------------------------------------- * Disable speculative loads diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S index 5bd020c6c..27d181a02 100644 --- a/lib/cpus/aarch64/cortex_x1.S +++ b/lib/cpus/aarch64/cortex_x1.S @@ -23,6 +23,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x1 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S index 547c430cd..549beec4f 100644 --- a/lib/cpus/aarch64/cortex_x2.S +++ b/lib/cpus/aarch64/cortex_x2.S @@ -25,7 +25,7 @@ .global check_erratum_cortex_x2_3701772 -add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772, NO_APPLY_AT_RESET +add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772 check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1) @@ -33,6 +33,8 @@ check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1) wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x2 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S index f57a113d7..da9e30608 100644 --- a/lib/cpus/aarch64/cortex_x3.S +++ b/lib/cpus/aarch64/cortex_x3.S @@ -24,7 +24,7 @@ .global check_erratum_cortex_x3_3701769 -add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769, NO_APPLY_AT_RESET +add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769 check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2) @@ -32,6 +32,8 @@ check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2) wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x3 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S index 4b1cf9175..79a8d54a5 100644 --- a/lib/cpus/aarch64/cortex_x4.S +++ b/lib/cpus/aarch64/cortex_x4.S @@ -22,6 +22,8 @@ #error "Cortex X4 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_x4 + .global check_erratum_cortex_x4_2726228 .global check_erratum_cortex_x4_3701758 @@ -122,7 +124,7 @@ workaround_reset_end cortex_x4, CVE(2024, 7881) check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881 -add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758, NO_APPLY_AT_RESET +add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758 check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3) diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S index 5974c18d5..7dec3752e 100644 --- a/lib/cpus/aarch64/cortex_x925.S +++ b/lib/cpus/aarch64/cortex_x925.S @@ -21,9 +21,9 @@ #error "Cortex-X925 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif -.global check_erratum_cortex_x925_3701747 +cpu_reset_prologue cortex_x925 -add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747, NO_APPLY_AT_RESET +add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747 check_erratum_ls cortex_x925, ERRATUM(3701747), CPU_REV(0, 1) diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S index ca250d370..64158e720 100644 --- a/lib/cpus/aarch64/denver.S +++ b/lib/cpus/aarch64/denver.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause @@ -13,6 +13,8 @@ #include #include +cpu_reset_prologue denver + /* ------------------------------------------------- * CVE-2017-5715 mitigation * diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S index 849056f49..0a10eed1a 100644 --- a/lib/cpus/aarch64/generic.S +++ b/lib/cpus/aarch64/generic.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, Arm Limited. All rights reserved. + * Copyright (c) 2020-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,8 @@ #include #include +cpu_reset_prologue generic + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S index c6dd11783..f37bb2805 100644 --- a/lib/cpus/aarch64/neoverse_e1.S +++ b/lib/cpus/aarch64/neoverse_e1.S @@ -22,6 +22,8 @@ #error "Neoverse-E1 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_e1 + workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end neoverse_e1, ERRATUM(936184) diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S index 638d0d3a3..1ad9557db 100644 --- a/lib/cpus/aarch64/neoverse_n1.S +++ b/lib/cpus/aarch64/neoverse_n1.S @@ -28,6 +28,8 @@ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue neoverse_n1 + workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end neoverse_n1, ERRATUM(936184) diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S index fd6877dab..9c6f01a49 100644 --- a/lib/cpus/aarch64/neoverse_n2.S +++ b/lib/cpus/aarch64/neoverse_n2.S @@ -23,7 +23,7 @@ .global check_erratum_neoverse_n2_3701773 -add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773, NO_APPLY_AT_RESET +add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773 check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3) @@ -31,6 +31,8 @@ check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3) wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue neoverse_n2 + workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941 errata_dsu_2313941_wa_impl workaround_reset_end neoverse_n2, ERRATUM(2313941) diff --git a/lib/cpus/aarch64/neoverse_n3.S b/lib/cpus/aarch64/neoverse_n3.S index 8abcafeb9..1b7a3e154 100644 --- a/lib/cpus/aarch64/neoverse_n3.S +++ b/lib/cpus/aarch64/neoverse_n3.S @@ -21,9 +21,11 @@ #error "Neoverse-N3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_n3 + .global check_erratum_neoverse_n3_3699563 -add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563, NO_APPLY_AT_RESET +add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563 check_erratum_ls neoverse_n3, ERRATUM(3699563), CPU_REV(0, 0) diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S index f16f7e70b..e1e821490 100644 --- a/lib/cpus/aarch64/neoverse_v1.S +++ b/lib/cpus/aarch64/neoverse_v1.S @@ -22,6 +22,8 @@ #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v1 + #if WORKAROUND_CVE_2022_23960 wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1 #endif /* WORKAROUND_CVE_2022_23960 */ diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S index b43f6dd19..06521ecb4 100644 --- a/lib/cpus/aarch64/neoverse_v2.S +++ b/lib/cpus/aarch64/neoverse_v2.S @@ -22,6 +22,8 @@ #error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v2 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S index dad37099c..29bfd0ead 100644 --- a/lib/cpus/aarch64/neoverse_v3.S +++ b/lib/cpus/aarch64/neoverse_v3.S @@ -22,9 +22,11 @@ #error "Neoverse V3 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v3 + .global check_erratum_neoverse_v3_3701767 -add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767, NO_APPLY_AT_RESET +add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767 check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2) diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S index 0180ab7d5..0d04e65ec 100644 --- a/lib/cpus/aarch64/nevis.S +++ b/lib/cpus/aarch64/nevis.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, Arm Limited. All rights reserved. + * Copyright (c) 2023-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue nevis + cpu_reset_func_start nevis /* ---------------------------------------------------- * Disable speculative loads diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S index 529bb4f2a..a7273791e 100644 --- a/lib/cpus/aarch64/qemu_max.S +++ b/lib/cpus/aarch64/qemu_max.S @@ -8,6 +8,8 @@ #include #include +cpu_reset_prologue qemu_max + func qemu_max_core_pwr_dwn /* --------------------------------------------- * Disable the Data Cache. diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S index ea687be6e..9ad93627d 100644 --- a/lib/cpus/aarch64/rainier.S +++ b/lib/cpus/aarch64/rainier.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, Arm Limited. All rights reserved. + * Copyright (c) 2020-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue rainier + /* -------------------------------------------------- * Disable speculative loads if Rainier supports * SSBS. diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S index 2e41668f4..0a95e8009 100644 --- a/lib/cpus/aarch64/travis.S +++ b/lib/cpus/aarch64/travis.S @@ -29,6 +29,8 @@ #error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue travis + cpu_reset_func_start travis /* ---------------------------------------------------- * Disable speculative loads
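
As a rough sketch of the registration flow this patch establishes (the cpu name cortex_foo, the erratum numbers, the ERRATA_FOO_* build flags and the CORTEX_FOO_CPUACTLR_EL1 register below are hypothetical placeholders, not real TF-A symbols), a cpu file would now be laid out roughly like this:

#include <asm_macros.S>
#include <cpu_macros.S>

/* register the cpu with the errata framework and open its reset function */
cpu_reset_prologue cortex_foo

/*
 * Reset-time workaround: when the erratum is chosen the body is emitted
 * directly into the reset function's section; otherwise it lands in a
 * section the linker can garbage-collect. cpu_rev is already in x14.
 */
workaround_reset_start cortex_foo, ERRATUM(1234567), ERRATA_FOO_1234567
	sysreg_bit_set CORTEX_FOO_CPUACTLR_EL1, BIT(0)
workaround_reset_end cortex_foo, ERRATUM(1234567)

check_erratum_ls cortex_foo, ERRATUM(1234567), CPU_REV(1, 0)

/*
 * An erratum handled entirely in generic code still gets an entry so the
 * errata ABI and the status report can see it; note that the trailing
 * NO_APPLY_AT_RESET argument is gone in the new add_erratum_entry form.
 */
add_erratum_entry cortex_foo, ERRATUM(7654321), ERRATA_FOO_7654321

check_erratum_chosen cortex_foo, ERRATUM(7654321), ERRATA_FOO_7654321

/*
 * The reset function no longer walks the errata list at runtime: the chosen
 * reset workarounds above were already placed in its section, so only
 * cpu-specific setup remains between start and end.
 */
cpu_reset_func_start cortex_foo
cpu_reset_func_end cortex_foo

/* the usual declare_cpu_ops invocation follows, unchanged by this patch */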