From 5cba510ee31c520c6bd3254a4fd791d411d02152 Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Mon, 20 Jan 2025 14:54:35 +0000 Subject: [PATCH 01/10] fix(cpus): declare reset errata correctly The errata in this patch are declared as runtime, but are never called explicitly. This means that they are never called! Convert them to reset errata so that they are called at reset. Their SDEN entries have been checked and confirm that this is how they should be implemented. Also, drop the MIDR check on the a57 erratum as it's not needed - the erratum is already called from a cpu-specific function. Change-Id: I22c3043ab454ce94b3c122c856e5804bc2ebb18b Signed-off-by: Boyan Karatotev --- lib/cpus/aarch64/cortex_a57.S | 4 ++-- lib/cpus/aarch64/cortex_x3.S | 8 ++++---- lib/cpus/aarch64/neoverse_n2.S | 8 ++++---- lib/cpus/aarch64/neoverse_v1.S | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index fecb56f4c..aac9c513c 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2020, NVIDIA Corporation. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause @@ -95,7 +95,7 @@ workaround_reset_end cortex_a57, ERRATUM(814670) check_erratum_ls cortex_a57, ERRATUM(814670), CPU_REV(0, 0) -workaround_runtime_start cortex_a57, ERRATUM(817169), ERRATA_A57_817169, CORTEX_A57_MIDR +workaround_runtime_start cortex_a57, ERRATUM(817169), ERRATA_A57_817169 /* Invalidate any TLB address */ mov x0, #0 tlbi vae3, x0 diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S index 503a4e5b6..f57a113d7 100644 --- a/lib/cpus/aarch64/cortex_x3.S +++ b/lib/cpus/aarch64/cortex_x3.S @@ -52,9 +52,9 @@ workaround_reset_end cortex_x3, ERRATUM(2266875) check_erratum_ls cortex_x3, ERRATUM(2266875), CPU_REV(1, 0) -workaround_runtime_start cortex_x3, ERRATUM(2302506), ERRATA_X3_2302506 +workaround_reset_start cortex_x3, ERRATUM(2302506), ERRATA_X3_2302506 sysreg_bit_set CORTEX_X3_CPUACTLR2_EL1, BIT(0) -workaround_runtime_end cortex_x3, ERRATUM(2302506), NO_ISB +workaround_reset_end cortex_x3, ERRATUM(2302506) check_erratum_ls cortex_x3, ERRATUM(2302506), CPU_REV(1, 1) @@ -84,9 +84,9 @@ workaround_reset_end cortex_x3, ERRATUM(2615812) check_erratum_ls cortex_x3, ERRATUM(2615812), CPU_REV(1, 1) -workaround_runtime_start cortex_x3, ERRATUM(2641945), ERRATA_X3_2641945 +workaround_reset_start cortex_x3, ERRATUM(2641945), ERRATA_X3_2641945 sysreg_bit_set CORTEX_X3_CPUACTLR6_EL1, BIT(41) -workaround_runtime_end cortex_x3, ERRATUM(2641945), NO_ISB +workaround_reset_end cortex_x3, ERRATUM(2641945) check_erratum_ls cortex_x3, ERRATUM(2641945), CPU_REV(1, 0) diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S index ce7e8342c..9ffe98fbb 100644 --- a/lib/cpus/aarch64/neoverse_n2.S +++ b/lib/cpus/aarch64/neoverse_n2.S @@ -180,20 +180,20 @@ workaround_runtime_end neoverse_n2, ERRATUM(2326639) check_erratum_ls neoverse_n2, ERRATUM(2326639), CPU_REV(0, 0) -workaround_runtime_start neoverse_n2, ERRATUM(2340933), ERRATA_N2_2340933 +workaround_reset_start neoverse_n2, ERRATUM(2340933), 
ERRATA_N2_2340933 /* Set bit 61 in CPUACTLR5_EL1 */ sysreg_bit_set NEOVERSE_N2_CPUACTLR5_EL1, BIT(61) -workaround_runtime_end neoverse_n2, ERRATUM(2340933) +workaround_reset_end neoverse_n2, ERRATUM(2340933) check_erratum_ls neoverse_n2, ERRATUM(2340933), CPU_REV(0, 0) -workaround_runtime_start neoverse_n2, ERRATUM(2346952), ERRATA_N2_2346952 +workaround_reset_start neoverse_n2, ERRATUM(2346952), ERRATA_N2_2346952 /* Set TXREQ to STATIC and full L2 TQ size */ mrs x1, NEOVERSE_N2_CPUECTLR2_EL1 mov x0, #CPUECTLR2_EL1_TXREQ_STATIC_FULL bfi x1, x0, #CPUECTLR2_EL1_TXREQ_LSB, #CPUECTLR2_EL1_TXREQ_WIDTH msr NEOVERSE_N2_CPUECTLR2_EL1, x1 -workaround_runtime_end neoverse_n2, ERRATUM(2346952) +workaround_reset_end neoverse_n2, ERRATUM(2346952) check_erratum_ls neoverse_n2, ERRATUM(2346952), CPU_REV(0, 2) diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S index d1a2c24e9..f16f7e70b 100644 --- a/lib/cpus/aarch64/neoverse_v1.S +++ b/lib/cpus/aarch64/neoverse_v1.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -203,10 +203,10 @@ workaround_reset_end neoverse_v1, ERRATUM(2294912) check_erratum_ls neoverse_v1, ERRATUM(2294912), CPU_REV(1, 2) -workaround_runtime_start neoverse_v1, ERRATUM(2348377), ERRATA_V1_2348377 +workaround_reset_start neoverse_v1, ERRATUM(2348377), ERRATA_V1_2348377 /* Set bit 61 in CPUACTLR5_EL1 */ sysreg_bit_set NEOVERSE_V1_ACTLR5_EL1, NEOVERSE_V1_ACTLR5_EL1_BIT_61 -workaround_runtime_end neoverse_v1, ERRATUM(2348377) +workaround_reset_end neoverse_v1, ERRATUM(2348377) check_erratum_ls neoverse_v1, ERRATUM(2348377), CPU_REV(1, 1) From 52e89e9e1d49400293d6ae11aea7ccf4c590d1fb Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Thu, 23 Jan 2025 16:17:52 +0000 Subject: [PATCH 02/10] refactor(cpus): convert the Cortex-A65 to use the errata framework Result was verified by manually stepping through the reset function with a debugger. Change-Id: I91cd6111ccf95d6b7ee2364ac2126cb98ee4bb15 Signed-off-by: Boyan Karatotev --- lib/cpus/aarch64/cortex_a65.S | 25 +++++++++++-------------- 1 file changed, 11 insertions(+), 14 deletions(-) diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S index 3023ecbe5..b3c1726eb 100644 --- a/lib/cpus/aarch64/cortex_a65.S +++ b/lib/cpus/aarch64/cortex_a65.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -22,20 +22,18 @@ #error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif -/* ------------------------------------------------- - * The CPU Ops reset function for Cortex-A65. - * Shall clobber: x0-x19 - * ------------------------------------------------- +/* + * ERRATA_DSU_936184: + * The errata is defined in dsu_helpers.S and applies to neoverse_e1. 
+ * Henceforth creating symbolic names to the already existing errata + * workaround functions to get them registered under the Errata Framework. */ -func cortex_a65_reset_func - mov x19, x30 +.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184 +.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa +add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET -#if ERRATA_DSU_936184 - bl errata_dsu_936184_wa -#endif - - ret x19 -endfunc cortex_a65_reset_func +cpu_reset_func_start cortex_a65 +cpu_reset_func_end cortex_a65 func cortex_a65_cpu_pwr_dwn mrs x0, CORTEX_A65_CPUPWRCTLR_EL1 @@ -45,7 +43,6 @@ func cortex_a65_cpu_pwr_dwn ret endfunc cortex_a65_cpu_pwr_dwn - .section .rodata.cortex_a65_regs, "aS" cortex_a65_regs: /* The ascii list of register names to be reported */ .asciz "cpuectlr_el1", "" From b54771678d0db79c709a7b379b107ea4546b33fe Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Tue, 21 Jan 2025 08:16:34 +0000 Subject: [PATCH 03/10] refactor(cpus): convert checker functions to standard helpers The library check_erratum_ls already incorporates the check. The return of ERRATA_MISSING is handled in the errata_report.c functions. Change-Id: Ic1dff2bc5235195f7cfce1709cd42467f88b3e4c Signed-off-by: Boyan Karatotev --- lib/cpus/aarch64/cortex_a520.S | 17 ++--------------- lib/cpus/aarch64/cortex_x4.S | 15 +-------------- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S index 811c836ab..d9e654b0f 100644 --- a/lib/cpus/aarch64/cortex_a520.S +++ b/lib/cpus/aarch64/cortex_a520.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, Arm Limited. All rights reserved. + * Copyright (c) 2021-2025, Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -39,20 +39,7 @@ check_erratum_ls cortex_a520, ERRATUM(2858100), CPU_REV(0, 1) workaround_runtime_start cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996, CORTEX_A520_MIDR workaround_runtime_end cortex_a520, ERRATUM(2938996) -check_erratum_custom_start cortex_a520, ERRATUM(2938996) - - /* This erratum needs to be enabled for r0p0 and r0p1. - * Check if revision is less than or equal to r0p1. - */ - -#if ERRATA_A520_2938996 - mov x1, #1 - b cpu_rev_var_ls -#else - mov x0, #ERRATA_MISSING -#endif - ret -check_erratum_custom_end cortex_a520, ERRATUM(2938996) +check_erratum_ls cortex_a520, ERRATUM(2938996), CPU_REV(0, 1) /* ---------------------------------------------------- * HW will do the cache maintenance while powering down diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S index 1e81892b1..4b1cf9175 100644 --- a/lib/cpus/aarch64/cortex_x4.S +++ b/lib/cpus/aarch64/cortex_x4.S @@ -32,20 +32,7 @@ workaround_runtime_start cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228, CORTEX_X4_MIDR workaround_runtime_end cortex_x4, ERRATUM(2726228) -check_erratum_custom_start cortex_x4, ERRATUM(2726228) - - /* This erratum needs to be enabled for r0p0 and r0p1. - * Check if revision is less than or equal to r0p1. - */ - -#if ERRATA_X4_2726228 - mov x1, #1 - b cpu_rev_var_ls -#else - mov x0, #ERRATA_MISSING -#endif - ret -check_erratum_custom_end cortex_x4, ERRATUM(2726228) +check_erratum_ls cortex_x4, ERRATUM(2726228), CPU_REV(0, 1) /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x4, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 From b62673c645752a78f649282cfa293e8da09e3bef Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Thu, 23 Jan 2025 15:27:30 +0000 Subject: [PATCH 04/10] refactor(cpus): register DSU errata with the errata framework's wrappers The existing DSU errata workarounds hijack the errata framework's inner workings to register with it. 
However, that is undesirable as any change to the framework may end up missing these workarounds. So convert the checks and workarounds to macros and have them included with the standard wrappers. The only problem with this is the is_scu_present_in_dsu weak function. Fortunately, it is only needed for 2 of the errata and only on 3 cores. So drop it, assuming the default behaviour and have the callers handle the exception. Change-Id: Iefa36325804ea093e938f867b9a6f49a6984b8ae Signed-off-by: Boyan Karatotev --- bl1/bl1.mk | 5 +- bl2/bl2.mk | 6 +- bl31/bl31.mk | 3 +- include/lib/cpus/aarch64/dsu_def.h | 20 +- include/lib/cpus/aarch64/dsu_macros.S | 97 +++++++++ include/lib/cpus/aarch64/neoverse_n_common.h | 18 -- lib/cpus/aarch64/cortex_a510.S | 20 +- lib/cpus/aarch64/cortex_a55.S | 35 +-- lib/cpus/aarch64/cortex_a65.S | 18 +- lib/cpus/aarch64/cortex_a65ae.S | 20 +- lib/cpus/aarch64/cortex_a710.S | 17 +- lib/cpus/aarch64/cortex_a75.S | 35 +-- lib/cpus/aarch64/cortex_a76.S | 35 +-- lib/cpus/aarch64/cortex_x2.S | 18 +- lib/cpus/aarch64/dsu_helpers.S | 217 ------------------- lib/cpus/aarch64/neoverse_e1.S | 22 +- lib/cpus/aarch64/neoverse_n1.S | 22 +- lib/cpus/aarch64/neoverse_n2.S | 20 +- lib/cpus/aarch64/neoverse_n_common.S | 26 --- plat/arm/board/arm_fpga/platform.mk | 3 +- plat/arm/board/fvp/platform.mk | 1 - plat/qemu/common/common.mk | 1 - 22 files changed, 256 insertions(+), 403 deletions(-) create mode 100644 include/lib/cpus/aarch64/dsu_macros.S delete mode 100644 include/lib/cpus/aarch64/neoverse_n_common.h delete mode 100644 lib/cpus/aarch64/dsu_helpers.S delete mode 100644 lib/cpus/aarch64/neoverse_n_common.S diff --git a/bl1/bl1.mk b/bl1/bl1.mk index a8a006163..c068ea54c 100644 --- a/bl1/bl1.mk +++ b/bl1/bl1.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause # @@ -18,8 +18,7 @@ BL1_SOURCES += bl1/${ARCH}/bl1_arch_setup.c \ ${MBEDTLS_SOURCES} ifeq (${ARCH},aarch64) -BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \ - lib/el3_runtime/aarch64/context.S \ +BL1_SOURCES += lib/el3_runtime/aarch64/context.S \ lib/cpus/errata_common.c endif diff --git a/bl2/bl2.mk b/bl2/bl2.mk index 850d82668..2a212e135 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # @@ -43,10 +43,6 @@ BL2_SOURCES += bl2/${ARCH}/bl2_el3_entrypoint.S \ bl2/${ARCH}/bl2_run_next_image.S \ lib/cpus/${ARCH}/cpu_helpers.S -ifeq (${ARCH},aarch64) -BL2_SOURCES += lib/cpus/aarch64/dsu_helpers.S -endif - BL2_DEFAULT_LINKER_SCRIPT_SOURCE := bl2/bl2_el3.ld.S endif diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 2f9dc6501..9b2b139ae 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # @@ -43,7 +43,6 @@ BL31_SOURCES += bl31/bl31_main.c \ bl31/bl31_traps.c \ common/runtime_svc.c \ lib/cpus/errata_common.c \ - lib/cpus/aarch64/dsu_helpers.S \ plat/common/aarch64/platform_mp_stack.S \ services/arm_arch_svc/arm_arch_svc_setup.c \ services/std_svc/std_svc_setup.c \ diff --git a/include/lib/cpus/aarch64/dsu_def.h b/include/lib/cpus/aarch64/dsu_def.h index 51fbfd1db..78b3e7f1c 100644 --- a/include/lib/cpus/aarch64/dsu_def.h +++ b/include/lib/cpus/aarch64/dsu_def.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2022, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -40,7 +40,23 @@ ********************************************************************/ #define DSU_ERRATA_936184_MASK (U(0x3) << 15) +#define CPUCFR_EL1 S3_0_C15_C0_0 +/* SCU bit of CPU Configuration Register, EL1 */ +#define SCU_SHIFT U(2) + #ifndef __ASSEMBLER__ -void dsu_pwr_dwn(void); +DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrctlr_el1, CLUSTERPWRCTLR_EL1); + +/* --------------------------------------------- + * controls power features of the cluster + * 1. Cache portion power not request + * 2. Disable the retention circuit + * --------------------------------------------- + */ +static inline void dsu_pwr_dwn(void) +{ + write_clusterpwrctlr_el1(0); + isb(); +} #endif #endif /* DSU_DEF_H */ diff --git a/include/lib/cpus/aarch64/dsu_macros.S b/include/lib/cpus/aarch64/dsu_macros.S new file mode 100644 index 000000000..6c8cb6904 --- /dev/null +++ b/include/lib/cpus/aarch64/dsu_macros.S @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2019-2025, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef DSU_MACROS_S +#define DSU_MACROS_S + +#include +#include +#include + +.macro check_errata_dsu_798953_impl + mov x2, #ERRATA_APPLIES + mov x3, #ERRATA_NOT_APPLIES + + /* Check if DSU is equal to r0p0 */ + mrs x1, CLUSTERIDR_EL1 + + /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ + ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\ + #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) + mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT) + cmp x0, x1 + csel x0, x2, x3, EQ +.endm + +.macro errata_dsu_798953_wa_impl + /* If erratum applies, disable high-level clock gating */ + mrs x0, CLUSTERACTLR_EL1 + orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING + msr CLUSTERACTLR_EL1, x0 +.endm + +.macro branch_if_scu_not_present _target:req + /* Check if the SCU L3 Unit is present on the DSU */ + mrs x0, CPUCFR_EL1 + ubfx x0, x0, #SCU_SHIFT, #1 + eor x0, x0, #1 + /* If SCU is not present, return without applying patch */ + cmp x0, xzr + mov x0, #ERRATA_NOT_APPLIES + b.eq \_target +.endm + +.macro check_errata_dsu_936184_impl + mov x0, #ERRATA_NOT_APPLIES + /* Erratum applies only if DSU has the ACP interface */ + mrs x1, CLUSTERCFR_EL1 + ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1 + cbz x1, 1f + + /* If ACP is present, check if DSU is older than r2p0 */ + mrs x1, CLUSTERIDR_EL1 + + /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ + ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\ + #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) + cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT) + b.hs 1f + mov x0, #ERRATA_APPLIES +1: +.endm + +.macro errata_dsu_936184_wa_impl + /* If erratum applies, we set a mask to a DSU control register */ + mrs x0, CLUSTERACTLR_EL1 + ldr x1, =DSU_ERRATA_936184_MASK + orr x0, x0, x1 + msr CLUSTERACTLR_EL1, x0 +.endm + +.macro check_errata_dsu_2313941_impl + mov x2, #ERRATA_APPLIES + mov x3, #ERRATA_NOT_APPLIES + + /* Check if DSU version is less than or equal to r3p1 */ + mrs x1, CLUSTERIDR_EL1 + + mov x0, 
#ERRATA_NOT_APPLIES + /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ + ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\ + #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) + mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT) + cmp x0, x1 + csel x0, x2, x3, LS +1: +.endm + +.macro errata_dsu_2313941_wa_impl + /* If erratum applies, disable high-level clock gating */ + mrs x0, CLUSTERACTLR_EL1 + orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING + msr CLUSTERACTLR_EL1, x0 +.endm +#endif /* DSU_MACROS_S */ diff --git a/include/lib/cpus/aarch64/neoverse_n_common.h b/include/lib/cpus/aarch64/neoverse_n_common.h deleted file mode 100644 index 7cb91cd05..000000000 --- a/include/lib/cpus/aarch64/neoverse_n_common.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright (c) 2020, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef NEOVERSE_N_COMMON_H -#define NEOVERSE_N_COMMON_H - -/****************************************************************************** - * Neoverse Nx CPU Configuration register definitions - *****************************************************************************/ -#define CPUCFR_EL1 S3_0_C15_C0_0 - -/* SCU bit of CPU Configuration Register, EL1 */ -#define SCU_SHIFT U(2) - -#endif /* NEOVERSE_N_COMMON_H */ diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S index b49d45a22..b399bbc5f 100644 --- a/lib/cpus/aarch64/cortex_a510.S +++ b/lib/cpus/aarch64/cortex_a510.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, Arm Limited. All rights reserved. + * Copyright (c) 2023-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,6 +9,7 @@ #include #include #include +#include #include /* Hardware handled coherency */ @@ -180,15 +181,14 @@ workaround_runtime_end cortex_a510, ERRATUM(2684597) check_erratum_ls cortex_a510, ERRATUM(2684597), CPU_REV(1, 2) -/* - * ERRATA_DSU_2313941 : - * The errata is defined in dsu_helpers.S but applies to cortex_a510 - * as well. 
Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_cortex_a510_2313941, check_errata_dsu_2313941 -.equ erratum_cortex_a510_2313941_wa, errata_dsu_2313941_wa -add_erratum_entry cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET +workaround_reset_start cortex_a510, ERRATUM(2313941), ERRATA_DSU_2313941 + errata_dsu_2313941_wa_impl +workaround_reset_end cortex_a510, ERRATUM(2313941) + +check_erratum_custom_start cortex_a510, ERRATUM(2313941) + check_errata_dsu_2313941_impl + ret +check_erratum_custom_end cortex_a510, ERRATUM(2313941) /* ---------------------------------------------------- * HW will do the cache maintenance while powering down diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S index d5a74e96d..f9e3edabc 100644 --- a/lib/cpus/aarch64/cortex_a55.S +++ b/lib/cpus/aarch64/cortex_a55.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,6 +9,7 @@ #include #include #include +#include #include /* Hardware handled coherency */ @@ -19,23 +20,23 @@ .globl cortex_a55_reset_func .globl cortex_a55_core_pwr_dwn -/* ERRATA_DSU_798953: - * The errata is defined in dsu_helpers.S but applies to cortex_a55 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. 
- */ -.equ check_erratum_cortex_a55_798953, check_errata_dsu_798953 -.equ erratum_cortex_a55_798953_wa, errata_dsu_798953_wa -add_erratum_entry cortex_a55, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET +workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953 + errata_dsu_798953_wa_impl +workaround_reset_end cortex_a55, ERRATUM(798953) -/* ERRATA_DSU_936184: - * The errata is defined in dsu_helpers.S but applies to cortex_a55 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_cortex_a55_936184, check_errata_dsu_936184 -.equ erratum_cortex_a55_936184_wa, errata_dsu_936184_wa -add_erratum_entry cortex_a55, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +check_erratum_custom_start cortex_a55, ERRATUM(798953) + check_errata_dsu_798953_impl + ret +check_erratum_custom_end cortex_a55, ERRATUM(798953) + +workaround_reset_start cortex_a55, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end cortex_a55, ERRATUM(936184) + +check_erratum_custom_start cortex_a55, ERRATUM(936184) + check_errata_dsu_936184_impl + ret +check_erratum_custom_end cortex_a55, ERRATUM(936184) workaround_reset_start cortex_a55, ERRATUM(768277), ERRATA_A55_768277 sysreg_bit_set CORTEX_A55_CPUACTLR_EL1, CORTEX_A55_CPUACTLR_EL1_DISABLE_DUAL_ISSUE diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S index b3c1726eb..064e6f0ec 100644 --- a/lib/cpus/aarch64/cortex_a65.S +++ b/lib/cpus/aarch64/cortex_a65.S @@ -10,6 +10,7 @@ #include #include #include +#include #include /* Hardware handled coherency */ @@ -22,15 +23,14 @@ #error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif -/* - * ERRATA_DSU_936184: - * The errata is defined in dsu_helpers.S and applies to neoverse_e1. 
- * Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184 -.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa -add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end cortex_a65, ERRATUM(936184) + +check_erratum_custom_start cortex_a65, ERRATUM(936184) + check_errata_dsu_936184_impl + ret +check_erratum_custom_end cortex_a65, ERRATUM(936184) cpu_reset_func_start cortex_a65 cpu_reset_func_end cortex_a65 diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S index 1cbb06aff..d2f9e49f1 100644 --- a/lib/cpus/aarch64/cortex_a65ae.S +++ b/lib/cpus/aarch64/cortex_a65ae.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,7 @@ #include #include #include +#include /* Hardware handled coherency */ #if !HW_ASSISTED_COHERENCY @@ -22,15 +23,14 @@ #error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif - /* - * ERRATA_DSU_936184 : - * The errata is defined in dsu_helpers.S but applies to cortex_a65ae - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. 
- */ -.equ check_erratum_cortex_a65ae_936184, check_errata_dsu_936184 -.equ erratum_cortex_a65ae_936184_wa, errata_dsu_936184_wa -add_erratum_entry cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end cortex_a65ae, ERRATUM(936184) + +check_erratum_custom_start cortex_a65ae, ERRATUM(936184) + check_errata_dsu_936184_impl + ret +check_erratum_custom_end cortex_a65ae, ERRATUM(936184) cpu_reset_func_start cortex_a65ae cpu_reset_func_end cortex_a65ae diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S index c50a7d322..17163a150 100644 --- a/lib/cpus/aarch64/cortex_a710.S +++ b/lib/cpus/aarch64/cortex_a710.S @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "wa_cve_2022_23960_bhb_vector.S" @@ -173,14 +174,14 @@ workaround_runtime_end cortex_a710, ERRATUM(2291219), NO_ISB check_erratum_ls cortex_a710, ERRATUM(2291219), CPU_REV(2, 0) -/* - * ERRATA_DSU_2313941 is defined in dsu_helpers.S but applies to Cortex-A710 as - * well. Create a symbollic link to existing errata workaround to get them - * registered under the Errata Framework. 
- */ -.equ check_erratum_cortex_a710_2313941, check_errata_dsu_2313941 -.equ erratum_cortex_a710_2313941_wa, errata_dsu_2313941_wa -add_erratum_entry cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET +workaround_reset_start cortex_a710, ERRATUM(2313941), ERRATA_DSU_2313941 + errata_dsu_2313941_wa_impl +workaround_reset_end cortex_a710, ERRATUM(2313941) + +check_erratum_custom_start cortex_a710, ERRATUM(2313941) + check_errata_dsu_2313941_impl + ret +check_erratum_custom_end cortex_a710, ERRATUM(2313941) workaround_reset_start cortex_a710, ERRATUM(2371105), ERRATA_A710_2371105 /* Set bit 40 in CPUACTLR2_EL1 */ diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 152c81f68..336e00e5c 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,6 +9,7 @@ #include #include #include +#include .global check_erratum_cortex_a75_764081 @@ -29,23 +30,23 @@ workaround_reset_end cortex_a75, ERRATUM(790748) check_erratum_ls cortex_a75, ERRATUM(790748), CPU_REV(0, 0) -/* ERRATA_DSU_798953 : - * The errata is defined in dsu_helpers.S but applies to cortex_a75 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_cortex_a75_798953, check_errata_dsu_798953 -.equ erratum_cortex_a75_798953_wa, errata_dsu_798953_wa -add_erratum_entry cortex_a75, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET +workaround_reset_start cortex_a75, ERRATUM(798953), ERRATA_DSU_798953 + errata_dsu_798953_wa_impl +workaround_reset_end cortex_a75, ERRATUM(798953) -/* ERRATA_DSU_936184 : - * The errata is defined in dsu_helpers.S but applies to cortex_a75 - * as well. 
Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_cortex_a75_936184, check_errata_dsu_936184 -.equ erratum_cortex_a75_936184_wa, errata_dsu_936184_wa -add_erratum_entry cortex_a75, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +check_erratum_custom_start cortex_a75, ERRATUM(798953) + check_errata_dsu_798953_impl + ret +check_erratum_custom_end cortex_a75, ERRATUM(798953) + +workaround_reset_start cortex_a75, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end cortex_a75, ERRATUM(936184) + +check_erratum_custom_start cortex_a75, ERRATUM(936184) + check_errata_dsu_936184_impl + ret +check_erratum_custom_end cortex_a75, ERRATUM(936184) workaround_reset_start cortex_a75, CVE(2017, 5715), WORKAROUND_CVE_2017_5715 #if IMAGE_BL31 diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S index 017086aa9..1fd078959 100644 --- a/lib/cpus/aarch64/cortex_a76.S +++ b/lib/cpus/aarch64/cortex_a76.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include "wa_cve_2022_23960_bhb.S" @@ -431,23 +432,23 @@ check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 /* erratum has no workaround in the cpu. Generic code must take care */ add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET -/* ERRATA_DSU_798953 : - * The errata is defined in dsu_helpers.S but applies to cortex_a76 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. 
- */ -.equ check_erratum_cortex_a76_798953, check_errata_dsu_798953 -.equ erratum_cortex_a76_798953_wa, errata_dsu_798953_wa -add_erratum_entry cortex_a76, ERRATUM(798953), ERRATA_DSU_798953, APPLY_AT_RESET +workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953 + errata_dsu_798953_wa_impl +workaround_reset_end cortex_a76, ERRATUM(798953) -/* ERRATA_DSU_936184 : - * The errata is defined in dsu_helpers.S but applies to cortex_a76 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_cortex_a76_936184, check_errata_dsu_936184 -.equ erratum_cortex_a76_936184_wa, errata_dsu_936184_wa -add_erratum_entry cortex_a76, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +check_erratum_custom_start cortex_a76, ERRATUM(798953) + check_errata_dsu_798953_impl + ret +check_erratum_custom_end cortex_a76, ERRATUM(798953) + +workaround_reset_start cortex_a76, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end cortex_a76, ERRATUM(936184) + +check_erratum_custom_start cortex_a76, ERRATUM(936184) + check_errata_dsu_936184_impl + ret +check_erratum_custom_end cortex_a76, ERRATUM(936184) cpu_reset_func_start cortex_a76 diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S index c18ce3c0f..547c430cd 100644 --- a/lib/cpus/aarch64/cortex_x2.S +++ b/lib/cpus/aarch64/cortex_x2.S @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "wa_cve_2022_23960_bhb_vector.S" @@ -164,15 +165,14 @@ workaround_reset_end cortex_x2, CVE(2022, 23960) check_erratum_chosen cortex_x2, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -/* - * ERRATA_DSU_2313941 : - * The errata is defined in dsu_helpers.S but applies to cortex_x2 - * as well. Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. 
- */ -.equ check_erratum_cortex_x2_2313941, check_errata_dsu_2313941 -.equ erratum_cortex_x2_2313941_wa, errata_dsu_2313941_wa -add_erratum_entry cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET +workaround_reset_start cortex_x2, ERRATUM(2313941), ERRATA_DSU_2313941 + errata_dsu_2313941_wa_impl +workaround_reset_end cortex_x2, ERRATUM(2313941) + +check_erratum_custom_start cortex_x2, ERRATUM(2313941) + check_errata_dsu_2313941_impl + ret +check_erratum_custom_end cortex_x2, ERRATUM(2313941) /* ---------------------------------------------------- * HW will do the cache maintenance while powering down diff --git a/lib/cpus/aarch64/dsu_helpers.S b/lib/cpus/aarch64/dsu_helpers.S deleted file mode 100644 index 3c5bf2ea1..000000000 --- a/lib/cpus/aarch64/dsu_helpers.S +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright (c) 2019-2023, Arm Limited and Contributors. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include - - /* ----------------------------------------------------------------------- - * DSU erratum 798953 check function - * Checks the DSU variant, revision and configuration to determine if - * the erratum applies. Erratum applies on all configurations of the - * DSU and if revision-variant is r0p0. - * - * The erratum was fixed in r0p1. - * - * This function is called from both assembly and C environment. So it - * follows AAPCS. 
- * - * Clobbers: x0-x3 - * ----------------------------------------------------------------------- - */ - .globl check_errata_dsu_798953 - .globl errata_dsu_798953_wa - .globl dsu_pwr_dwn - -func check_errata_dsu_798953 - mov x2, #ERRATA_APPLIES - mov x3, #ERRATA_NOT_APPLIES - - /* Check if DSU is equal to r0p0 */ - mrs x1, CLUSTERIDR_EL1 - - /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ - ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\ - #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) - mov x1, #(0x0 << CLUSTERIDR_REV_SHIFT) - cmp x0, x1 - csel x0, x2, x3, EQ - ret -endfunc check_errata_dsu_798953 - - /* -------------------------------------------------- - * Errata Workaround for DSU erratum #798953. - * - * Can clobber only: x0-x8 - * -------------------------------------------------- - */ -func errata_dsu_798953_wa - mov x8, x30 - bl check_errata_dsu_798953 - cbz x0, 1f - - /* If erratum applies, disable high-level clock gating */ - mrs x0, CLUSTERACTLR_EL1 - orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_CLOCK_GATING - msr CLUSTERACTLR_EL1, x0 - isb -1: - ret x8 -endfunc errata_dsu_798953_wa - - /* ----------------------------------------------------------------------- - * DSU erratum 936184 check function - * Checks the DSU variant, revision and configuration to determine if - * the erratum applies. Erratum applies if ACP interface is present - * in the DSU and revision-variant < r2p0. - * - * The erratum was fixed in r2p0. - * - * This function is called from both assembly and C environment. So it - * follows AAPCS. - * - * Clobbers: x0-x4 - * ----------------------------------------------------------------------- - */ - .globl check_errata_dsu_936184 - .globl errata_dsu_936184_wa - .weak is_scu_present_in_dsu - - /* -------------------------------------------------------------------- - * Default behaviour respresents SCU is always present with DSU. - * CPUs can override this definition if required. 
- * - * Can clobber only: x0-x3 - * -------------------------------------------------------------------- - */ -func is_scu_present_in_dsu - mov x0, #1 - ret -endfunc is_scu_present_in_dsu - -func check_errata_dsu_936184 - mov x4, x30 - bl is_scu_present_in_dsu - cmp x0, xzr - /* Default error status */ - mov x0, #ERRATA_NOT_APPLIES - - /* If SCU is not present, return without applying patch */ - b.eq 1f - - /* Erratum applies only if DSU has the ACP interface */ - mrs x1, CLUSTERCFR_EL1 - ubfx x1, x1, #CLUSTERCFR_ACP_SHIFT, #1 - cbz x1, 1f - - /* If ACP is present, check if DSU is older than r2p0 */ - mrs x1, CLUSTERIDR_EL1 - - /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ - ubfx x2, x1, #CLUSTERIDR_REV_SHIFT,\ - #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) - cmp x2, #(0x2 << CLUSTERIDR_VAR_SHIFT) - b.hs 1f - mov x0, #ERRATA_APPLIES -1: - ret x4 -endfunc check_errata_dsu_936184 - - /* -------------------------------------------------- - * Errata Workaround for DSU erratum #936184. - * - * Can clobber only: x0-x8 - * -------------------------------------------------- - */ -func errata_dsu_936184_wa - mov x8, x30 - bl check_errata_dsu_936184 - cbz x0, 1f - - /* If erratum applies, we set a mask to a DSU control register */ - mrs x0, CLUSTERACTLR_EL1 - ldr x1, =DSU_ERRATA_936184_MASK - orr x0, x0, x1 - msr CLUSTERACTLR_EL1, x0 - isb -1: - ret x8 -endfunc errata_dsu_936184_wa - - /* ----------------------------------------------------------------------- - * DSU erratum 2313941 check function - * Checks the DSU variant, revision and configuration to determine if - * the erratum applies. Erratum applies on all configurations of the - * DSU and if revision-variant is r0p0, r1p0, r2p0, r2p1, r3p0, r3p1. - * - * The erratum is still open. - * - * This function is called from both assembly and C environment. So it - * follows AAPCS. 
- * - * Clobbers: x0-x4 - * ----------------------------------------------------------------------- - */ - .globl check_errata_dsu_2313941 - .globl errata_dsu_2313941_wa - -func check_errata_dsu_2313941 - mov x4, x30 - bl is_scu_present_in_dsu - cmp x0, xzr - /* Default error status */ - mov x0, #ERRATA_NOT_APPLIES - - /* If SCU is not present, return without applying patch */ - b.eq 1f - - mov x2, #ERRATA_APPLIES - mov x3, #ERRATA_NOT_APPLIES - - /* Check if DSU version is less than or equal to r3p1 */ - mrs x1, CLUSTERIDR_EL1 - - /* DSU variant and revision bitfields in CLUSTERIDR are adjacent */ - ubfx x0, x1, #CLUSTERIDR_REV_SHIFT,\ - #(CLUSTERIDR_REV_BITS + CLUSTERIDR_VAR_BITS) - mov x1, #(0x31 << CLUSTERIDR_REV_SHIFT) - cmp x0, x1 - csel x0, x2, x3, LS -1: - ret x4 -endfunc check_errata_dsu_2313941 - - /* -------------------------------------------------- - * Errata Workaround for DSU erratum #2313941. - * - * Can clobber only: x0-x8 - * -------------------------------------------------- - */ -func errata_dsu_2313941_wa - mov x8, x30 - bl check_errata_dsu_2313941 - cbz x0, 1f - - /* If erratum applies, disable high-level clock gating */ - mrs x0, CLUSTERACTLR_EL1 - orr x0, x0, #CLUSTERACTLR_EL1_DISABLE_SCLK_GATING - msr CLUSTERACTLR_EL1, x0 - isb -1: - ret x8 -endfunc errata_dsu_2313941_wa - - /* --------------------------------------------- - * controls power features of the cluster - * 1. Cache portion power not request - * 2. Disable the retention circuit - * --------------------------------------------- - */ -func dsu_pwr_dwn - msr CLUSTERPWRCTLR_EL1, xzr - isb - ret -endfunc dsu_pwr_dwn diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S index 4bc95d054..c6dd11783 100644 --- a/lib/cpus/aarch64/neoverse_e1.S +++ b/lib/cpus/aarch64/neoverse_e1.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -21,15 +22,16 @@ #error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif -/* - * ERRATA_DSU_936184: - * The errata is defined in dsu_helpers.S and applies to neoverse_e1. - * Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_neoverse_e1_936184, check_errata_dsu_936184 -.equ erratum_neoverse_e1_936184_wa, errata_dsu_936184_wa -add_erratum_entry neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end neoverse_e1, ERRATUM(936184) + +check_erratum_custom_start neoverse_e1, ERRATUM(936184) + branch_if_scu_not_present 2f /* label 1 is used in the macro */ + check_errata_dsu_936184_impl + 2: + ret +check_erratum_custom_end neoverse_e1, ERRATUM(936184) cpu_reset_func_start neoverse_e1 cpu_reset_func_end neoverse_e1 diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S index f727226bd..638d0d3a3 100644 --- a/lib/cpus/aarch64/neoverse_n1.S +++ b/lib/cpus/aarch64/neoverse_n1.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -8,6 +8,7 @@ #include #include #include +#include #include #include "wa_cve_2022_23960_bhb_vector.S" @@ -27,15 +28,16 @@ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1 #endif /* WORKAROUND_CVE_2022_23960 */ -/* - * ERRATA_DSU_936184: - * The errata is defined in dsu_helpers.S and applies to Neoverse N1. 
- * Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. - */ -.equ check_erratum_neoverse_n1_936184, check_errata_dsu_936184 -.equ erratum_neoverse_n1_936184_wa, errata_dsu_936184_wa -add_erratum_entry neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184, APPLY_AT_RESET +workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184 + errata_dsu_936184_wa_impl +workaround_reset_end neoverse_n1, ERRATUM(936184) + +check_erratum_custom_start neoverse_n1, ERRATUM(936184) + branch_if_scu_not_present 2f /* label 1 is used in the macro */ + check_errata_dsu_936184_impl + 2: + ret +check_erratum_custom_end neoverse_n1, ERRATUM(936184) workaround_reset_start neoverse_n1, ERRATUM(1043202), ERRATA_N1_1043202 /* Apply instruction patching sequence */ diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S index 9ffe98fbb..fd6877dab 100644 --- a/lib/cpus/aarch64/neoverse_n2.S +++ b/lib/cpus/aarch64/neoverse_n2.S @@ -7,6 +7,7 @@ #include #include #include +#include #include #include "wa_cve_2022_23960_bhb_vector.S" @@ -30,15 +31,16 @@ check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3) wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2 #endif /* WORKAROUND_CVE_2022_23960 */ -/* - * ERRATA_DSU_2313941: - * The errata is defined in dsu_helpers.S and applies to Neoverse N2. - * Henceforth creating symbolic names to the already existing errata - * workaround functions to get them registered under the Errata Framework. 
- */ -.equ check_erratum_neoverse_n2_2313941, check_errata_dsu_2313941 -.equ erratum_neoverse_n2_2313941_wa, errata_dsu_2313941_wa -add_erratum_entry neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941, APPLY_AT_RESET +workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941 + errata_dsu_2313941_wa_impl +workaround_reset_end neoverse_n2, ERRATUM(2313941) + +check_erratum_custom_start neoverse_n2, ERRATUM(2313941) + branch_if_scu_not_present 2f /* label 1 is used in the macro */ + check_errata_dsu_2313941_impl + 2: + ret +check_erratum_custom_end neoverse_n2, ERRATUM(2313941) /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start neoverse_n2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 diff --git a/lib/cpus/aarch64/neoverse_n_common.S b/lib/cpus/aarch64/neoverse_n_common.S deleted file mode 100644 index b816342ba..000000000 --- a/lib/cpus/aarch64/neoverse_n_common.S +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (c) 2020, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include - - .global is_scu_present_in_dsu - -/* - * Check if the SCU L3 Unit is present on the DSU - * 1-> SCU present - * 0-> SCU not present - * - * This function is implemented as weak on dsu_helpers.S and must be - * overwritten for Neoverse Nx cores. - */ - -func is_scu_present_in_dsu - mrs x0, CPUCFR_EL1 - ubfx x0, x0, #SCU_SHIFT, #1 - eor x0, x0, #1 - ret -endfunc is_scu_present_in_dsu diff --git a/plat/arm/board/arm_fpga/platform.mk b/plat/arm/board/arm_fpga/platform.mk index 967bf2171..31835f171 100644 --- a/plat/arm/board/arm_fpga/platform.mk +++ b/plat/arm/board/arm_fpga/platform.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2021-2024, Arm Limited. All rights reserved. +# Copyright (c) 2021-2025, Arm Limited. All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause # @@ -78,7 +78,6 @@ else lib/cpus/aarch64/cortex_a720.S \ lib/cpus/aarch64/cortex_x3.S \ lib/cpus/aarch64/cortex_x4.S \ - lib/cpus/aarch64/neoverse_n_common.S \ lib/cpus/aarch64/neoverse_n1.S \ lib/cpus/aarch64/neoverse_n2.S \ lib/cpus/aarch64/neoverse_v1.S \ diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk index 7b55571b0..7bd3e742a 100644 --- a/plat/arm/board/fvp/platform.mk +++ b/plat/arm/board/fvp/platform.mk @@ -210,7 +210,6 @@ else lib/cpus/aarch64/cortex_a715.S \ lib/cpus/aarch64/cortex_a720.S \ lib/cpus/aarch64/cortex_a720_ae.S \ - lib/cpus/aarch64/neoverse_n_common.S \ lib/cpus/aarch64/neoverse_n1.S \ lib/cpus/aarch64/neoverse_n2.S \ lib/cpus/aarch64/neoverse_v1.S \ diff --git a/plat/qemu/common/common.mk b/plat/qemu/common/common.mk index 2dc89bccb..da981e5c9 100644 --- a/plat/qemu/common/common.mk +++ b/plat/qemu/common/common.mk @@ -23,7 +23,6 @@ QEMU_CPU_LIBS := lib/cpus/aarch64/aem_generic.S \ lib/cpus/aarch64/cortex_a72.S \ lib/cpus/aarch64/cortex_a76.S \ lib/cpus/aarch64/cortex_a710.S \ - lib/cpus/aarch64/neoverse_n_common.S \ lib/cpus/aarch64/neoverse_n1.S \ lib/cpus/aarch64/neoverse_v1.S \ lib/cpus/aarch64/neoverse_n2.S \ From 7791ce21a6dbbe57952bbaa04e636d6aa6caf4fe Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Tue, 21 Jan 2025 08:44:52 +0000 Subject: [PATCH 05/10] perf(cpus): inline cpu_rev_var checks We strive to apply errata as close to reset as possible with as few things enabled as possible. Importantly, the I-cache will not be enabled. This means that repeated branches to these tiny functions must be re-fetched all the way from memory each time which has glacial speed. Cores are allowed to fetch things ahead of time though as long as execution is fairly linear. So we can trade a little bit of space (3 to 7 instructions per erratum) to keep things linear and not have to go to memory. 
While we're at it, optimise the cpu_rev_var_{ls, hs, range} functions to take up less space. Dropping the moves allows for a bit of assembly magic that produces the same result in 2 and 3 instructions respectively. Change-Id: I51608352f23b2244ea7a99e76c10892d257f12bf Signed-off-by: Boyan Karatotev --- include/lib/cpus/aarch64/cpu_macros.S | 51 +++++++++++++++++++------ lib/cpus/aarch64/cortex_a55.S | 3 +- lib/cpus/aarch64/cortex_a76.S | 10 ++--- lib/cpus/aarch64/cortex_a77.S | 5 +-- lib/cpus/aarch64/cpu_helpers.S | 55 +-------------------------- 5 files changed, 48 insertions(+), 76 deletions(-) diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index ac26fd74b..5a8219e27 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -479,8 +479,35 @@ .endm /* - * Helpers to select which revisions errata apply to. Don't leave a link - * register as the cpu_rev_var_*** will call the ret and we can save on one. + * Helpers to report if an erratum applies. Compares the given revision variant + * to the given value. Return ERRATA_APPLIES or ERRATA_NOT_APPLIES accordingly. + * + * _rev_num: the given revision variant. Or + * _rev_num_lo,_rev_num_hi: the lower and upper bounds of the revision variant + * + * in body: + * clobber: x0 + * argument: x0 - cpu_rev_var + */ +.macro cpu_rev_var_ls _rev_num:req + cmp x0, #\_rev_num + cset x0, ls +.endm + +.macro cpu_rev_var_hs _rev_num:req + cmp x0, #\_rev_num + cset x0, hs +.endm + +.macro cpu_rev_var_range _rev_num_lo:req, _rev_num_hi:req + cmp x0, #\_rev_num_lo + mov x1, #\_rev_num_hi + ccmp x0, x1, #2, hs + cset x0, ls +.endm + +/* + * Helpers to select which revisions errata apply to. 
* * _cpu: * Name of cpu as given to declare_cpu_ops @@ -496,28 +523,27 @@ * Revision to apply to * * in body: - * clobber: x0 to x4 + * clobber: x0 to x1 * argument: x0 - cpu_rev_var */ .macro check_erratum_ls _cpu:req, _cve:req, _id:req, _rev_num:req func check_erratum_\_cpu\()_\_id - mov x1, #\_rev_num - b cpu_rev_var_ls + cpu_rev_var_ls \_rev_num + ret endfunc check_erratum_\_cpu\()_\_id .endm .macro check_erratum_hs _cpu:req, _cve:req, _id:req, _rev_num:req func check_erratum_\_cpu\()_\_id - mov x1, #\_rev_num - b cpu_rev_var_hs + cpu_rev_var_hs \_rev_num + ret endfunc check_erratum_\_cpu\()_\_id .endm .macro check_erratum_range _cpu:req, _cve:req, _id:req, _rev_num_lo:req, _rev_num_hi:req func check_erratum_\_cpu\()_\_id - mov x1, #\_rev_num_lo - mov x2, #\_rev_num_hi - b cpu_rev_var_range + cpu_rev_var_range \_rev_num_lo, \_rev_num_hi + ret endfunc check_erratum_\_cpu\()_\_id .endm @@ -532,7 +558,10 @@ endfunc check_erratum_\_cpu\()_\_id .endm -/* provide a shorthand for the name format for annoying errata */ +/* + * provide a shorthand for the name format for annoying errata + * body: clobber x0 to x3 + */ .macro check_erratum_custom_start _cpu:req, _cve:req, _id:req func check_erratum_\_cpu\()_\_id .endm diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S index f9e3edabc..2267d667b 100644 --- a/lib/cpus/aarch64/cortex_a55.S +++ b/lib/cpus/aarch64/cortex_a55.S @@ -51,8 +51,7 @@ workaround_reset_end cortex_a55, ERRATUM(778703) check_erratum_custom_start cortex_a55, ERRATUM(778703) mov x16, x30 - mov x1, #0x00 - bl cpu_rev_var_ls + cpu_rev_var_ls CPU_REV(0, 0) /* * Check that no private L2 cache is configured */ diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S index 1fd078959..589edceda 100644 --- a/lib/cpus/aarch64/cortex_a76.S +++ b/lib/cpus/aarch64/cortex_a76.S @@ -345,11 +345,10 @@ check_erratum_ls cortex_a76, ERRATUM(1275112), CPU_REV(3, 0) check_erratum_custom_start cortex_a76, ERRATUM(1286807) #if 
ERRATA_A76_1286807 mov x0, #ERRATA_APPLIES - ret #else - mov x1, #0x30 - b cpu_rev_var_ls + cpu_rev_var_ls CPU_REV(3, 0) #endif + ret check_erratum_custom_end cortex_a76, ERRATUM(1286807) workaround_reset_start cortex_a76, ERRATUM(1791580), ERRATA_A76_1791580 @@ -420,11 +419,10 @@ endfunc cortex_a76_disable_wa_cve_2018_3639 check_erratum_custom_start cortex_a76, ERRATUM(1165522) #if ERRATA_A76_1165522 mov x0, #ERRATA_APPLIES - ret #else - mov x1, #0x30 - b cpu_rev_var_ls + cpu_rev_var_ls CPU_REV(3, 0) #endif + ret check_erratum_custom_end cortex_a76, ERRATUM(1165522) check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S index 766bdc0c9..1759b7760 100644 --- a/lib/cpus/aarch64/cortex_a77.S +++ b/lib/cpus/aarch64/cortex_a77.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2018-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -36,8 +36,7 @@ check_erratum_ls cortex_a77, CVE(2024, 5660), CPU_REV(1, 1) workaround_reset_start cortex_a77, ERRATUM(1508412), ERRATA_A77_1508412 /* move cpu revision in again and compare against r0p0 */ mov x0, x7 - mov x1, #CPU_REV(0, 0) - bl cpu_rev_var_ls + cpu_rev_var_ls CPU_REV(0, 0) cbz x0, 1f ldr x0, =0x0 diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 0f9a3b8cb..537f71536 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2023, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -236,59 +236,6 @@ func cpu_get_rev_var ret endfunc cpu_get_rev_var -/* - * Compare the CPU's revision-variant (x0) with a given value (x1), for errata - * application purposes. 
If the revision-variant is less than or same as a given - * value, indicates that errata applies; otherwise not. - * - * Shall clobber: x0-x3 - */ - .globl cpu_rev_var_ls -func cpu_rev_var_ls - mov x2, #ERRATA_APPLIES - mov x3, #ERRATA_NOT_APPLIES - cmp x0, x1 - csel x0, x2, x3, ls - ret -endfunc cpu_rev_var_ls - -/* - * Compare the CPU's revision-variant (x0) with a given value (x1), for errata - * application purposes. If the revision-variant is higher than or same as a - * given value, indicates that errata applies; otherwise not. - * - * Shall clobber: x0-x3 - */ - .globl cpu_rev_var_hs -func cpu_rev_var_hs - mov x2, #ERRATA_APPLIES - mov x3, #ERRATA_NOT_APPLIES - cmp x0, x1 - csel x0, x2, x3, hs - ret -endfunc cpu_rev_var_hs - -/* - * Compare the CPU's revision-variant (x0) with a given range (x1 - x2), for errata - * application purposes. If the revision-variant is between or includes the given - * values, this indicates that errata applies; otherwise not. - * - * Shall clobber: x0-x4 - */ - .globl cpu_rev_var_range -func cpu_rev_var_range - mov x3, #ERRATA_APPLIES - mov x4, #ERRATA_NOT_APPLIES - cmp x0, x1 - csel x1, x3, x4, hs - cbz x1, 1f - cmp x0, x2 - csel x1, x3, x4, ls -1: - mov x0, x1 - ret -endfunc cpu_rev_var_range - /* * int check_wa_cve_2017_5715(void); * From 36eeb59f9eb0c2e966bd41b02c0dc588faffce35 Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Wed, 4 Dec 2024 15:25:27 +0000 Subject: [PATCH 06/10] perf(cpus): inline the cpu_get_rev_var call Similar to the cpu_rev_var_xy functions, branching far away so early in the reset sequence incurs significant slowdowns. Inline the function. 
Change-Id: Ifc349015902cd803e11a1946208141bfe7606b89 Signed-off-by: Boyan Karatotev --- include/lib/cpus/aarch64/cpu_macros.S | 28 ++++++++++++++++++++++++--- lib/cpus/aarch64/cpu_helpers.S | 16 +-------------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index 5a8219e27..17592d349 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -440,6 +440,29 @@ msr \_reg, x0 .endm +/* + * Extract CPU revision and variant, and combine them into a single numeric for + * easier comparison. + * + * _res: + * register where the result will be placed + * _tmp: + * register to clobber for temporaries + */ +.macro get_rev_var _res:req, _tmp:req + mrs \_tmp, midr_el1 + + /* + * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them + * as variant[7:4] and revision[3:0] of x0. + * + * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then + * extract x1[3:0] into x0[3:0] retaining other bits. 
+ */ + ubfx \_res, \_tmp, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS) + bfxil \_res, \_tmp, #MIDR_REV_SHIFT, #MIDR_REV_BITS +.endm + /* * Apply erratum * @@ -560,7 +583,7 @@ /* * provide a shorthand for the name format for annoying errata - * body: clobber x0 to x3 + * body: clobber x0 to x4 */ .macro check_erratum_custom_start _cpu:req, _cve:req, _id:req func check_erratum_\_cpu\()_\_id @@ -588,8 +611,7 @@ .macro cpu_reset_func_start _cpu:req func \_cpu\()_reset_func mov x15, x30 - bl cpu_get_rev_var - mov x14, x0 + get_rev_var x14, x0 /* short circuit the location to avoid searching the list */ adrp x12, \_cpu\()_errata_list_start diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index 537f71536..ead91c388 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -216,23 +216,9 @@ error_exit: ret endfunc get_cpu_ops_ptr -/* - * Extract CPU revision and variant, and combine them into a single numeric for - * easier comparison. - */ .globl cpu_get_rev_var func cpu_get_rev_var - mrs x1, midr_el1 - - /* - * Extract the variant[23:20] and revision[3:0] from MIDR, and pack them - * as variant[7:4] and revision[3:0] of x0. - * - * First extract x1[23:16] to x0[7:0] and zero fill the rest. Then - * extract x1[3:0] into x0[3:0] retaining other bits. - */ - ubfx x0, x1, #(MIDR_VAR_SHIFT - MIDR_REV_BITS), #(MIDR_REV_BITS + MIDR_VAR_BITS) - bfxil x0, x1, #MIDR_REV_SHIFT, #MIDR_REV_BITS + get_rev_var x0, x1 ret endfunc cpu_get_rev_var From 0d020822ae88b8623fa6c9c55973f0045194dcef Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Tue, 19 Nov 2024 11:27:01 +0000 Subject: [PATCH 07/10] perf(cpus): inline the reset function Similar to the cpu_rev_var and cpu_ger_rev_var functions, inline the call_reset_handler handler. This way we skip the costly branch at no extra cost as this is the only place where this is called. While we're at it, drop the options for CPU_NO_RESET_FUNC. 
The only cpus that need that are virtual cpus which can spare the tiny bit of performance lost. The rest are real cores which can save on the check for zero. Now is a good time to put the assert for a missing cpu in the get_cpu_ops_ptr function so that it's a bit better encapsulated. Change-Id: Ia7c3dcd13b75e5d7c8bafad4698994ea65f42406 Signed-off-by: Boyan Karatotev --- docs/design/firmware-design.rst | 6 +-- include/arch/aarch64/asm_macros.S | 23 +++++++++++ include/arch/aarch64/el2_common_macros.S | 4 +- include/arch/aarch64/el3_common_macros.S | 12 +----- include/lib/cpus/aarch32/cpu_macros.S | 5 +-- include/lib/cpus/aarch64/cpu_macros.S | 3 +- include/lib/cpus/cpu_ops.h | 2 - lib/cpus/aarch32/aem_generic.S | 7 +++- lib/cpus/aarch64/a64fx.S | 5 ++- lib/cpus/aarch64/aem_generic.S | 9 +++-- lib/cpus/aarch64/cpu_helpers.S | 49 ++++-------------------- lib/cpus/aarch64/generic.S | 4 +- lib/cpus/aarch64/qemu_max.S | 7 +++- 13 files changed, 64 insertions(+), 72 deletions(-) diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst index cda80ca4c..975c1f2fa 100644 --- a/docs/design/firmware-design.rst +++ b/docs/design/firmware-design.rst @@ -247,7 +247,7 @@ BL1 performs minimal architectural initialization as follows. - CPU initialization - BL1 calls the ``reset_handler()`` function which in turn calls the CPU + BL1 calls the ``reset_handler`` macro/function which in turn calls the CPU specific reset handler function (see the section: "CPU specific operations framework"). @@ -1337,7 +1337,7 @@ Guidelines for Reset Handlers TF-A implements a framework that allows CPU and platform ports to perform actions very early after a CPU is released from reset in both the cold and warm -boot paths. This is done by calling the ``reset_handler()`` function in both +boot paths. This is done by calling the ``reset_handler`` macro/function in both the BL1 and BL31 images. It in turn calls the platform and CPU specific reset handling functions. 
@@ -2904,7 +2904,7 @@ kernel at boot time. These can be found in the ``fdts`` directory. -------------- -*Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.* +*Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.* .. _SMCCC: https://developer.arm.com/docs/den0028/latest .. _PSCI: https://developer.arm.com/documentation/den0022/latest/ diff --git a/include/arch/aarch64/asm_macros.S b/include/arch/aarch64/asm_macros.S index ff0127819..dce07d9c0 100644 --- a/include/arch/aarch64/asm_macros.S +++ b/include/arch/aarch64/asm_macros.S @@ -8,6 +8,7 @@ #include #include +#include #include /* @@ -340,4 +341,26 @@ mrs \reg, ID_AA64ISAR2_EL1 ands \reg, \reg, #(ID_AA64ISAR2_SYSREG128_MASK << ID_AA64ISAR2_SYSREG128_SHIFT) .endm + +.macro call_reset_handler +#if !(defined(IMAGE_BL2) && ENABLE_RME) + /* --------------------------------------------------------------------- + * It is a cold boot. + * Perform any processor specific actions upon reset e.g. cache, TLB + * invalidations etc. + * --------------------------------------------------------------------- + */ + /* The plat_reset_handler can clobber x0 - x18, x30 */ + bl plat_reset_handler + + /* Get the matching cpu_ops pointer */ + bl get_cpu_ops_ptr + + /* Get the cpu_ops reset handler */ + ldr x2, [x0, #CPU_RESET_FUNC] + + /* The cpu_ops reset handler can clobber x0 - x19, x30 */ + blr x2 +#endif +.endm #endif /* ASM_MACROS_S */ diff --git a/include/arch/aarch64/el2_common_macros.S b/include/arch/aarch64/el2_common_macros.S index b9b0e3db6..5db683124 100644 --- a/include/arch/aarch64/el2_common_macros.S +++ b/include/arch/aarch64/el2_common_macros.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2021-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -293,7 +293,7 @@ * invalidations etc. 
* --------------------------------------------------------------------- */ - bl reset_handler + call_reset_handler el2_arch_init_common diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S index 204625cee..fd16fb5cb 100644 --- a/include/arch/aarch64/el3_common_macros.S +++ b/include/arch/aarch64/el3_common_macros.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -219,15 +219,7 @@ msr vbar_el3, x0 isb -#if !(defined(IMAGE_BL2) && ENABLE_RME) - /* --------------------------------------------------------------------- - * It is a cold boot. - * Perform any processor specific actions upon reset e.g. cache, TLB - * invalidations etc. - * --------------------------------------------------------------------- - */ - bl reset_handler -#endif + call_reset_handler el3_arch_init_common diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S index cfa5831fc..a878a5f60 100644 --- a/include/lib/cpus/aarch32/cpu_macros.S +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -48,8 +48,7 @@ * _midr: * Numeric value expected to read from CPU's MIDR * _resetfunc: - * Reset function for the CPU. If there's no CPU reset function, - * specify CPU_NO_RESET_FUNC + * Reset function for the CPU * _power_down_ops: * Comma-separated list of functions to perform power-down * operatios on the CPU. 
At least one, and up to diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index 17592d349..c8f4bde25 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -49,8 +49,7 @@ * _midr: * Numeric value expected to read from CPU's MIDR * _resetfunc: - * Reset function for the CPU. If there's no CPU reset function, - * specify CPU_NO_RESET_FUNC + * Reset function for the CPU. * _extra1: * This is a placeholder for future per CPU operations. Currently, * some CPUs use this entry to set a test function to determine if diff --git a/include/lib/cpus/cpu_ops.h b/include/lib/cpus/cpu_ops.h index c1bdf8d01..0b08919f4 100644 --- a/include/lib/cpus/cpu_ops.h +++ b/include/lib/cpus/cpu_ops.h @@ -21,8 +21,6 @@ /* The number of CPU operations allowed */ #define CPU_MAX_PWR_DWN_OPS 2 -/* Special constant to specify that CPU has no reset function */ -#define CPU_NO_RESET_FUNC 0 #if __aarch64__ #define CPU_NO_EXTRA1_FUNC 0 diff --git a/lib/cpus/aarch32/aem_generic.S b/lib/cpus/aarch32/aem_generic.S index f4dc0d172..a42457534 100644 --- a/lib/cpus/aarch32/aem_generic.S +++ b/lib/cpus/aarch32/aem_generic.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -40,8 +40,11 @@ func aem_generic_cluster_pwr_dwn b dcsw_op_all endfunc aem_generic_cluster_pwr_dwn +func aem_generic_reset_func + bx lr +endfunc aem_generic_reset_func /* cpu_ops for Base AEM FVP */ -declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \ +declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \ aem_generic_core_pwr_dwn, \ aem_generic_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/a64fx.S b/lib/cpus/aarch64/a64fx.S index 4893a44d7..a53467aa1 100644 --- a/lib/cpus/aarch64/a64fx.S +++ b/lib/cpus/aarch64/a64fx.S @@ -29,12 +29,15 @@ endfunc a64fx_cluster_pwr_dwn a64fx_regs: /* The ascii list of register names to be reported */ .asciz "" +cpu_reset_func_start a64fx +cpu_reset_func_end a64fx + func a64fx_cpu_reg_dump adr x6, a64fx_regs ret endfunc a64fx_cpu_reg_dump -declare_cpu_ops a64fx, A64FX_MIDR, CPU_NO_RESET_FUNC \ +declare_cpu_ops a64fx, A64FX_MIDR, a64fx_reset_func \ a64fx_core_pwr_dwn, \ a64fx_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S index d5634cff8..9002da65b 100644 --- a/lib/cpus/aarch64/aem_generic.S +++ b/lib/cpus/aarch64/aem_generic.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -74,6 +74,9 @@ func aem_generic_cluster_pwr_dwn b dcsw_op_all endfunc aem_generic_cluster_pwr_dwn +cpu_reset_func_start aem_generic +cpu_reset_func_end aem_generic + /* --------------------------------------------- * This function provides cpu specific * register information for crash reporting. 
@@ -94,11 +97,11 @@ endfunc aem_generic_cpu_reg_dump /* cpu_ops for Base AEM FVP */ -declare_cpu_ops aem_generic, BASE_AEM_MIDR, CPU_NO_RESET_FUNC, \ +declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \ aem_generic_core_pwr_dwn, \ aem_generic_cluster_pwr_dwn /* cpu_ops for Foundation FVP */ -declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, CPU_NO_RESET_FUNC, \ +declare_cpu_ops aem_generic, FOUNDATION_AEM_MIDR, aem_generic_reset_func, \ aem_generic_core_pwr_dwn, \ aem_generic_cluster_pwr_dwn diff --git a/lib/cpus/aarch64/cpu_helpers.S b/lib/cpus/aarch64/cpu_helpers.S index ead91c388..e60842211 100644 --- a/lib/cpus/aarch64/cpu_helpers.S +++ b/lib/cpus/aarch64/cpu_helpers.S @@ -14,47 +14,6 @@ #include #include - /* Reset fn is needed in BL at reset vector */ -#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || \ - (defined(IMAGE_BL2) && RESET_TO_BL2) - /* - * The reset handler common to all platforms. After a matching - * cpu_ops structure entry is found, the correponding reset_handler - * in the cpu_ops is invoked. - * Clobbers: x0 - x19, x30 - */ - .globl reset_handler -func reset_handler - mov x19, x30 - - /* The plat_reset_handler can clobber x0 - x18, x30 */ - bl plat_reset_handler - - /* Get the matching cpu_ops pointer */ - bl get_cpu_ops_ptr - -#if ENABLE_ASSERTIONS - /* - * Assert if invalid cpu_ops obtained. If this is not valid, it may - * suggest that the proper CPU file hasn't been included. - */ - cmp x0, #0 - ASM_ASSERT(ne) -#endif - - /* Get the cpu_ops reset handler */ - ldr x2, [x0, #CPU_RESET_FUNC] - mov x30, x19 - cbz x2, 1f - - /* The cpu_ops reset handler can clobber x0 - x19, x30 */ - br x2 -1: - ret -endfunc reset_handler - -#endif - #ifdef IMAGE_BL31 /* The power down core and cluster is needed only in BL31 */ /* * void prepare_cpu_pwr_dwn(unsigned int power_level) @@ -212,6 +171,14 @@ search_def_ptr: mov x2, #0 b 1b error_exit: +#endif +#if ENABLE_ASSERTIONS + /* + * Assert if invalid cpu_ops obtained. 
If this is not valid, it may + * suggest that the proper CPU file hasn't been included. + */ + cmp x0, #0 + ASM_ASSERT(ne) #endif ret endfunc get_cpu_ops_ptr diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S index 5d7a857e0..849056f49 100644 --- a/lib/cpus/aarch64/generic.S +++ b/lib/cpus/aarch64/generic.S @@ -80,7 +80,9 @@ endfunc generic_cluster_pwr_dwn * --------------------------------------------- */ .equ generic_cpu_reg_dump, 0 -.equ generic_reset_func, 0 + +cpu_reset_func_start generic +cpu_reset_func_end generic declare_cpu_ops generic, AARCH64_GENERIC_MIDR, \ generic_reset_func, \ diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S index fb03cf15b..529bb4f2a 100644 --- a/lib/cpus/aarch64/qemu_max.S +++ b/lib/cpus/aarch64/qemu_max.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -47,6 +47,9 @@ func qemu_max_cluster_pwr_dwn b dcsw_op_all endfunc qemu_max_cluster_pwr_dwn +cpu_reset_func_start qemu_max +cpu_reset_func_end qemu_max + /* --------------------------------------------- * This function provides cpu specific * register information for crash reporting. @@ -67,6 +70,6 @@ endfunc qemu_max_cpu_reg_dump /* cpu_ops for QEMU MAX */ -declare_cpu_ops qemu_max, QEMU_MAX_MIDR, CPU_NO_RESET_FUNC, \ +declare_cpu_ops qemu_max, QEMU_MAX_MIDR, qemu_max_reset_func, \ qemu_max_core_pwr_dwn, \ qemu_max_cluster_pwr_dwn From b07c317f678aa9812427c9ae823cec22cecc6814 Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Tue, 19 Nov 2024 11:27:01 +0000 Subject: [PATCH 08/10] perf(cpus): inline the init_cpu_data_ptr function Similar to the reset function inline, inline this too to not do a costly branch with no extra cost. 
Change-Id: I54cc399e570e9d0f373ae13c7224d32dbdfae1e5 Signed-off-by: Boyan Karatotev --- include/arch/aarch64/el3_common_macros.S | 4 +++- include/lib/el3_runtime/cpu_data.h | 3 +-- lib/el3_runtime/aarch64/cpu_data.S | 22 +--------------------- 3 files changed, 5 insertions(+), 24 deletions(-) diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S index fd16fb5cb..4864596ac 100644 --- a/include/arch/aarch64/el3_common_macros.S +++ b/include/arch/aarch64/el3_common_macros.S @@ -49,7 +49,9 @@ * due to a NULL TPIDR_EL3. * --------------------------------------------------------------------- */ - bl init_cpu_data_ptr + bl plat_my_core_pos + bl _cpu_data_by_index + msr tpidr_el3, x0 #endif /* IMAGE_BL31 */ /* --------------------------------------------------------------------- diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h index 8b5480674..e417f4506 100644 --- a/include/lib/el3_runtime/cpu_data.h +++ b/include/lib/el3_runtime/cpu_data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -230,7 +230,6 @@ static inline context_pas_t get_cpu_context_index(uint32_t security_state) * APIs for initialising and accessing per-cpu data *************************************************************************/ -void init_cpu_data_ptr(void); void init_cpu_ops(void); #define get_cpu_data(_m) _cpu_data()->_m diff --git a/lib/el3_runtime/aarch64/cpu_data.S b/lib/el3_runtime/aarch64/cpu_data.S index 313f8822e..02d94159d 100644 --- a/lib/el3_runtime/aarch64/cpu_data.S +++ b/lib/el3_runtime/aarch64/cpu_data.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2020, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -7,28 +7,8 @@ #include #include -.globl init_cpu_data_ptr .globl _cpu_data_by_index -/* ----------------------------------------------------------------- - * void init_cpu_data_ptr(void) - * - * Initialise the TPIDR_EL3 register to refer to the cpu_data_t - * for the calling CPU. This must be called before cm_get_cpu_data() - * - * This can be called without a valid stack. It assumes that - * plat_my_core_pos() does not clobber register x10. - * clobbers: x0, x1, x10 - * ----------------------------------------------------------------- - */ -func init_cpu_data_ptr - mov x10, x30 - bl plat_my_core_pos - bl _cpu_data_by_index - msr tpidr_el3, x0 - ret x10 -endfunc init_cpu_data_ptr - /* ----------------------------------------------------------------- * cpu_data_t *_cpu_data_by_index(uint32_t cpu_index) * From 89dba82dfa85fea03e7b2f6ad6a90fcd0aecce55 Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Wed, 22 Jan 2025 13:54:43 +0000 Subject: [PATCH 09/10] perf(cpus): make reset errata do fewer branches Errata application is painful for performance. For a start, it's done when the core has just come out of reset, which means branch predictors and caches will be empty so a branch to a workaround function must be fetched from memory and that round trip is very slow. Then it also runs with the I-cache off, which means that the loop to iterate over the workarounds must also be fetched from memory on each iteration. We can remove both branches. First, we can simply apply every erratum directly instead of defining a workaround function and jumping to it. Currently, no errata that need to be applied at both reset and runtime, with the same workaround function, exist. If the need arose in future, this should be achievable with a reset + runtime wrapper combo. Then, we can construct a function that applies each erratum linearly instead of looping over the list. 
If this function is part of the reset function, then the only "far" branches at reset will be for the checker functions. Importantly, this mitigates the slowdown even when an erratum is disabled. The result is ~50% speedup on N1SDP and ~20% on AArch64 Juno on wakeup from PSCI calls that end in powerdown. This is roughly back to the baseline of v2.9, before the errata framework regressed on performance (or a little better). It is important to note that there are other slowdowns since then that remain unknown. Change-Id: Ie4d5288a331b11fd648e5c4a0b652b74160b07b9 Signed-off-by: Boyan Karatotev --- docs/design/firmware-design.rst | 4 +- include/lib/cpus/aarch32/cpu_macros.S | 9 +- include/lib/cpus/aarch64/cpu_macros.S | 123 +++++++++++--------------- include/lib/cpus/errata.h | 14 ++- lib/cpus/aarch64/aem_generic.S | 2 + lib/cpus/aarch64/cortex_a35.S | 3 +- lib/cpus/aarch64/cortex_a510.S | 2 + lib/cpus/aarch64/cortex_a520.S | 2 + lib/cpus/aarch64/cortex_a53.S | 16 ++-- lib/cpus/aarch64/cortex_a55.S | 4 +- lib/cpus/aarch64/cortex_a57.S | 6 +- lib/cpus/aarch64/cortex_a65.S | 2 + lib/cpus/aarch64/cortex_a65ae.S | 2 + lib/cpus/aarch64/cortex_a710.S | 4 +- lib/cpus/aarch64/cortex_a715.S | 4 +- lib/cpus/aarch64/cortex_a72.S | 6 +- lib/cpus/aarch64/cortex_a720.S | 4 +- lib/cpus/aarch64/cortex_a720_ae.S | 4 +- lib/cpus/aarch64/cortex_a725.S | 4 +- lib/cpus/aarch64/cortex_a73.S | 4 +- lib/cpus/aarch64/cortex_a75.S | 2 + lib/cpus/aarch64/cortex_a76.S | 4 +- lib/cpus/aarch64/cortex_a76ae.S | 4 +- lib/cpus/aarch64/cortex_a77.S | 2 + lib/cpus/aarch64/cortex_a78.S | 4 +- lib/cpus/aarch64/cortex_a78_ae.S | 4 +- lib/cpus/aarch64/cortex_a78c.S | 4 +- lib/cpus/aarch64/cortex_alto.S | 4 +- lib/cpus/aarch64/cortex_arcadia.S | 4 +- lib/cpus/aarch64/cortex_gelas.S | 2 + lib/cpus/aarch64/cortex_x1.S | 2 + lib/cpus/aarch64/cortex_x2.S | 4 +- lib/cpus/aarch64/cortex_x3.S | 4 +- lib/cpus/aarch64/cortex_x4.S | 4 +- lib/cpus/aarch64/cortex_x925.S | 4 +- lib/cpus/aarch64/denver.S | 4 +- 
lib/cpus/aarch64/generic.S | 4 +- lib/cpus/aarch64/neoverse_e1.S | 2 + lib/cpus/aarch64/neoverse_n1.S | 2 + lib/cpus/aarch64/neoverse_n2.S | 4 +- lib/cpus/aarch64/neoverse_n3.S | 4 +- lib/cpus/aarch64/neoverse_v1.S | 2 + lib/cpus/aarch64/neoverse_v2.S | 2 + lib/cpus/aarch64/neoverse_v3.S | 4 +- lib/cpus/aarch64/nevis.S | 4 +- lib/cpus/aarch64/qemu_max.S | 2 + lib/cpus/aarch64/rainier.S | 4 +- lib/cpus/aarch64/travis.S | 2 + 48 files changed, 185 insertions(+), 126 deletions(-) diff --git a/docs/design/firmware-design.rst b/docs/design/firmware-design.rst index 975c1f2fa..cf8cbc760 100644 --- a/docs/design/firmware-design.rst +++ b/docs/design/firmware-design.rst @@ -1481,7 +1481,9 @@ the returned ``cpu_ops`` is then invoked which executes the required reset handling for that CPU and also any errata workarounds enabled by the platform. It should be defined using the ``cpu_reset_func_{start,end}`` macros and its -body may only clobber x0 to x14 with x14 being the cpu_rev parameter. +body may only clobber x0 to x14 with x14 being the cpu_rev parameter. The cpu +file should also include a call to ``cpu_reset_prologue`` at the start of the +file for errata to work correctly. CPU specific power down sequence ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/include/lib/cpus/aarch32/cpu_macros.S b/include/lib/cpus/aarch32/cpu_macros.S index a878a5f60..31f8811a5 100644 --- a/include/lib/cpus/aarch32/cpu_macros.S +++ b/include/lib/cpus/aarch32/cpu_macros.S @@ -172,11 +172,6 @@ \_cpu\()_errata_list_start: .endif - /* unused on AArch32, maintain for portability */ - .word 0 - /* TODO(errata ABI): this prevents all checker functions from - * being optimised away. 
Can be done away with unless the ABI - * needs them */ .ifnb \_special .word check_errata_\_special .elseif \_cve @@ -188,9 +183,7 @@ .word \_id .hword \_cve .byte \_chosen - /* TODO(errata ABI): mitigated field for known but unmitigated - * errata*/ - .byte 0x1 + .byte 0x0 /* alignment */ .popsection .endm diff --git a/include/lib/cpus/aarch64/cpu_macros.S b/include/lib/cpus/aarch64/cpu_macros.S index c8f4bde25..f3df59568 100644 --- a/include/lib/cpus/aarch64/cpu_macros.S +++ b/include/lib/cpus/aarch64/cpu_macros.S @@ -238,50 +238,22 @@ * _apply_at_reset: * Whether the erratum should be automatically applied at reset */ -.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req +.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req +#if REPORT_ERRATA || ERRATA_ABI_SUPPORT .pushsection .rodata.errata_entries .align 3 .ifndef \_cpu\()_errata_list_start \_cpu\()_errata_list_start: .endif - /* check if unused and compile out if no references */ - .if \_apply_at_reset && \_chosen - .quad erratum_\_cpu\()_\_id\()_wa - .else - .quad 0 - .endif - /* TODO(errata ABI): this prevents all checker functions from - * being optimised away. 
Can be done away with unless the ABI - * needs them */ .quad check_erratum_\_cpu\()_\_id /* Will fit CVEs with up to 10 character in the ID field */ .word \_id .hword \_cve .byte \_chosen - /* TODO(errata ABI): mitigated field for known but unmitigated - * errata */ - .byte 0x1 + .byte 0x0 /* alignment */ .popsection -.endm - -.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req - add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset - - func erratum_\_cpu\()_\_id\()_wa - mov x8, x30 - - /* save rev_var for workarounds that might need it but don't - * restore to x0 because few will care */ - mov x7, x0 - bl check_erratum_\_cpu\()_\_id - cbz x0, erratum_\_cpu\()_\_id\()_skip -.endm - -.macro _workaround_end _cpu:req, _id:req - erratum_\_cpu\()_\_id\()_skip: - ret x8 - endfunc erratum_\_cpu\()_\_id\()_wa +#endif .endm /******************************************************************************* @@ -311,7 +283,22 @@ * _wa clobbers: x0-x8 (PCS compliant) */ .macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req - _workaround_start \_cpu, \_cve, \_id, \_chosen, 1 + add_erratum_entry \_cpu, \_cve, \_id, \_chosen + + .if \_chosen + /* put errata directly into the reset function */ + .pushsection .text.asm.\_cpu\()_reset_func, "ax" + .else + /* or something else that will get garbage collected by the + * linker */ + .pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax" + .endif + /* revision is stored in x14, get it */ + mov x0, x14 + bl check_erratum_\_cpu\()_\_id + /* save rev_var for workarounds that might need it */ + mov x7, x14 + cbz x0, erratum_\_cpu\()_\_id\()_skip_reset .endm /* @@ -322,6 +309,10 @@ * for errata applied in generic code */ .macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr + add_erratum_entry \_cpu, \_cve, \_id, \_chosen + + func erratum_\_cpu\()_\_id\()_wa + mov x8, x30 /* * Let errata specify if they need MIDR checking. 
Sadly, storing the * MIDR in an .equ to retrieve automatically blows up as it stores some @@ -329,11 +320,15 @@ */ .ifnb \_midr jump_if_cpu_midr \_midr, 1f - b erratum_\_cpu\()_\_id\()_skip + b erratum_\_cpu\()_\_id\()_skip_runtime 1: .endif - _workaround_start \_cpu, \_cve, \_id, \_chosen, 0 + /* save rev_var for workarounds that might need it but don't + * restore to x0 because few will care */ + mov x7, x0 + bl check_erratum_\_cpu\()_\_id + cbz x0, erratum_\_cpu\()_\_id\()_skip_runtime .endm /* @@ -341,7 +336,8 @@ * is kept here so the same #define can be used as that macro */ .macro workaround_reset_end _cpu:req, _cve:req, _id:req - _workaround_end \_cpu, \_id + erratum_\_cpu\()_\_id\()_skip_reset: + .popsection .endm /* @@ -361,7 +357,9 @@ .ifb \_no_isb isb .endif - _workaround_end \_cpu, \_id + erratum_\_cpu\()_\_id\()_skip_runtime: + ret x8 + endfunc erratum_\_cpu\()_\_id\()_wa .endm /******************************************************************************* @@ -598,7 +596,21 @@ ******************************************************************************/ /* - * Wrapper to automatically apply all reset-time errata. Will end with an isb. + * Helper to register a cpu with the errata framework. Begins the definition of + * the reset function. + * + * _cpu: + * Name of cpu as given to declare_cpu_ops + */ +.macro cpu_reset_prologue _cpu:req + func \_cpu\()_reset_func + mov x15, x30 + get_rev_var x14, x0 +.endm + +/* + * Wrapper of the reset function to automatically apply all reset-time errata. + * Will end with an isb. 
* * _cpu: * Name of cpu as given to declare_cpu_ops @@ -608,38 +620,9 @@ * argument x14 - cpu_rev_var */ .macro cpu_reset_func_start _cpu:req - func \_cpu\()_reset_func - mov x15, x30 - get_rev_var x14, x0 - - /* short circuit the location to avoid searching the list */ - adrp x12, \_cpu\()_errata_list_start - add x12, x12, :lo12:\_cpu\()_errata_list_start - adrp x13, \_cpu\()_errata_list_end - add x13, x13, :lo12:\_cpu\()_errata_list_end - - errata_begin: - /* if head catches up with end of list, exit */ - cmp x12, x13 - b.eq errata_end - - ldr x10, [x12, #ERRATUM_WA_FUNC] - /* TODO(errata ABI): check mitigated and checker function fields - * for 0 */ - ldrb w11, [x12, #ERRATUM_CHOSEN] - - /* skip if not chosen */ - cbz x11, 1f - /* skip if runtime erratum */ - cbz x10, 1f - - /* put cpu revision in x0 and call workaround */ - mov x0, x14 - blr x10 - 1: - add x12, x12, #ERRATUM_ENTRY_SIZE - b errata_begin - errata_end: + /* the func/endfunc macros will change sections. So change the section + * back to the reset function's */ + .section .text.asm.\_cpu\()_reset_func, "ax" .endm .macro cpu_reset_func_end _cpu:req diff --git a/include/lib/cpus/errata.h b/include/lib/cpus/errata.h index b9166f713..10b949f6c 100644 --- a/include/lib/cpus/errata.h +++ b/include/lib/cpus/errata.h @@ -9,20 +9,18 @@ #include -#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE #define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE #define ERRATUM_ID_SIZE 4 #define ERRATUM_CVE_SIZE 2 #define ERRATUM_CHOSEN_SIZE 1 -#define ERRATUM_MITIGATED_SIZE 1 +#define ERRATUM_ALIGNMENT_SIZE 1 -#define ERRATUM_WA_FUNC 0 -#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE +#define ERRATUM_CHECK_FUNC 0 #define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE #define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE #define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE -#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE -#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE +#define 
ERRATUM_ALIGNMENT ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE +#define ERRATUM_ENTRY_SIZE ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE /* Errata status */ #define ERRATA_NOT_APPLIES 0 @@ -39,15 +37,13 @@ void print_errata_status(void); * uintptr_t will reflect the change and the alignment will be correct in both. */ struct erratum_entry { - uintptr_t (*wa_func)(uint64_t cpu_rev); uintptr_t (*check_func)(uint64_t cpu_rev); /* Will fit CVEs with up to 10 character in the ID field */ uint32_t id; /* Denote CVEs with their year or errata with 0 */ uint16_t cve; uint8_t chosen; - /* TODO(errata ABI): placeholder for the mitigated field */ - uint8_t _mitigated; + uint8_t _alignment; } __packed; CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE, diff --git a/lib/cpus/aarch64/aem_generic.S b/lib/cpus/aarch64/aem_generic.S index 9002da65b..9843943f5 100644 --- a/lib/cpus/aarch64/aem_generic.S +++ b/lib/cpus/aarch64/aem_generic.S @@ -8,6 +8,8 @@ #include #include +cpu_reset_prologue aem_generic + func aem_generic_core_pwr_dwn /* --------------------------------------------- * Disable the Data Cache. diff --git a/lib/cpus/aarch64/cortex_a35.S b/lib/cpus/aarch64/cortex_a35.S index c3d8c8dd5..40e620036 100644 --- a/lib/cpus/aarch64/cortex_a35.S +++ b/lib/cpus/aarch64/cortex_a35.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,7 @@ #include #include +cpu_reset_prologue cortex_a35 /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a510.S b/lib/cpus/aarch64/cortex_a510.S index b399bbc5f..cbeeb2b7e 100644 --- a/lib/cpus/aarch64/cortex_a510.S +++ b/lib/cpus/aarch64/cortex_a510.S @@ -22,6 +22,8 @@ #error "Cortex-A510 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a510 + workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240 /* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */ sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \ diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S index d9e654b0f..7d63dbbbf 100644 --- a/lib/cpus/aarch64/cortex_a520.S +++ b/lib/cpus/aarch64/cortex_a520.S @@ -24,6 +24,8 @@ #error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a520 + workaround_reset_start cortex_a520, ERRATUM(2630792), ERRATA_A520_2630792 sysreg_bit_set CORTEX_A520_CPUACTLR_EL1, BIT(38) workaround_reset_end cortex_a520, ERRATUM(2630792) diff --git a/lib/cpus/aarch64/cortex_a53.S b/lib/cpus/aarch64/cortex_a53.S index 4a5b31814..dbfff8795 100644 --- a/lib/cpus/aarch64/cortex_a53.S +++ b/lib/cpus/aarch64/cortex_a53.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -12,6 +12,8 @@ #include #include +cpu_reset_prologue cortex_a53 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -36,12 +38,12 @@ endfunc cortex_a53_disable_smp /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319 mrs x1, CORTEX_A53_L2ACTLR_EL1 @@ -55,7 +57,7 @@ check_erratum_ls cortex_a53, ERRATUM(826319), CPU_REV(0, 2) /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2) /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN check_erratum_custom_start cortex_a53, ERRATUM(835769) cmp x0, CPU_REV(0, 4) @@ -78,7 +80,7 @@ exit_check_errata_835769: check_erratum_custom_end cortex_a53, ERRATUM(835769) /* workaround at build time */ -add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769 /* * Disable the cache non-temporal hint. 
@@ -114,7 +116,7 @@ exit_check_errata_843419: check_erratum_custom_end cortex_a53, ERRATUM(843419) /* workaround at build time */ -add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419 /* * Earlier revisions of the core are affected as well, but don't @@ -131,7 +133,7 @@ check_erratum_hs cortex_a53, ERRATUM(855873), CPU_REV(0, 3) check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET +add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924 cpu_reset_func_start cortex_a53 /* Enable the SMP bit. */ diff --git a/lib/cpus/aarch64/cortex_a55.S b/lib/cpus/aarch64/cortex_a55.S index 2267d667b..cf91431a7 100644 --- a/lib/cpus/aarch64/cortex_a55.S +++ b/lib/cpus/aarch64/cortex_a55.S @@ -20,6 +20,8 @@ .globl cortex_a55_reset_func .globl cortex_a55_core_pwr_dwn +cpu_reset_prologue cortex_a55 + workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953 errata_dsu_798953_wa_impl workaround_reset_end cortex_a55, ERRATUM(798953) @@ -111,7 +113,7 @@ check_erratum_ls cortex_a55, ERRATUM(1221012), CPU_REV(1, 0) check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923 /* erratum has no workaround in the cpu. 
Generic code must take care */ -add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET +add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923 cpu_reset_func_start cortex_a55 cpu_reset_func_end cortex_a55 diff --git a/lib/cpus/aarch64/cortex_a57.S b/lib/cpus/aarch64/cortex_a57.S index aac9c513c..adacc5cb2 100644 --- a/lib/cpus/aarch64/cortex_a57.S +++ b/lib/cpus/aarch64/cortex_a57.S @@ -13,6 +13,8 @@ #include #include +cpu_reset_prologue cortex_a57 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -81,7 +83,7 @@ check_erratum_ls cortex_a57, ERRATUM(806969), CPU_REV(0, 0) /* erratum always worked around, but report it correctly */ check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0) -add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET +add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420 sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI @@ -150,7 +152,7 @@ check_erratum_ls cortex_a57, ERRATUM(859972), CPU_REV(1, 3) check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET +add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537 workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715 #if IMAGE_BL31 diff --git a/lib/cpus/aarch64/cortex_a65.S b/lib/cpus/aarch64/cortex_a65.S index 064e6f0ec..3c32adb4e 100644 --- a/lib/cpus/aarch64/cortex_a65.S +++ b/lib/cpus/aarch64/cortex_a65.S @@ -23,6 +23,8 @@ #error "Cortex-A65 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a65 + workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end cortex_a65, ERRATUM(936184) diff --git a/lib/cpus/aarch64/cortex_a65ae.S b/lib/cpus/aarch64/cortex_a65ae.S index d2f9e49f1..f1a63b06c 100644 --- a/lib/cpus/aarch64/cortex_a65ae.S +++ b/lib/cpus/aarch64/cortex_a65ae.S @@ -23,6 +23,8 @@ #error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a65ae + workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end cortex_a65ae, ERRATUM(936184) diff --git a/lib/cpus/aarch64/cortex_a710.S b/lib/cpus/aarch64/cortex_a710.S index 17163a150..cb24aa116 100644 --- a/lib/cpus/aarch64/cortex_a710.S +++ b/lib/cpus/aarch64/cortex_a710.S @@ -29,6 +29,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a710 + /* Disable hardware page aggregation. 
Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46) @@ -223,7 +225,7 @@ workaround_reset_end cortex_a710, CVE(2022, 23960) check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772, NO_APPLY_AT_RESET +add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772 check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1) diff --git a/lib/cpus/aarch64/cortex_a715.S b/lib/cpus/aarch64/cortex_a715.S index fbc73eda3..e50764d02 100644 --- a/lib/cpus/aarch64/cortex_a715.S +++ b/lib/cpus/aarch64/cortex_a715.S @@ -28,6 +28,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a715 + workaround_reset_start cortex_a715, ERRATUM(2331818), ERRATA_A715_2331818 sysreg_bit_set CORTEX_A715_CPUACTLR2_EL1, BIT(20) workaround_reset_end cortex_a715, ERRATUM(2331818) @@ -129,7 +131,7 @@ workaround_reset_end cortex_a715, CVE(2022, 23960) check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560, NO_APPLY_AT_RESET +add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560 check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3) diff --git a/lib/cpus/aarch64/cortex_a72.S b/lib/cpus/aarch64/cortex_a72.S index c300ea7cf..fee28ee79 100644 --- a/lib/cpus/aarch64/cortex_a72.S +++ b/lib/cpus/aarch64/cortex_a72.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -15,6 +15,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a72 + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- @@ -92,7 +94,7 @@ check_erratum_ls cortex_a72, ERRATUM(859971), CPU_REV(0, 3) /* Due to the nature of the errata it is applied unconditionally when chosen */ check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367 /* erratum workaround is interleaved with generic code */ -add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET +add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367 workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715 #if IMAGE_BL31 diff --git a/lib/cpus/aarch64/cortex_a720.S b/lib/cpus/aarch64/cortex_a720.S index ab2c12f3f..2991f93b9 100644 --- a/lib/cpus/aarch64/cortex_a720.S +++ b/lib/cpus/aarch64/cortex_a720.S @@ -22,6 +22,8 @@ #error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a720 + .global check_erratum_cortex_a720_3699561 #if WORKAROUND_CVE_2022_23960 @@ -74,7 +76,7 @@ workaround_reset_end cortex_a720, CVE(2022, 23960) check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 -add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561, NO_APPLY_AT_RESET +add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561 check_erratum_ls cortex_a720, ERRATUM(3699561), CPU_REV(0, 2) diff --git a/lib/cpus/aarch64/cortex_a720_ae.S b/lib/cpus/aarch64/cortex_a720_ae.S index 57a5030d8..c72a29eb8 100644 --- a/lib/cpus/aarch64/cortex_a720_ae.S +++ b/lib/cpus/aarch64/cortex_a720_ae.S @@ -21,9 +21,11 @@ #error "Cortex-A720AE supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a720_ae + .global check_erratum_cortex_a720_ae_3699562 -add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562, NO_APPLY_AT_RESET +add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562 check_erratum_ls cortex_a720_ae, ERRATUM(3699562), CPU_REV(0, 0) diff --git a/lib/cpus/aarch64/cortex_a725.S b/lib/cpus/aarch64/cortex_a725.S index c4d603441..a8c0db246 100644 --- a/lib/cpus/aarch64/cortex_a725.S +++ b/lib/cpus/aarch64/cortex_a725.S @@ -21,9 +21,11 @@ #error "Cortex-A725 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a725 + .global check_erratum_cortex_a725_3699564 -add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564, NO_APPLY_AT_RESET +add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564 check_erratum_ls cortex_a725, ERRATUM(3699564), CPU_REV(0, 1) diff --git a/lib/cpus/aarch64/cortex_a73.S b/lib/cpus/aarch64/cortex_a73.S index 2130ceb1f..d1fc6d405 100644 --- a/lib/cpus/aarch64/cortex_a73.S +++ b/lib/cpus/aarch64/cortex_a73.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -10,6 +10,8 @@ #include #include +cpu_reset_prologue cortex_a73 + /* --------------------------------------------- * Disable L1 data cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/cortex_a75.S b/lib/cpus/aarch64/cortex_a75.S index 336e00e5c..13599ca96 100644 --- a/lib/cpus/aarch64/cortex_a75.S +++ b/lib/cpus/aarch64/cortex_a75.S @@ -18,6 +18,8 @@ #error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled" #endif +cpu_reset_prologue cortex_a75 + workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081 sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT workaround_reset_end cortex_a75, ERRATUM(764081) diff --git a/lib/cpus/aarch64/cortex_a76.S b/lib/cpus/aarch64/cortex_a76.S index 589edceda..822ef05a0 100644 --- a/lib/cpus/aarch64/cortex_a76.S +++ b/lib/cpus/aarch64/cortex_a76.S @@ -30,6 +30,8 @@ #define ESR_EL3_A64_SMC0 0x5e000000 #define ESR_EL3_A32_SMC0 0x4e000000 +cpu_reset_prologue cortex_a76 + #if DYNAMIC_WORKAROUND_CVE_2018_3639 /* * This macro applies the mitigation for CVE-2018-3639. @@ -428,7 +430,7 @@ check_erratum_custom_end cortex_a76, ERRATUM(1165522) check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 /* erratum has no workaround in the cpu. Generic code must take care */ -add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET +add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960 workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953 errata_dsu_798953_wa_impl diff --git a/lib/cpus/aarch64/cortex_a76ae.S b/lib/cpus/aarch64/cortex_a76ae.S index 2fe3dbcf7..54af9a09e 100644 --- a/lib/cpus/aarch64/cortex_a76ae.S +++ b/lib/cpus/aarch64/cortex_a76ae.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_a76ae + #if WORKAROUND_CVE_2022_23960 wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae #endif /* WORKAROUND_CVE_2022_23960 */ diff --git a/lib/cpus/aarch64/cortex_a77.S b/lib/cpus/aarch64/cortex_a77.S index 1759b7760..7fb964d17 100644 --- a/lib/cpus/aarch64/cortex_a77.S +++ b/lib/cpus/aarch64/cortex_a77.S @@ -17,6 +17,8 @@ #error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled" #endif +cpu_reset_prologue cortex_a77 + /* 64-bit only core */ #if CTX_INCLUDE_AARCH32_REGS == 1 #error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" diff --git a/lib/cpus/aarch64/cortex_a78.S b/lib/cpus/aarch64/cortex_a78.S index 9f2ffdf8f..a66214bca 100644 --- a/lib/cpus/aarch64/cortex_a78.S +++ b/lib/cpus/aarch64/cortex_a78.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -24,6 +24,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78 + /* Disable hardware page aggregation.Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_a78_ae.S b/lib/cpus/aarch64/cortex_a78_ae.S index 7fa1f9bcd..c537967a4 100644 --- a/lib/cpus/aarch64/cortex_a78_ae.S +++ b/lib/cpus/aarch64/cortex_a78_ae.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019-2024, Arm Limited. All rights reserved. + * Copyright (c) 2019-2025, Arm Limited. All rights reserved. * Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause @@ -22,6 +22,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78_ae + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_a78c.S b/lib/cpus/aarch64/cortex_a78c.S index 3f6944a7a..aba7d2593 100644 --- a/lib/cpus/aarch64/cortex_a78c.S +++ b/lib/cpus/aarch64/cortex_a78c.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021-2024, Arm Limited. All rights reserved. + * Copyright (c) 2021-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_a78c + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_alto.S b/lib/cpus/aarch64/cortex_alto.S index 1422563ae..97192a6a0 100644 --- a/lib/cpus/aarch64/cortex_alto.S +++ b/lib/cpus/aarch64/cortex_alto.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Arm Limited. All rights reserved. + * Copyright (c) 2024-2025, Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -25,6 +25,8 @@ #error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue cortex_alto + cpu_reset_func_start cortex_alto /* Disable speculative loads */ msr SSBS, xzr diff --git a/lib/cpus/aarch64/cortex_arcadia.S b/lib/cpus/aarch64/cortex_arcadia.S index c97d87dbe..ae8eb91d1 100644 --- a/lib/cpus/aarch64/cortex_arcadia.S +++ b/lib/cpus/aarch64/cortex_arcadia.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2024, Arm Limited. All rights reserved. + * Copyright (c) 2024-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Cortex-ARCADIA supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_arcadia + cpu_reset_func_start cortex_arcadia /* Disable speculative loads */ msr SSBS, xzr diff --git a/lib/cpus/aarch64/cortex_gelas.S b/lib/cpus/aarch64/cortex_gelas.S index 709bb129b..cdf62841a 100644 --- a/lib/cpus/aarch64/cortex_gelas.S +++ b/lib/cpus/aarch64/cortex_gelas.S @@ -29,6 +29,8 @@ #error "Gelas needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue cortex_gelas + cpu_reset_func_start cortex_gelas /* ---------------------------------------------------- * Disable speculative loads diff --git a/lib/cpus/aarch64/cortex_x1.S b/lib/cpus/aarch64/cortex_x1.S index 5bd020c6c..27d181a02 100644 --- a/lib/cpus/aarch64/cortex_x1.S +++ b/lib/cpus/aarch64/cortex_x1.S @@ -23,6 +23,8 @@ wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x1 + /* Disable hardware page aggregation. 
Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x2.S b/lib/cpus/aarch64/cortex_x2.S index 547c430cd..549beec4f 100644 --- a/lib/cpus/aarch64/cortex_x2.S +++ b/lib/cpus/aarch64/cortex_x2.S @@ -25,7 +25,7 @@ .global check_erratum_cortex_x2_3701772 -add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772, NO_APPLY_AT_RESET +add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772 check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1) @@ -33,6 +33,8 @@ check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1) wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x2 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x3.S b/lib/cpus/aarch64/cortex_x3.S index f57a113d7..da9e30608 100644 --- a/lib/cpus/aarch64/cortex_x3.S +++ b/lib/cpus/aarch64/cortex_x3.S @@ -24,7 +24,7 @@ .global check_erratum_cortex_x3_3701769 -add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769, NO_APPLY_AT_RESET +add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769 check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2) @@ -32,6 +32,8 @@ check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2) wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue cortex_x3 + /* Disable hardware page aggregation. 
Enables mitigation for `CVE-2024-5660` */ workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S index 4b1cf9175..79a8d54a5 100644 --- a/lib/cpus/aarch64/cortex_x4.S +++ b/lib/cpus/aarch64/cortex_x4.S @@ -22,6 +22,8 @@ #error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue cortex_x4 + .global check_erratum_cortex_x4_2726228 .global check_erratum_cortex_x4_3701758 @@ -122,7 +124,7 @@ workaround_reset_end cortex_x4, CVE(2024, 7881) check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881 -add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758, NO_APPLY_AT_RESET +add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758 check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3) diff --git a/lib/cpus/aarch64/cortex_x925.S b/lib/cpus/aarch64/cortex_x925.S index 5974c18d5..7dec3752e 100644 --- a/lib/cpus/aarch64/cortex_x925.S +++ b/lib/cpus/aarch64/cortex_x925.S @@ -21,9 +21,9 @@ #error "Cortex-X925 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif -.global check_erratum_cortex_x925_3701747 +cpu_reset_prologue cortex_x925 -add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747, NO_APPLY_AT_RESET +add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747 check_erratum_ls cortex_x925, ERRATUM(3701747), CPU_REV(0, 1) diff --git a/lib/cpus/aarch64/denver.S b/lib/cpus/aarch64/denver.S index ca250d370..64158e720 100644 --- a/lib/cpus/aarch64/denver.S +++ b/lib/cpus/aarch64/denver.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause @@ -13,6 +13,8 @@ #include #include +cpu_reset_prologue denver + /* ------------------------------------------------- * CVE-2017-5715 mitigation * diff --git a/lib/cpus/aarch64/generic.S b/lib/cpus/aarch64/generic.S index 849056f49..0a10eed1a 100644 --- a/lib/cpus/aarch64/generic.S +++ b/lib/cpus/aarch64/generic.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, Arm Limited. All rights reserved. + * Copyright (c) 2020-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,8 @@ #include #include +cpu_reset_prologue generic + /* --------------------------------------------- * Disable L1 data cache and unified L2 cache * --------------------------------------------- diff --git a/lib/cpus/aarch64/neoverse_e1.S b/lib/cpus/aarch64/neoverse_e1.S index c6dd11783..f37bb2805 100644 --- a/lib/cpus/aarch64/neoverse_e1.S +++ b/lib/cpus/aarch64/neoverse_e1.S @@ -22,6 +22,8 @@ #error "Neoverse-E1 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_e1 + workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end neoverse_e1, ERRATUM(936184) diff --git a/lib/cpus/aarch64/neoverse_n1.S b/lib/cpus/aarch64/neoverse_n1.S index 638d0d3a3..1ad9557db 100644 --- a/lib/cpus/aarch64/neoverse_n1.S +++ b/lib/cpus/aarch64/neoverse_n1.S @@ -28,6 +28,8 @@ wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue neoverse_n1 + workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184 errata_dsu_936184_wa_impl workaround_reset_end neoverse_n1, ERRATUM(936184) diff --git a/lib/cpus/aarch64/neoverse_n2.S b/lib/cpus/aarch64/neoverse_n2.S index fd6877dab..9c6f01a49 100644 --- a/lib/cpus/aarch64/neoverse_n2.S +++ b/lib/cpus/aarch64/neoverse_n2.S @@ -23,7 +23,7 @@ .global check_erratum_neoverse_n2_3701773 -add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773, NO_APPLY_AT_RESET +add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773 check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3) @@ -31,6 +31,8 @@ check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3) wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2 #endif /* WORKAROUND_CVE_2022_23960 */ +cpu_reset_prologue neoverse_n2 + workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941 errata_dsu_2313941_wa_impl workaround_reset_end neoverse_n2, ERRATUM(2313941) diff --git a/lib/cpus/aarch64/neoverse_n3.S b/lib/cpus/aarch64/neoverse_n3.S index 8abcafeb9..1b7a3e154 100644 --- a/lib/cpus/aarch64/neoverse_n3.S +++ b/lib/cpus/aarch64/neoverse_n3.S @@ -21,9 +21,11 @@ #error "Neoverse-N3 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_n3 + .global check_erratum_neoverse_n3_3699563 -add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563, NO_APPLY_AT_RESET +add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563 check_erratum_ls neoverse_n3, ERRATUM(3699563), CPU_REV(0, 0) diff --git a/lib/cpus/aarch64/neoverse_v1.S b/lib/cpus/aarch64/neoverse_v1.S index f16f7e70b..e1e821490 100644 --- a/lib/cpus/aarch64/neoverse_v1.S +++ b/lib/cpus/aarch64/neoverse_v1.S @@ -22,6 +22,8 @@ #error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v1 + #if WORKAROUND_CVE_2022_23960 wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1 #endif /* WORKAROUND_CVE_2022_23960 */ diff --git a/lib/cpus/aarch64/neoverse_v2.S b/lib/cpus/aarch64/neoverse_v2.S index b43f6dd19..06521ecb4 100644 --- a/lib/cpus/aarch64/neoverse_v2.S +++ b/lib/cpus/aarch64/neoverse_v2.S @@ -22,6 +22,8 @@ #error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v2 + /* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */ workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660 sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46) diff --git a/lib/cpus/aarch64/neoverse_v3.S b/lib/cpus/aarch64/neoverse_v3.S index dad37099c..29bfd0ead 100644 --- a/lib/cpus/aarch64/neoverse_v3.S +++ b/lib/cpus/aarch64/neoverse_v3.S @@ -22,9 +22,11 @@ #error "Neoverse V3 supports only AArch64. 
Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue neoverse_v3 + .global check_erratum_neoverse_v3_3701767 -add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767, NO_APPLY_AT_RESET +add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767 check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2) diff --git a/lib/cpus/aarch64/nevis.S b/lib/cpus/aarch64/nevis.S index 0180ab7d5..0d04e65ec 100644 --- a/lib/cpus/aarch64/nevis.S +++ b/lib/cpus/aarch64/nevis.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023-2024, Arm Limited. All rights reserved. + * Copyright (c) 2023-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue nevis + cpu_reset_func_start nevis /* ---------------------------------------------------- * Disable speculative loads diff --git a/lib/cpus/aarch64/qemu_max.S b/lib/cpus/aarch64/qemu_max.S index 529bb4f2a..a7273791e 100644 --- a/lib/cpus/aarch64/qemu_max.S +++ b/lib/cpus/aarch64/qemu_max.S @@ -8,6 +8,8 @@ #include #include +cpu_reset_prologue qemu_max + func qemu_max_core_pwr_dwn /* --------------------------------------------- * Disable the Data Cache. diff --git a/lib/cpus/aarch64/rainier.S b/lib/cpus/aarch64/rainier.S index ea687be6e..9ad93627d 100644 --- a/lib/cpus/aarch64/rainier.S +++ b/lib/cpus/aarch64/rainier.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020-2024, Arm Limited. All rights reserved. + * Copyright (c) 2020-2025, Arm Limited. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -21,6 +21,8 @@ #error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0" #endif +cpu_reset_prologue rainier + /* -------------------------------------------------- * Disable speculative loads if Rainier supports * SSBS. 
diff --git a/lib/cpus/aarch64/travis.S b/lib/cpus/aarch64/travis.S index 2e41668f4..0a95e8009 100644 --- a/lib/cpus/aarch64/travis.S +++ b/lib/cpus/aarch64/travis.S @@ -29,6 +29,8 @@ #error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly" #endif +cpu_reset_prologue travis + cpu_reset_func_start travis /* ---------------------------------------------------- * Disable speculative loads From 21d068bedbc5f48bebb128998a62bddda7ad625e Mon Sep 17 00:00:00 2001 From: Boyan Karatotev Date: Fri, 24 Jan 2025 09:51:03 +0000 Subject: [PATCH 10/10] refactor(cpus): declare runtime errata correctly These errata don't have a workaround in the cpu file. So calling the wrappers is redundant. We can simply register them with the framework. Change-Id: I316daeee603e86c9f2bdccf91e1b10f7ec6c3f9d Signed-off-by: Boyan Karatotev --- lib/cpus/aarch64/cortex_a520.S | 4 +--- lib/cpus/aarch64/cortex_x4.S | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/cpus/aarch64/cortex_a520.S b/lib/cpus/aarch64/cortex_a520.S index 7d63dbbbf..6714a53de 100644 --- a/lib/cpus/aarch64/cortex_a520.S +++ b/lib/cpus/aarch64/cortex_a520.S @@ -11,7 +11,6 @@ #include #include -/* .global erratum_cortex_a520_2938996_wa */ .global check_erratum_cortex_a520_2938996 /* Hardware handled coherency */ @@ -38,8 +37,7 @@ workaround_reset_end cortex_a520, ERRATUM(2858100) check_erratum_ls cortex_a520, ERRATUM(2858100), CPU_REV(0, 1) -workaround_runtime_start cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996, CORTEX_A520_MIDR -workaround_runtime_end cortex_a520, ERRATUM(2938996) +add_erratum_entry cortex_a520, ERRATUM(2938996), ERRATA_A520_2938996 check_erratum_ls cortex_a520, ERRATUM(2938996), CPU_REV(0, 1) diff --git a/lib/cpus/aarch64/cortex_x4.S b/lib/cpus/aarch64/cortex_x4.S index 79a8d54a5..53461c61f 100644 --- a/lib/cpus/aarch64/cortex_x4.S +++ b/lib/cpus/aarch64/cortex_x4.S @@ -31,8 +31,7 @@ cpu_reset_prologue cortex_x4 wa_cve_2022_23960_bhb_vector_table CORTEX_X4_BHB_LOOP_COUNT, 
cortex_x4 #endif /* WORKAROUND_CVE_2022_23960 */ -workaround_runtime_start cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228, CORTEX_X4_MIDR -workaround_runtime_end cortex_x4, ERRATUM(2726228) +add_erratum_entry cortex_x4, ERRATUM(2726228), ERRATA_X4_2726228 check_erratum_ls cortex_x4, ERRATUM(2726228), CPU_REV(0, 1)