mirror of
https://github.com/ARM-software/arm-trusted-firmware.git
synced 2025-04-08 05:43:53 +00:00

Errata application is painful for performance. For a start, it is done when the core has just come out of reset, which means branch predictors and caches will be empty, so a branch to a workaround function must be fetched from memory and that round trip is very slow. It also runs with the I-cache off, which means the loop that iterates over the workarounds must itself be fetched from memory on each iteration.

We can remove both branches. First, we can simply apply every erratum directly instead of defining a workaround function and jumping to it. Currently, no errata exist that need to be applied at both reset and runtime with the same workaround function. If the need arose in future, this should be achievable with a reset + runtime wrapper combo.

Second, we can construct a function that applies each erratum linearly instead of looping over the list. If this function is part of the reset function, then the only "far" branches at reset will be to the checker functions. Importantly, this mitigates the slowdown even when an erratum is disabled.

The result is a ~50% speedup on N1SDP and ~20% on AArch64 Juno on wakeup from PSCI calls that end in powerdown. This is roughly back to the baseline of v2.9, before the errata framework regressed on performance (or a little better). It is important to note that there are other slowdowns introduced since then that remain unidentified.

Change-Id: Ie4d5288a331b11fd648e5c4a0b652b74160b07b9
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
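For illustration, here is a minimal sketch of what the inline, linear application described above could look like inside a CPU reset function. The label number, the use of x0 for the checker's return value, and the exact checker symbol name are assumptions for the sketch, not the literal code the framework generates:

	/* One erratum, applied in line inside cortex_x1_reset_func. The only
	 * "far" branch is the call to the revision checker; the workaround
	 * body (a single sysreg write) is emitted directly, and the next
	 * erratum follows immediately instead of going around a loop.
	 */
	bl	check_erratum_cortex_x1_1688305	/* assumed name; result in x0 */
	cbz	x0, 1f				/* erratum does not apply on this revision */
	sysreg_bit_set	CORTEX_X1_ACTLR2_EL1, BIT(1)
1:

The previous framework instead iterated over an errata list and branched out to a separate per-erratum workaround function, costing two cold fetches from memory per erratum while the I-cache is still off.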
99 lines
3.1 KiB
ArmAsm
/*
 * Copyright (c) 2022-2024, Google LLC. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>
#include <cortex_x1.h>
#include <cpu_macros.S>
#include "wa_cve_2022_23960_bhb_vector.S"

/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "Cortex-X1 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
#error "Cortex-X1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

#if WORKAROUND_CVE_2022_23960
	wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_x1

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
	sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)
workaround_reset_end cortex_x1, CVE(2024, 5660)

check_erratum_ls cortex_x1, CVE(2024, 5660), CPU_REV(1, 2)

workaround_reset_start cortex_x1, ERRATUM(1688305), ERRATA_X1_1688305
	sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(1)
workaround_reset_end cortex_x1, ERRATUM(1688305)

check_erratum_ls cortex_x1, ERRATUM(1688305), CPU_REV(1, 0)

workaround_reset_start cortex_x1, ERRATUM(1821534), ERRATA_X1_1821534
	sysreg_bit_set CORTEX_X1_ACTLR2_EL1, BIT(2)
workaround_reset_end cortex_x1, ERRATUM(1821534)

check_erratum_ls cortex_x1, ERRATUM(1821534), CPU_REV(1, 0)

workaround_reset_start cortex_x1, ERRATUM(1827429), ERRATA_X1_1827429
	sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(53)
workaround_reset_end cortex_x1, ERRATUM(1827429)

check_erratum_ls cortex_x1, ERRATUM(1827429), CPU_REV(1, 0)

check_erratum_chosen cortex_x1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

workaround_reset_start cortex_x1, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
	/*
	 * The Cortex-X1 generic vectors are overridden to apply errata
	 * mitigation on exception entry from lower ELs.
	 */
	override_vector_table wa_cve_vbar_cortex_x1
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_x1, CVE(2022, 23960)

cpu_reset_func_start cortex_x1
cpu_reset_func_end cortex_x1

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_x1_core_pwr_dwn
	sysreg_bit_set CORTEX_X1_CPUPWRCTLR_EL1, CORTEX_X1_CORE_PWRDN_EN_MASK
	isb
	ret
endfunc cortex_x1_core_pwr_dwn

	/* ---------------------------------------------
	 * This function provides Cortex X1 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_x1_regs, "aS"
cortex_x1_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_x1_cpu_reg_dump
	adr	x6, cortex_x1_regs
	mrs	x8, CORTEX_X1_CPUECTLR_EL1
	ret
endfunc cortex_x1_cpu_reg_dump

declare_cpu_ops cortex_x1, CORTEX_X1_MIDR, \
	cortex_x1_reset_func, \
	cortex_x1_core_pwr_dwn