arm-trusted-firmware/lib/cpus/aarch64/cortex_a75.S
Boyan Karatotev 89dba82dfa perf(cpus): make reset errata do fewer branches
Errata application is painful for performance. For a start, it's done
when the core has just come out of reset, which means branch predictors
and caches will be empty, so a branch to a workaround function must be
fetched from memory, and that round trip is very slow. On top of that,
it runs with the I-cache off, which means the loop that iterates over
the workarounds must itself be fetched from memory on each iteration.

We can remove both branches. First, we can simply apply every erratum
directly instead of defining a workaround function and jumping to it.
Currently, no erratum needs to be applied at both reset and runtime
with the same workaround function. If that need arose in future, it
could be met with a reset + runtime wrapper combo.
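
As an illustration, previously a workaround lived in its own function
and the reset code branched to it (the function below is a hypothetical
sketch of that shape, not code taken from the old tree):

	func errata_a75_764081_wa
		sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
		ret
	endfunc errata_a75_764081_wa

Its body now sits directly between workaround_reset_start and
workaround_reset_end (see the file below), so reaching the workaround
at reset costs no extra branch.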

Then, we can construct a function that applies each erratum linearly
instead of looping over the list. If this function is part of the reset
function, then the only "far" branches at reset will be for the checker
functions. Importantly, this mitigates the slowdown even when an erratum
is disabled.
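
Conceptually, reset-time errata application now boils down to a
straight-line sequence of check-and-apply steps rather than a loop over
a table. A rough sketch (illustrative only, not the literal expansion
of the framework macros):

	bl	check_erratum_cortex_a75_764081
	cmp	x0, #ERRATA_APPLIES
	b.ne	1f
	sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
	1:
	bl	check_erratum_cortex_a75_790748
	cmp	x0, #ERRATA_APPLIES
	b.ne	2f
	sysreg_bit_set CORTEX_A75_CPUACTLR_EL1, (1 << 13)
	2:
	/* ... remaining errata, then the rest of the reset function ... */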

The result is a ~50% speedup on N1SDP and ~20% on AArch64 Juno on
wakeup from PSCI calls that end in powerdown. This is roughly back to
the v2.9 baseline (or a little better), from before the errata
framework regressed performance. Note that other slowdowns have been
introduced since then whose causes remain unknown.

Change-Id: Ie4d5288a331b11fd648e5c4a0b652b74160b07b9
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
2025-02-24 09:36:11 +00:00


/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <cortex_a75.h>
#include <cpuamu.h>
#include <cpu_macros.S>
#include <dsu_macros.S>

.global check_erratum_cortex_a75_764081

/* Hardware handled coherency */
#if HW_ASSISTED_COHERENCY == 0
#error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

cpu_reset_prologue cortex_a75

/* Erratum 764081 applies to revisions <= r0p0: set SCTLR_EL3.IESB */
workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081
	sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
workaround_reset_end cortex_a75, ERRATUM(764081)

check_erratum_ls cortex_a75, ERRATUM(764081), CPU_REV(0, 0)

/* Erratum 790748 applies to revisions <= r0p0: set CPUACTLR_EL1 bit 13 */
workaround_reset_start cortex_a75, ERRATUM(790748), ERRATA_A75_790748
	sysreg_bit_set CORTEX_A75_CPUACTLR_EL1, (1 << 13)
workaround_reset_end cortex_a75, ERRATUM(790748)

check_erratum_ls cortex_a75, ERRATUM(790748), CPU_REV(0, 0)

/* DSU erratum 798953: shared check/workaround from dsu_macros.S */
workaround_reset_start cortex_a75, ERRATUM(798953), ERRATA_DSU_798953
	errata_dsu_798953_wa_impl
workaround_reset_end cortex_a75, ERRATUM(798953)

check_erratum_custom_start cortex_a75, ERRATUM(798953)
	check_errata_dsu_798953_impl
	ret
check_erratum_custom_end cortex_a75, ERRATUM(798953)

/* DSU erratum 936184: shared check/workaround from dsu_macros.S */
workaround_reset_start cortex_a75, ERRATUM(936184), ERRATA_DSU_936184
	errata_dsu_936184_wa_impl
workaround_reset_end cortex_a75, ERRATUM(936184)

check_erratum_custom_start cortex_a75, ERRATUM(936184)
	check_errata_dsu_936184_impl
	ret
check_erratum_custom_end cortex_a75, ERRATUM(936184)

/* CVE-2017-5715 (Spectre v2): install the BPIALL vector table in BL31 */
workaround_reset_start cortex_a75, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
	override_vector_table wa_cve_2017_5715_bpiall_vbar
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_a75, CVE(2017, 5715)

check_erratum_custom_start cortex_a75, CVE(2017, 5715)
	/* CSV2-capable parts do not need the mitigation */
	cpu_check_csv2	x0, 1f
#if WORKAROUND_CVE_2017_5715
	mov	x0, #ERRATA_APPLIES
#else
	mov	x0, #ERRATA_MISSING
#endif
	ret
1:
	mov	x0, #ERRATA_NOT_APPLIES
	ret
check_erratum_custom_end cortex_a75, CVE(2017, 5715)

/* CVE-2018-3639 (speculative store bypass): disable load-pass-store */
workaround_reset_start cortex_a75, CVE(2018, 3639), WORKAROUND_CVE_2018_3639
	sysreg_bit_set CORTEX_A75_CPUACTLR_EL1, CORTEX_A75_CPUACTLR_EL1_DISABLE_LOAD_PASS_STORE
workaround_reset_end cortex_a75, CVE(2018, 3639)

check_erratum_chosen cortex_a75, CVE(2018, 3639), WORKAROUND_CVE_2018_3639

workaround_reset_start cortex_a75, CVE(2022, 23960), WORKAROUND_CVE_2022_23960
#if IMAGE_BL31
	/* Skip installing vector table again if already done for CVE(2017, 5715) */
	adr	x0, wa_cve_2017_5715_bpiall_vbar
	mrs	x1, vbar_el3
	cmp	x0, x1
	b.eq	1f
	msr	vbar_el3, x0
1:
#endif /* IMAGE_BL31 */
workaround_reset_end cortex_a75, CVE(2022, 23960)

check_erratum_custom_start cortex_a75, CVE(2022, 23960)
#if WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960
	cpu_check_csv2	x0, 1f
	mov	x0, #ERRATA_APPLIES
	ret
1:
# if WORKAROUND_CVE_2022_23960
	mov	x0, #ERRATA_APPLIES
# else
	mov	x0, #ERRATA_MISSING
# endif /* WORKAROUND_CVE_2022_23960 */
	ret
#endif /* WORKAROUND_CVE_2017_5715 || WORKAROUND_CVE_2022_23960 */
	mov	x0, #ERRATA_MISSING
	ret
check_erratum_custom_end cortex_a75, CVE(2022, 23960)

	/* -------------------------------------------------
	 * The CPU Ops reset function for Cortex-A75.
	 * -------------------------------------------------
	 */
cpu_reset_func_start cortex_a75
#if ENABLE_FEAT_AMU
	/* Make sure accesses from EL0/EL1 and EL2 are not trapped to EL3 */
	sysreg_bit_set actlr_el3, CORTEX_A75_ACTLR_AMEN_BIT
	isb

	/* Make sure accesses from EL0/EL1 are not trapped to EL2 */
	sysreg_bit_set actlr_el2, CORTEX_A75_ACTLR_AMEN_BIT
	isb

	/* Enable group0 counters */
	mov	x0, #CORTEX_A75_AMU_GROUP0_MASK
	msr	CPUAMCNTENSET_EL0, x0
	isb

	/* Enable group1 counters */
	mov	x0, #CORTEX_A75_AMU_GROUP1_MASK
	msr	CPUAMCNTENSET_EL0, x0

	/* isb included in cpu_reset_func_end macro */
#endif
cpu_reset_func_end cortex_a75

	/* Report that SMCCC_ARCH_WORKAROUND_3 applies to this CPU */
func check_smccc_arch_workaround_3
	mov	x0, #ERRATA_APPLIES
	ret
endfunc check_smccc_arch_workaround_3

	/* ---------------------------------------------
	 * HW will do the cache maintenance while powering down
	 * ---------------------------------------------
	 */
func cortex_a75_core_pwr_dwn
	/* ---------------------------------------------
	 * Enable CPU power down bit in power control register
	 * ---------------------------------------------
	 */
	sysreg_bit_set CORTEX_A75_CPUPWRCTLR_EL1, \
		CORTEX_A75_CORE_PWRDN_EN_MASK
	isb
	ret
endfunc cortex_a75_core_pwr_dwn

	/* ---------------------------------------------
	 * This function provides cortex_a75 specific
	 * register information for crash reporting.
	 * It needs to return with x6 pointing to
	 * a list of register names in ascii and
	 * x8 - x15 having values of registers to be
	 * reported.
	 * ---------------------------------------------
	 */
.section .rodata.cortex_a75_regs, "aS"
cortex_a75_regs:  /* The ascii list of register names to be reported */
	.asciz	"cpuectlr_el1", ""

func cortex_a75_cpu_reg_dump
	adr	x6, cortex_a75_regs
	mrs	x8, CORTEX_A75_CPUECTLR_EL1
	ret
endfunc cortex_a75_cpu_reg_dump

declare_cpu_ops_wa cortex_a75, CORTEX_A75_MIDR, \
	cortex_a75_reset_func, \
	check_erratum_cortex_a75_5715, \
	CPU_NO_EXTRA2_FUNC, \
	check_smccc_arch_workaround_3, \
	cortex_a75_core_pwr_dwn