Mirror of https://github.com/ARM-software/arm-trusted-firmware.git (synced 2025-04-08 05:43:53 +00:00)
perf(cpus): make reset errata do fewer branches
Errata application is painful for performance. For a start, it is done when the core has just come out of reset, which means branch predictors and caches will be empty, so a branch to a workaround function must be fetched from memory and that round trip is very slow. It also runs with the I-cache off, which means that the loop that iterates over the workarounds must itself be fetched from memory on each iteration.

We can remove both branches. First, we can simply apply every erratum directly instead of defining a workaround function and jumping to it. Currently there are no errata that need to be applied at both reset and runtime with the same workaround function. If the need arose in future, this should be achievable with a reset + runtime wrapper combo. Second, we can construct a function that applies each erratum linearly instead of looping over the list. If this function is part of the reset function, the only "far" branches at reset are to the checker functions. Importantly, this mitigates the slowdown even when an erratum is disabled.

The result is a ~50% speedup on N1SDP and ~20% on AArch64 Juno on wakeup from PSCI calls that end in powerdown. This is roughly back to the baseline of v2.9, before the errata framework regressed on performance (or a little better). Note that there are other slowdowns since then that remain unaccounted for.

Change-Id: Ie4d5288a331b11fd648e5c4a0b652b74160b07b9
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
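To make the shape of the change concrete, the two fragments below sketch the before and after in simplified GNU assembler. The first mirrors the loop that used to live in cpu_reset_func_start (walk the erratum entry list and blr to each workaround); the second mirrors the new layout, where every chosen reset-time workaround is emitted straight into the reset function and only its checker is reached through a branch. The structure follows the framework code in this patch, but mycpu and 123456 are illustrative placeholders, not symbols from the patch.

	/* before (simplified): iterate the entry list with caches and predictors cold */
1:
	cmp	x12, x13			/* reached the end of the list? */
	b.eq	2f
	ldr	x10, [x12, #ERRATUM_WA_FUNC]	/* cold load of the entry */
	ldrb	w11, [x12, #ERRATUM_CHOSEN]
	cbz	x11, 3f				/* not chosen at build time */
	cbz	x10, 3f				/* runtime-only erratum */
	mov	x0, x14				/* cpu_rev */
	blr	x10				/* "far" branch to the workaround */
3:
	add	x12, x12, #ERRATUM_ENTRY_SIZE
	b	1b
2:

	/* after (simplified): each chosen erratum is applied inline in the reset
	 * function, so the only far branch left is to its checker */
	mov	x0, x14				/* cpu_rev */
	bl	check_erratum_mycpu_123456	/* hypothetical checker */
	cbz	x0, 1f
	/* workaround body sits right here; no call, no loop */
1: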
commit 89dba82dfa
parent b07c317f67
48 changed files with 185 additions and 126 deletions
@@ -1481,7 +1481,9 @@ the returned ``cpu_ops`` is then invoked which executes the required reset
handling for that CPU and also any errata workarounds enabled by the platform.

It should be defined using the ``cpu_reset_func_{start,end}`` macros and its
body may only clobber x0 to x14 with x14 being the cpu_rev parameter.
body may only clobber x0 to x14 with x14 being the cpu_rev parameter. The cpu
file should also include a call to ``cpu_reset_prologue`` at the start of the
file for errata to work correctly.

CPU specific power down sequence
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
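As an illustration of the updated contract, a minimal cpu file under the new scheme could look like the sketch below. The macro names are the real ones used by this patch, but mycpu, ERRATA_MYCPU_123456 and MYCPU_CPUACTLR_EL1 are hypothetical placeholders, and the usual declare_cpu_ops boilerplate is elided.

#include <asm_macros.S>
#include <cpu_macros.S>

	/* must appear at the start of the file so reset errata work correctly */
	cpu_reset_prologue mycpu

workaround_reset_start mycpu, ERRATUM(123456), ERRATA_MYCPU_123456
	/* body is emitted directly into mycpu_reset_func */
	sysreg_bit_set MYCPU_CPUACTLR_EL1, BIT(0)
workaround_reset_end mycpu, ERRATUM(123456)

check_erratum_ls mycpu, ERRATUM(123456), CPU_REV(1, 0)

cpu_reset_func_start mycpu
	/* cpu-specific reset handling; may clobber x0-x14, x14 holds cpu_rev */
cpu_reset_func_end mycpu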
@@ -172,11 +172,6 @@
\_cpu\()_errata_list_start:
.endif

/* unused on AArch32, maintain for portability */
.word 0
/* TODO(errata ABI): this prevents all checker functions from
* being optimised away. Can be done away with unless the ABI
* needs them */
.ifnb \_special
.word check_errata_\_special
.elseif \_cve

@@ -188,9 +183,7 @@
.word \_id
.hword \_cve
.byte \_chosen
/* TODO(errata ABI): mitigated field for known but unmitigated
* errata*/
.byte 0x1
.byte 0x0 /* alignment */
.popsection
.endm
@@ -238,50 +238,22 @@
* _apply_at_reset:
* Whether the erratum should be automatically applied at reset
*/
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
.macro add_erratum_entry _cpu:req, _cve:req, _id:req, _chosen:req
#if REPORT_ERRATA || ERRATA_ABI_SUPPORT
.pushsection .rodata.errata_entries
.align 3
.ifndef \_cpu\()_errata_list_start
\_cpu\()_errata_list_start:
.endif

/* check if unused and compile out if no references */
.if \_apply_at_reset && \_chosen
.quad erratum_\_cpu\()_\_id\()_wa
.else
.quad 0
.endif
/* TODO(errata ABI): this prevents all checker functions from
* being optimised away. Can be done away with unless the ABI
* needs them */
.quad check_erratum_\_cpu\()_\_id
/* Will fit CVEs with up to 10 character in the ID field */
.word \_id
.hword \_cve
.byte \_chosen
/* TODO(errata ABI): mitigated field for known but unmitigated
* errata */
.byte 0x1
.byte 0x0 /* alignment */
.popsection
.endm

.macro _workaround_start _cpu:req, _cve:req, _id:req, _chosen:req, _apply_at_reset:req
add_erratum_entry \_cpu, \_cve, \_id, \_chosen, \_apply_at_reset

func erratum_\_cpu\()_\_id\()_wa
mov x8, x30

/* save rev_var for workarounds that might need it but don't
* restore to x0 because few will care */
mov x7, x0
bl check_erratum_\_cpu\()_\_id
cbz x0, erratum_\_cpu\()_\_id\()_skip
.endm

.macro _workaround_end _cpu:req, _id:req
erratum_\_cpu\()_\_id\()_skip:
ret x8
endfunc erratum_\_cpu\()_\_id\()_wa
#endif
.endm

/*******************************************************************************
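With the _apply_at_reset parameter gone, errata that have no workaround function in the cpu file (because they are handled at build time or by generic code) are registered with just the four remaining arguments, mirroring the per-cpu changes later in this patch. A hypothetical example (placeholder cpu and erratum number):

check_erratum_chosen mycpu, ERRATUM(123456), ERRATA_MYCPU_123456

/* erratum has no workaround in the cpu. Generic code must take care */
add_erratum_entry mycpu, ERRATUM(123456), ERRATA_MYCPU_123456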
@@ -311,7 +283,22 @@
* _wa clobbers: x0-x8 (PCS compliant)
*/
.macro workaround_reset_start _cpu:req, _cve:req, _id:req, _chosen:req
_workaround_start \_cpu, \_cve, \_id, \_chosen, 1
add_erratum_entry \_cpu, \_cve, \_id, \_chosen

.if \_chosen
/* put errata directly into the reset function */
.pushsection .text.asm.\_cpu\()_reset_func, "ax"
.else
/* or something else that will get garbage collected by the
* linker */
.pushsection .text.asm.erratum_\_cpu\()_\_id\()_wa, "ax"
.endif
/* revision is stored in x14, get it */
mov x0, x14
bl check_erratum_\_cpu\()_\_id
/* save rev_var for workarounds that might need it */
mov x7, x14
cbz x0, erratum_\_cpu\()_\_id\()_skip_reset
.endm

/*
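Taken together with workaround_reset_end further down, a chosen erratum now expands to roughly the following, emitted into the .text.asm.<cpu>_reset_func section rather than into a standalone workaround function (simplified expansion, hypothetical cpu and erratum id):

	.pushsection .text.asm.mycpu_reset_func, "ax"
	mov	x0, x14				/* cpu_rev, set up by cpu_reset_prologue */
	bl	check_erratum_mycpu_123456	/* the only far branch left at reset */
	mov	x7, x14				/* rev_var for workarounds that want it */
	cbz	x0, erratum_mycpu_123456_skip_reset
	/* ... workaround body ... */
erratum_mycpu_123456_skip_reset:
	.popsection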
@@ -322,6 +309,10 @@
* for errata applied in generic code
*/
.macro workaround_runtime_start _cpu:req, _cve:req, _id:req, _chosen:req, _midr
add_erratum_entry \_cpu, \_cve, \_id, \_chosen

func erratum_\_cpu\()_\_id\()_wa
mov x8, x30
/*
* Let errata specify if they need MIDR checking. Sadly, storing the
* MIDR in an .equ to retrieve automatically blows up as it stores some

@@ -329,11 +320,15 @@
*/
.ifnb \_midr
jump_if_cpu_midr \_midr, 1f
b erratum_\_cpu\()_\_id\()_skip
b erratum_\_cpu\()_\_id\()_skip_runtime

1:
.endif
_workaround_start \_cpu, \_cve, \_id, \_chosen, 0
/* save rev_var for workarounds that might need it but don't
* restore to x0 because few will care */
mov x7, x0
bl check_erratum_\_cpu\()_\_id
cbz x0, erratum_\_cpu\()_\_id\()_skip_runtime
.endm

/*
@@ -341,7 +336,8 @@
* is kept here so the same #define can be used as that macro
*/
.macro workaround_reset_end _cpu:req, _cve:req, _id:req
_workaround_end \_cpu, \_id
erratum_\_cpu\()_\_id\()_skip_reset:
.popsection
.endm

/*

@@ -361,7 +357,9 @@
.ifb \_no_isb
isb
.endif
_workaround_end \_cpu, \_id
erratum_\_cpu\()_\_id\()_skip_runtime:
ret x8
endfunc erratum_\_cpu\()_\_id\()_wa
.endm

/*******************************************************************************
@@ -598,7 +596,21 @@
******************************************************************************/

/*
* Wrapper to automatically apply all reset-time errata. Will end with an isb.
* Helper to register a cpu with the errata framework. Begins the definition of
* the reset function.
*
* _cpu:
* Name of cpu as given to declare_cpu_ops
*/
.macro cpu_reset_prologue _cpu:req
func \_cpu\()_reset_func
mov x15, x30
get_rev_var x14, x0
.endm

/*
* Wrapper of the reset function to automatically apply all reset-time errata.
* Will end with an isb.
*
* _cpu:
* Name of cpu as given to declare_cpu_ops
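Because each chosen workaround_reset_start pushes its body into .text.asm.<cpu>_reset_func, and that section is filled in the order code is assembled, the prologue has to be the first thing the cpu file emits. Under that assumption the section ends up laid out roughly like this sketch (hypothetical cpu, simplified):

mycpu_reset_func:			/* opened by cpu_reset_prologue */
	mov	x15, x30		/* save the return address */
	get_rev_var x14, x0		/* x14 = cpu_rev for all errata below */
	/* chosen reset-time errata bodies, in the order they appear in the file */
	/* cpu-specific reset code from cpu_reset_func_start/_end follows, ending
	 * with an isb and a return through x15 */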
@@ -608,38 +620,9 @@
* argument x14 - cpu_rev_var
*/
.macro cpu_reset_func_start _cpu:req
func \_cpu\()_reset_func
mov x15, x30
get_rev_var x14, x0

/* short circuit the location to avoid searching the list */
adrp x12, \_cpu\()_errata_list_start
add x12, x12, :lo12:\_cpu\()_errata_list_start
adrp x13, \_cpu\()_errata_list_end
add x13, x13, :lo12:\_cpu\()_errata_list_end

errata_begin:
/* if head catches up with end of list, exit */
cmp x12, x13
b.eq errata_end

ldr x10, [x12, #ERRATUM_WA_FUNC]
/* TODO(errata ABI): check mitigated and checker function fields
* for 0 */
ldrb w11, [x12, #ERRATUM_CHOSEN]

/* skip if not chosen */
cbz x11, 1f
/* skip if runtime erratum */
cbz x10, 1f

/* put cpu revision in x0 and call workaround */
mov x0, x14
blr x10
1:
add x12, x12, #ERRATUM_ENTRY_SIZE
b errata_begin
errata_end:
/* the func/endfunc macros will change sections. So change the section
* back to the reset function's */
.section .text.asm.\_cpu\()_reset_func, "ax"
.endm

.macro cpu_reset_func_end _cpu:req
@@ -9,20 +9,18 @@

#include <lib/cpus/cpu_ops.h>

#define ERRATUM_WA_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_CHECK_FUNC_SIZE CPU_WORD_SIZE
#define ERRATUM_ID_SIZE 4
#define ERRATUM_CVE_SIZE 2
#define ERRATUM_CHOSEN_SIZE 1
#define ERRATUM_MITIGATED_SIZE 1
#define ERRATUM_ALIGNMENT_SIZE 1

#define ERRATUM_WA_FUNC 0
#define ERRATUM_CHECK_FUNC ERRATUM_WA_FUNC + ERRATUM_WA_FUNC_SIZE
#define ERRATUM_CHECK_FUNC 0
#define ERRATUM_ID ERRATUM_CHECK_FUNC + ERRATUM_CHECK_FUNC_SIZE
#define ERRATUM_CVE ERRATUM_ID + ERRATUM_ID_SIZE
#define ERRATUM_CHOSEN ERRATUM_CVE + ERRATUM_CVE_SIZE
#define ERRATUM_MITIGATED ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
#define ERRATUM_ENTRY_SIZE ERRATUM_MITIGATED + ERRATUM_MITIGATED_SIZE
#define ERRATUM_ALIGNMENT ERRATUM_CHOSEN + ERRATUM_CHOSEN_SIZE
#define ERRATUM_ENTRY_SIZE ERRATUM_ALIGNMENT + ERRATUM_ALIGNMENT_SIZE

/* Errata status */
#define ERRATA_NOT_APPLIES 0
@@ -39,15 +37,13 @@ void print_errata_status(void);
* uintptr_t will reflect the change and the alignment will be correct in both.
*/
struct erratum_entry {
uintptr_t (*wa_func)(uint64_t cpu_rev);
uintptr_t (*check_func)(uint64_t cpu_rev);
/* Will fit CVEs with up to 10 character in the ID field */
uint32_t id;
/* Denote CVEs with their year or errata with 0 */
uint16_t cve;
uint8_t chosen;
/* TODO(errata ABI): placeholder for the mitigated field */
uint8_t _mitigated;
uint8_t _alignment;
} __packed;

CASSERT(sizeof(struct erratum_entry) == ERRATUM_ENTRY_SIZE,
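For reference, assuming CPU_WORD_SIZE is the 8-byte register width on AArch64, the new field offsets defined above work out as follows, which is what the CASSERT pins against the C struct:

ERRATUM_CHECK_FUNC  = 0
ERRATUM_ID          = 0 + 8  = 8
ERRATUM_CVE         = 8 + 4  = 12
ERRATUM_CHOSEN      = 12 + 2 = 14
ERRATUM_ALIGNMENT   = 14 + 1 = 15
ERRATUM_ENTRY_SIZE  = 15 + 1 = 16 bytes per entry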
@@ -8,6 +8,8 @@
#include <asm_macros.S>
#include <cpu_macros.S>

cpu_reset_prologue aem_generic

func aem_generic_core_pwr_dwn
/* ---------------------------------------------
* Disable the Data Cache.

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -11,6 +11,7 @@
#include <cpu_macros.S>
#include <plat_macros.S>

cpu_reset_prologue cortex_a35
/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------

@@ -22,6 +22,8 @@
#error "Cortex-A510 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a510

workaround_reset_start cortex_a510, ERRATUM(1922240), ERRATA_A510_1922240
/* Apply the workaround by setting IMP_CMPXACTLR_EL1[11:10] = 0b11. */
sysreg_bitfield_insert CORTEX_A510_CMPXACTLR_EL1, CORTEX_A510_CMPXACTLR_EL1_SNPPREFERUNIQUE_DISABLE, \

@@ -24,6 +24,8 @@
#error "Cortex A520 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a520

workaround_reset_start cortex_a520, ERRATUM(2630792), ERRATA_A520_2630792
sysreg_bit_set CORTEX_A520_CPUACTLR_EL1, BIT(38)
workaround_reset_end cortex_a520, ERRATUM(2630792)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2014-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -12,6 +12,8 @@
#include <plat_macros.S>
#include <lib/cpus/errata.h>

cpu_reset_prologue cortex_a53

/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------

@@ -36,12 +38,12 @@ endfunc cortex_a53_disable_smp
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(819472), CPU_REV(0, 1)
/* erratum workaround is interleaved with generic code */
add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(819472), ERRATUM_ALWAYS_CHOSEN

/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(824069), CPU_REV(0, 2)
/* erratum workaround is interleaved with generic code */
add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(824069), ERRATUM_ALWAYS_CHOSEN

workaround_reset_start cortex_a53, ERRATUM(826319), ERRATA_A53_826319
mrs x1, CORTEX_A53_L2ACTLR_EL1

@@ -55,7 +57,7 @@ check_erratum_ls cortex_a53, ERRATUM(826319), CPU_REV(0, 2)
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_ls cortex_a53, ERRATUM(827319), CPU_REV(0, 2)
/* erratum workaround is interleaved with generic code */
add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(827319), ERRATUM_ALWAYS_CHOSEN

check_erratum_custom_start cortex_a53, ERRATUM(835769)
cmp x0, CPU_REV(0, 4)

@@ -78,7 +80,7 @@ exit_check_errata_835769:
check_erratum_custom_end cortex_a53, ERRATUM(835769)

/* workaround at build time */
add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(835769), ERRATA_A53_835769

/*
* Disable the cache non-temporal hint.

@@ -114,7 +116,7 @@ exit_check_errata_843419:
check_erratum_custom_end cortex_a53, ERRATUM(843419)

/* workaround at build time */
add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(843419), ERRATA_A53_843419

/*
* Earlier revisions of the core are affected as well, but don't

@@ -131,7 +133,7 @@ check_erratum_hs cortex_a53, ERRATUM(855873), CPU_REV(0, 3)
check_erratum_chosen cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924

/* erratum has no workaround in the cpu. Generic code must take care */
add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924, NO_APPLY_AT_RESET
add_erratum_entry cortex_a53, ERRATUM(1530924), ERRATA_A53_1530924

cpu_reset_func_start cortex_a53
/* Enable the SMP bit. */
@@ -20,6 +20,8 @@
.globl cortex_a55_reset_func
.globl cortex_a55_core_pwr_dwn

cpu_reset_prologue cortex_a55

workaround_reset_start cortex_a55, ERRATUM(798953), ERRATA_DSU_798953
errata_dsu_798953_wa_impl
workaround_reset_end cortex_a55, ERRATUM(798953)

@@ -111,7 +113,7 @@ check_erratum_ls cortex_a55, ERRATUM(1221012), CPU_REV(1, 0)
check_erratum_chosen cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923

/* erratum has no workaround in the cpu. Generic code must take care */
add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923, NO_APPLY_AT_RESET
add_erratum_entry cortex_a55, ERRATUM(1530923), ERRATA_A55_1530923

cpu_reset_func_start cortex_a55
cpu_reset_func_end cortex_a55
@@ -13,6 +13,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>

cpu_reset_prologue cortex_a57

/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------

@@ -81,7 +83,7 @@ check_erratum_ls cortex_a57, ERRATUM(806969), CPU_REV(0, 0)

/* erratum always worked around, but report it correctly */
check_erratum_ls cortex_a57, ERRATUM(813419), CPU_REV(0, 0)
add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN, NO_APPLY_AT_RESET
add_erratum_entry cortex_a57, ERRATUM(813419), ERRATUM_ALWAYS_CHOSEN

workaround_reset_start cortex_a57, ERRATUM(813420), ERRATA_A57_813420
sysreg_bit_set CORTEX_A57_CPUACTLR_EL1, CORTEX_A57_CPUACTLR_EL1_DCC_AS_DCCI

@@ -150,7 +152,7 @@ check_erratum_ls cortex_a57, ERRATUM(859972), CPU_REV(1, 3)

check_erratum_chosen cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537
/* erratum has no workaround in the cpu. Generic code must take care */
add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537, NO_APPLY_AT_RESET
add_erratum_entry cortex_a57, ERRATUM(1319537), ERRATA_A57_1319537

workaround_reset_start cortex_a57, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
@@ -23,6 +23,8 @@
#error "Cortex-A65 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a65

workaround_reset_start cortex_a65, ERRATUM(936184), ERRATA_DSU_936184
errata_dsu_936184_wa_impl
workaround_reset_end cortex_a65, ERRATUM(936184)

@@ -23,6 +23,8 @@
#error "Cortex-A65AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a65ae

workaround_reset_start cortex_a65ae, ERRATUM(936184), ERRATA_DSU_936184
errata_dsu_936184_wa_impl
workaround_reset_end cortex_a65ae, ERRATUM(936184)
@@ -29,6 +29,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A710_BHB_LOOP_COUNT, cortex_a710
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a710

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_a710, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_A710_CPUECTLR_EL1, BIT(46)

@@ -223,7 +225,7 @@ workaround_reset_end cortex_a710, CVE(2022, 23960)

check_erratum_chosen cortex_a710, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772, NO_APPLY_AT_RESET
add_erratum_entry cortex_a710, ERRATUM(3701772), ERRATA_A710_3701772

check_erratum_ls cortex_a710, ERRATUM(3701772), CPU_REV(2, 1)

@@ -28,6 +28,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A715_BHB_LOOP_COUNT, cortex_a715
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a715

workaround_reset_start cortex_a715, ERRATUM(2331818), ERRATA_A715_2331818
sysreg_bit_set CORTEX_A715_CPUACTLR2_EL1, BIT(20)
workaround_reset_end cortex_a715, ERRATUM(2331818)

@@ -129,7 +131,7 @@ workaround_reset_end cortex_a715, CVE(2022, 23960)

check_erratum_chosen cortex_a715, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560, NO_APPLY_AT_RESET
add_erratum_entry cortex_a715, ERRATUM(3699560), ERRATA_A715_3699560

check_erratum_ls cortex_a715, ERRATUM(3699560), CPU_REV(1, 3)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -15,6 +15,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A72_BHB_LOOP_COUNT, cortex_a72
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a72

/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------

@@ -92,7 +94,7 @@ check_erratum_ls cortex_a72, ERRATUM(859971), CPU_REV(0, 3)
/* Due to the nature of the errata it is applied unconditionally when chosen */
check_erratum_chosen cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367
/* erratum workaround is interleaved with generic code */
add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367, NO_APPLY_AT_RESET
add_erratum_entry cortex_a72, ERRATUM(1319367), ERRATA_A72_1319367

workaround_reset_start cortex_a72, CVE(2017, 5715), WORKAROUND_CVE_2017_5715
#if IMAGE_BL31
@@ -22,6 +22,8 @@
#error "Cortex A720 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a720

.global check_erratum_cortex_a720_3699561

#if WORKAROUND_CVE_2022_23960

@@ -74,7 +76,7 @@ workaround_reset_end cortex_a720, CVE(2022, 23960)

check_erratum_chosen cortex_a720, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561, NO_APPLY_AT_RESET
add_erratum_entry cortex_a720, ERRATUM(3699561), ERRATA_A720_3699561

check_erratum_ls cortex_a720, ERRATUM(3699561), CPU_REV(0, 2)

@@ -21,9 +21,11 @@
#error "Cortex-A720AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a720_ae

.global check_erratum_cortex_a720_ae_3699562

add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562, NO_APPLY_AT_RESET
add_erratum_entry cortex_a720_ae, ERRATUM(3699562), ERRATA_A720_AE_3699562

check_erratum_ls cortex_a720_ae, ERRATUM(3699562), CPU_REV(0, 0)

@@ -21,9 +21,11 @@
#error "Cortex-A725 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a725

.global check_erratum_cortex_a725_3699564

add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564, NO_APPLY_AT_RESET
add_erratum_entry cortex_a725, ERRATUM(3699564), ERRATA_A725_3699564

check_erratum_ls cortex_a725, ERRATUM(3699564), CPU_REV(0, 1)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -10,6 +10,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>

cpu_reset_prologue cortex_a73

/* ---------------------------------------------
* Disable L1 data cache
* ---------------------------------------------

@@ -18,6 +18,8 @@
#error "Cortex-A75 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

cpu_reset_prologue cortex_a75

workaround_reset_start cortex_a75, ERRATUM(764081), ERRATA_A75_764081
sysreg_bit_set sctlr_el3, SCTLR_IESB_BIT
workaround_reset_end cortex_a75, ERRATUM(764081)

@@ -30,6 +30,8 @@
#define ESR_EL3_A64_SMC0 0x5e000000
#define ESR_EL3_A32_SMC0 0x4e000000

cpu_reset_prologue cortex_a76

#if DYNAMIC_WORKAROUND_CVE_2018_3639
/*
* This macro applies the mitigation for CVE-2018-3639.

@@ -428,7 +430,7 @@ check_erratum_custom_end cortex_a76, ERRATUM(1165522)
check_erratum_chosen cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

/* erratum has no workaround in the cpu. Generic code must take care */
add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960, NO_APPLY_AT_RESET
add_erratum_entry cortex_a76, CVE(2022, 23960), WORKAROUND_CVE_2022_23960

workaround_reset_start cortex_a76, ERRATUM(798953), ERRATA_DSU_798953
errata_dsu_798953_wa_impl
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2024, Arm Limited. All rights reserved.
* Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -21,6 +21,8 @@
#error "Cortex-A76AE supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_a76ae

#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table CORTEX_A76AE_BHB_LOOP_COUNT, cortex_a76ae
#endif /* WORKAROUND_CVE_2022_23960 */

@@ -17,6 +17,8 @@
#error "Cortex-A77 must be compiled with HW_ASSISTED_COHERENCY enabled"
#endif

cpu_reset_prologue cortex_a77

/* 64-bit only core */
#if CTX_INCLUDE_AARCH32_REGS == 1
#error "Cortex-A77 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2024, Arm Limited. All rights reserved.
* Copyright (c) 2019-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -24,6 +24,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78_BHB_LOOP_COUNT, cortex_a78
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a78

/* Disable hardware page aggregation.Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_a78, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_A78_CPUECTLR_EL1, BIT(46)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2019-2024, Arm Limited. All rights reserved.
* Copyright (c) 2019-2025, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause

@@ -22,6 +22,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78_AE_BHB_LOOP_COUNT, cortex_a78_ae
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a78_ae

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_a78_ae, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_A78_AE_CPUECTLR_EL1, BIT(46)

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2021-2024, Arm Limited. All rights reserved.
* Copyright (c) 2021-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -21,6 +21,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_A78C_BHB_LOOP_COUNT, cortex_a78c
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_a78c

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_a78c, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_A78C_CPUECTLR_EL1, BIT(46)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Arm Limited. All rights reserved.
* Copyright (c) 2024-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -25,6 +25,8 @@
#error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly"
#endif

cpu_reset_prologue cortex_alto

cpu_reset_func_start cortex_alto
/* Disable speculative loads */
msr SSBS, xzr

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2024, Arm Limited. All rights reserved.
* Copyright (c) 2024-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -21,6 +21,8 @@
#error "Cortex-ARCADIA supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_arcadia

cpu_reset_func_start cortex_arcadia
/* Disable speculative loads */
msr SSBS, xzr

@@ -29,6 +29,8 @@
#error "Gelas needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly"
#endif

cpu_reset_prologue cortex_gelas

cpu_reset_func_start cortex_gelas
/* ----------------------------------------------------
* Disable speculative loads
@@ -23,6 +23,8 @@
wa_cve_2022_23960_bhb_vector_table CORTEX_X1_BHB_LOOP_COUNT, cortex_x1
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_x1

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_x1, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_X1_CPUECTLR_EL1, BIT(46)

@@ -25,7 +25,7 @@

.global check_erratum_cortex_x2_3701772

add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772, NO_APPLY_AT_RESET
add_erratum_entry cortex_x2, ERRATUM(3701772), ERRATA_X2_3701772

check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)

@@ -33,6 +33,8 @@ check_erratum_ls cortex_x2, ERRATUM(3701772), CPU_REV(2, 1)
wa_cve_2022_23960_bhb_vector_table CORTEX_X2_BHB_LOOP_COUNT, cortex_x2
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_x2

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_x2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_X2_CPUECTLR_EL1, BIT(46)

@@ -24,7 +24,7 @@

.global check_erratum_cortex_x3_3701769

add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769, NO_APPLY_AT_RESET
add_erratum_entry cortex_x3, ERRATUM(3701769), ERRATA_X3_3701769

check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)

@@ -32,6 +32,8 @@ check_erratum_ls cortex_x3, ERRATUM(3701769), CPU_REV(1, 2)
wa_cve_2022_23960_bhb_vector_table CORTEX_X3_BHB_LOOP_COUNT, cortex_x3
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue cortex_x3

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start cortex_x3, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set CORTEX_X3_CPUECTLR_EL1, BIT(46)
@@ -22,6 +22,8 @@
#error "Cortex X4 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue cortex_x4

.global check_erratum_cortex_x4_2726228
.global check_erratum_cortex_x4_3701758

@@ -122,7 +124,7 @@ workaround_reset_end cortex_x4, CVE(2024, 7881)

check_erratum_chosen cortex_x4, CVE(2024, 7881), WORKAROUND_CVE_2024_7881

add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758, NO_APPLY_AT_RESET
add_erratum_entry cortex_x4, ERRATUM(3701758), ERRATA_X4_3701758

check_erratum_ls cortex_x4, ERRATUM(3701758), CPU_REV(0, 3)

@@ -21,9 +21,9 @@
#error "Cortex-X925 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

.global check_erratum_cortex_x925_3701747
cpu_reset_prologue cortex_x925

add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747, NO_APPLY_AT_RESET
add_erratum_entry cortex_x925, ERRATUM(3701747), ERRATA_X925_3701747

check_erratum_ls cortex_x925, ERRATUM(3701747), CPU_REV(0, 1)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2015-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2020-2022, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause

@@ -13,6 +13,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>

cpu_reset_prologue denver

/* -------------------------------------------------
* CVE-2017-5715 mitigation
*

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, Arm Limited. All rights reserved.
* Copyright (c) 2020-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -11,6 +11,8 @@
#include <cpu_macros.S>
#include <plat_macros.S>

cpu_reset_prologue generic

/* ---------------------------------------------
* Disable L1 data cache and unified L2 cache
* ---------------------------------------------
@@ -22,6 +22,8 @@
#error "Neoverse-E1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue neoverse_e1

workaround_reset_start neoverse_e1, ERRATUM(936184), ERRATA_DSU_936184
errata_dsu_936184_wa_impl
workaround_reset_end neoverse_e1, ERRATUM(936184)

@@ -28,6 +28,8 @@
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N1_BHB_LOOP_COUNT, neoverse_n1
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue neoverse_n1

workaround_reset_start neoverse_n1, ERRATUM(936184), ERRATA_DSU_936184
errata_dsu_936184_wa_impl
workaround_reset_end neoverse_n1, ERRATUM(936184)

@@ -23,7 +23,7 @@

.global check_erratum_neoverse_n2_3701773

add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773, NO_APPLY_AT_RESET
add_erratum_entry neoverse_n2, ERRATUM(3701773), ERRATA_N2_3701773

check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)

@@ -31,6 +31,8 @@ check_erratum_ls neoverse_n2, ERRATUM(3701773), CPU_REV(0, 3)
wa_cve_2022_23960_bhb_vector_table NEOVERSE_N2_BHB_LOOP_COUNT, neoverse_n2
#endif /* WORKAROUND_CVE_2022_23960 */

cpu_reset_prologue neoverse_n2

workaround_reset_start neoverse_n2, ERRATUM(2313941), ERRATA_DSU_2313941
errata_dsu_2313941_wa_impl
workaround_reset_end neoverse_n2, ERRATUM(2313941)
@@ -21,9 +21,11 @@
#error "Neoverse-N3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue neoverse_n3

.global check_erratum_neoverse_n3_3699563

add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563, NO_APPLY_AT_RESET
add_erratum_entry neoverse_n3, ERRATUM(3699563), ERRATA_N3_3699563

check_erratum_ls neoverse_n3, ERRATUM(3699563), CPU_REV(0, 0)

@@ -22,6 +22,8 @@
#error "Neoverse-V1 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue neoverse_v1

#if WORKAROUND_CVE_2022_23960
wa_cve_2022_23960_bhb_vector_table NEOVERSE_V1_BHB_LOOP_COUNT, neoverse_v1
#endif /* WORKAROUND_CVE_2022_23960 */

@@ -22,6 +22,8 @@
#error "Neoverse V2 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue neoverse_v2

/* Disable hardware page aggregation. Enables mitigation for `CVE-2024-5660` */
workaround_reset_start neoverse_v2, CVE(2024, 5660), WORKAROUND_CVE_2024_5660
sysreg_bit_set NEOVERSE_V2_CPUECTLR_EL1, BIT(46)

@@ -22,9 +22,11 @@
#error "Neoverse V3 supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue neoverse_v3

.global check_erratum_neoverse_v3_3701767

add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767, NO_APPLY_AT_RESET
add_erratum_entry neoverse_v3, ERRATUM(3701767), ERRATA_V3_3701767

check_erratum_ls neoverse_v3, ERRATUM(3701767), CPU_REV(0, 2)
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2023-2024, Arm Limited. All rights reserved.
* Copyright (c) 2023-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -21,6 +21,8 @@
#error "Nevis supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue nevis

cpu_reset_func_start nevis
/* ----------------------------------------------------
* Disable speculative loads

@@ -8,6 +8,8 @@
#include <cpu_macros.S>
#include <qemu_max.h>

cpu_reset_prologue qemu_max

func qemu_max_core_pwr_dwn
/* ---------------------------------------------
* Disable the Data Cache.
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2020-2024, Arm Limited. All rights reserved.
* Copyright (c) 2020-2025, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/

@@ -21,6 +21,8 @@
#error "Rainier CPU supports only AArch64. Compile with CTX_INCLUDE_AARCH32_REGS=0"
#endif

cpu_reset_prologue rainier

/* --------------------------------------------------
* Disable speculative loads if Rainier supports
* SSBS.

@@ -29,6 +29,8 @@
#error "Travis needs ERRATA_SME_POWER_DOWN=1 to powerdown correctly"
#endif

cpu_reset_prologue travis

cpu_reset_func_start travis
/* ----------------------------------------------------
* Disable speculative loads