arm-trusted-firmware/lib/cpus/aarch32/aem_generic.S
Boyan Karatotev 0d020822ae perf(cpus): inline the reset function
Similar to the cpu_rev_var and cpu_get_rev_var functions, inline the
call_reset_handler helper. This way we skip the costly branch at no
extra cost, as this is the only place it is called from.

While we're at it, drop the CPU_NO_RESET_FUNC option. The only cpus
that need it are virtual cpus, which can spare the tiny bit of
performance lost. The rest are real cores, which can save on the check
for zero.

Now is a good time to put the assert for a missing cpu in the
get_cpu_ops_ptr function so that it's a bit better encapsulated.

Change-Id: Ia7c3dcd13b75e5d7c8bafad4698994ea65f42406
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
2025-02-24 09:36:10 +00:00
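
For context, the change boils down to doing the cpu_ops lookup directly at the
reset call site instead of branching out to a shared helper. The fragment below
is only an illustrative sketch, not the actual patch; it assumes the reset
handler sits at a CPU_RESET_FUNC offset inside the cpu_ops structure, and the
exact instruction sequence is a reconstruction rather than the upstream code:

	/* Before: branch out to the shared helper, which does the lookup */
	bl	call_reset_handler

	/* After (sketch): the lookup is emitted inline at the only call site */
	bl	get_cpu_ops_ptr			/* r0 := cpu_ops for this core */
	ldr	r1, [r0, #CPU_RESET_FUNC]	/* fetch its reset handler */
	blx	r1				/* e.g. aem_generic_reset_func */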

/*
 * Copyright (c) 2016-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <aem_generic.h>
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>

func aem_generic_core_pwr_dwn
	/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif
	/* ---------------------------------------------
	 * Flush L1 cache to PoU.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	b	dcsw_op_louis
endfunc aem_generic_core_pwr_dwn

func aem_generic_cluster_pwr_dwn
	/* Assert if cache is enabled */
#if ENABLE_ASSERTIONS
	ldcopr	r0, SCTLR
	tst	r0, #SCTLR_C_BIT
	ASM_ASSERT(eq)
#endif
	/* ---------------------------------------------
	 * Flush L1 and L2 caches to PoC.
	 * ---------------------------------------------
	 */
	mov	r0, #DC_OP_CISW
	b	dcsw_op_all
endfunc aem_generic_cluster_pwr_dwn
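
/*
 * The AEM is a model, so there is nothing to set up at reset. An empty
 * handler is provided because CPU_NO_RESET_FUNC has been dropped.
 */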
func aem_generic_reset_func
	bx	lr
endfunc aem_generic_reset_func

/* cpu_ops for Base AEM FVP */
declare_cpu_ops aem_generic, BASE_AEM_MIDR, aem_generic_reset_func, \
	aem_generic_core_pwr_dwn, \
	aem_generic_cluster_pwr_dwn