
The simplistic view of a core's powerdown sequence is that power is atomically cut upon calling `wfi`. However, it turns out the core has a lot to do: it has to talk to the interconnect to exit coherency, clean caches, check for RAS errors, etc. These steps take a significant amount of time and are certainly not atomic, so there is a significant window of opportunity for external events to happen. Many of the steps are not destructive to context, so in theory the core can just "give up" halfway through (or roll certain actions back) and carry on running. The point in this sequence after which rollback is no longer possible is called the point of no return.

One of these actions is the check for RAS errors. It is possible for one to happen during this lengthy sequence, or at least to remain undiscovered until that point. If the core were to continue powering down when that happens, there would be no (easy) way to inform anyone about it. Rejecting the powerdown and letting software handle the error is the best way to implement this. Arm cores since at least the Cortex-A510 have included this exact feature. So far it hasn't been deemed necessary to account for it in firmware, due to the low likelihood of it happening.

However, events like GIC wakeup requests are much more probable. Older cores will power down and immediately power back up when this happens. Travis and Gelas include a feature similar to the RAS case above, called powerdown abandon. The idea is to improve the latency to service the interrupt by saving on the work which the core and software would otherwise need to do.

So far firmware has relied on the `wfi` being the point of no return: if it doesn't explicitly detect a pending interrupt quite early on, it embarks on a sequence that it expects to end with shutdown. To accommodate the `wfi` no longer being a point of no return, we must undo all of the system management we did, just like in the warm boot entrypoint.

To achieve that, the pwr_domain_pwr_down_wfi hook must not be terminal. Most recent platforms do some platform management and finish on the standard `wfi`, followed by a panic or an endless loop, as this is expected not to return. To make this generic, any platform that wishes to support wakeups must instead let common code call `psci_power_down_wfi()` right after. Besides wakeups, this also lets common code handle powerdown errata better.

The CPU_OFF case is then simple - PSCI does not allow it to return. So the best that can be done is to attempt the `wfi` a few times (the choice of 32 is arbitrary) in the hope that the wakeup is transient. If it isn't, the only choice is to panic, as the system is likely to be in a bad state, e.g. interrupts weren't routed away. The same applies to SYSTEM_OFF, SYSTEM_RESET, and SYSTEM_RESET2; there the panic won't matter, as the system is going offline one way or another. The RAS case will be considered in a separate patch.
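To make the CPU_OFF handling concrete, here is a minimal C sketch of the retry idea. It is illustrative only, not the patch's actual code: the retry constant and the function name are made up for this example, while psci_power_down_wfi() and panic() are the existing TF-A helpers.

/* Illustrative only: bounded retry of the terminal wfi for CPU_OFF. */
#define PWRDOWN_WFI_ATTEMPTS	32U	/* arbitrary bound, per the commit message */

static void example_cpu_off_wfi(void)
{
	unsigned int i;

	for (i = 0U; i < PWRDOWN_WFI_ATTEMPTS; i++) {
		/* Returns only if the powerdown was denied/abandoned. */
		psci_power_down_wfi();
	}

	/*
	 * The wakeup is not transient; the system is likely in a bad state
	 * (e.g. interrupts weren't routed away), so all that is left is to panic.
	 */
	panic();
}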
Now, the CPU_SUSPEND case is more involved. First, to power down, the core must wipe its context, as it is not written on warm boot; but the context cannot be overwritten in case of a wakeup. To avoid the catch-22, save a copy that will only be used if the powerdown fails. That is about 500 bytes on the stack, so it hopefully doesn't tip anyone over any limits. In future this can be avoided by having a core manage its own context. Second, when the core wakes up, it must undo anything it did to prepare for poweroff, which for the cores we care about is writing CPUPWRCTLR_EL1.CORE_PWRDN_EN. The least intrusive way for the cpu library to do this is to simply call the power off hook again and have the hook toggle the bit. If more complex sequences are needed in the future, their direction can be decided based on the value of this bit. Third, do the actual "resume". Most of the logic is already there for the retention suspend, so it only needs a small touch-up to apply to the powerdown case as well. The missing bit is the powerdown-specific state management. Luckily, the warm boot entrypoint does exactly that already too, so steal that and we're done.

All of this is hidden behind a FEAT_PABANDON flag, since it has a large memory and runtime cost that we don't want to burden non-pabandon cores with.

Finally, rename some functions to better reflect their purpose and make the names a little more consistent.

Change-Id: I2405b59300c2e24ce02e266f91b7c51474c1145f
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
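For illustration, the CPU_SUSPEND handling described above (a stack copy of the context plus re-calling the power down hook on wakeup) could look roughly like the following C sketch. The call sequence and the C-callable use of prepare_cpu_pwr_dwn() are assumptions made for this example; cm_get_context(), cpu_context_t and NON_SECURE are the usual TF-A context-management interfaces, and the snippet assumes the corresponding headers plus <string.h>.

/*
 * Illustrative only: keep a stack copy of the CPU context so a powerdown
 * abandon can restore it, and call the CPU power down routine a second
 * time after wakeup to toggle CPUPWRCTLR_EL1.CORE_PWRDN_EN back off.
 */
static void example_suspend_powerdown(unsigned int power_level)
{
	cpu_context_t ctx_copy;			/* roughly 500 bytes on the stack */

	/* Powerdown preparation wipes the live context, so keep a copy
	 * that survives an abandoned powerdown. */
	memcpy(&ctx_copy, cm_get_context(NON_SECURE), sizeof(ctx_copy));

	prepare_cpu_pwr_dwn(power_level);	/* sets CORE_PWRDN_EN */
	psci_power_down_wfi();			/* returns only on a powerdown abandon */

	/* Wakeup: calling the same hook again clears CORE_PWRDN_EN. */
	prepare_cpu_pwr_dwn(power_level);

	/* Restore the context that would otherwise have been lost. */
	memcpy(cm_get_context(NON_SECURE), &ctx_copy, sizeof(ctx_copy));
}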
141 lines
4.3 KiB
ArmAsm
/*
 * Copyright (c) 2014-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>
#include <assert_macros.S>
#include <cpu_macros.S>
#include <lib/psci/psci.h>
#include <platform_def.h>

	.globl	psci_do_pwrdown_cache_maintenance
	.globl	psci_do_pwrup_cache_maintenance
	.globl	psci_power_down_wfi

/* -----------------------------------------------------------------------
 * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
 *
 * This function performs cache maintenance for the specified power
 * level. The levels of cache affected are determined by the power
 * level which is passed as the argument, i.e. level 0 results
 * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
 * for a higher power level.
 *
 * Additionally, this function also ensures that stack memory is correctly
 * flushed out to avoid coherency issues due to a change in its memory
 * attributes after the data cache is disabled.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrdown_cache_maintenance
	stp	x29, x30, [sp,#-16]!
	stp	x19, x20, [sp,#-16]!

	/* ---------------------------------------------
	 * Invoke CPU-specific power down operations for
	 * the appropriate level
	 * ---------------------------------------------
	 */
	bl	prepare_cpu_pwr_dwn

	/* ---------------------------------------------
	 * Do stack maintenance by flushing the used
	 * stack to the main memory and invalidating the
	 * remainder.
	 * ---------------------------------------------
	 */
	bl	plat_get_my_stack

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1.
	 * ---------------------------------------------
	 */
	mov	x19, x0
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	flush_dcache_range

	/* ---------------------------------------------
	 * Calculate and store the size of the unused
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	sub	x0, x19, #PLATFORM_STACK_SIZE
	sub	x1, sp, x0
	bl	inv_dcache_range

	ldp	x19, x20, [sp], #16
	ldp	x29, x30, [sp], #16
	ret
endfunc psci_do_pwrdown_cache_maintenance


/* -----------------------------------------------------------------------
 * void psci_do_pwrup_cache_maintenance(void);
 *
 * This function performs cache maintenance after this cpu is powered up.
 * Currently, this involves managing the used stack memory before turning
 * on the data cache.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrup_cache_maintenance
	stp	x29, x30, [sp,#-16]!

	/* ---------------------------------------------
	 * Ensure any inflight stack writes have made it
	 * to main memory.
	 * ---------------------------------------------
	 */
	dmb	st

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	bl	plat_get_my_stack
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Enable the data cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb

	ldp	x29, x30, [sp], #16
	ret
endfunc psci_do_pwrup_cache_maintenance

/* -----------------------------------------------------------------------
 * void psci_power_down_wfi(void); This function is called to indicate to the
 * power controller that it is safe to power down this cpu. It may exit if the
 * request was denied and reset did not occur
 * -----------------------------------------------------------------------
 */
func psci_power_down_wfi
	apply_erratum cortex_a510, ERRATUM(2684597), ERRATA_A510_2684597

	dsb	sy		// ensure write buffer empty
	wfi

	/*
	 * in case the WFI wasn't terminal, we have to undo errata mitigations.
	 * These will be smart enough to handle being called the same way
	 */
	apply_erratum cortex_a710, ERRATUM(2291219), ERRATA_A710_2291219
	apply_erratum cortex_x3, ERRATUM(2313909), ERRATA_X3_2313909, NO_GET_CPU_REV
	apply_erratum neoverse_n2, ERRATUM(2326639), ERRATA_N2_2326639, NO_GET_CPU_REV

	ret
endfunc psci_power_down_wfi
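As the commit message notes, a platform that wants to support wakeups must no longer treat its pwr_domain_pwr_down_wfi hook as terminal; common code calls psci_power_down_wfi() right after it. A hypothetical platform hook written against that scheme might look like the sketch below; the platform name, the power-controller helper it calls and the exact hook signature are assumptions, with only psci_power_state_t and plat_my_core_pos() taken from the existing TF-A interfaces.

/*
 * Hypothetical platform hook: do the platform-specific powerdown
 * preparation and simply return. PSCI common code then issues the
 * terminal wfi via psci_power_down_wfi() and can cope with the
 * powerdown being abandoned.
 */
static void foo_pwr_domain_pwr_down_wfi(const psci_power_state_t *target_state)
{
	/* e.g. tell the platform's power controller to take this core offline */
	foo_power_controller_core_off(plat_my_core_pos());

	/* No terminal wfi, endless loop or panic here any more. */
}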