
The simplistic view of a core's powerdown sequence is that power is atomically cut upon calling `wfi`. However, it turns out that the core has lots to do - it has to talk to the interconnect to exit coherency, clean caches, check for RAS errors, etc. These steps take significant amounts of time and are certainly not atomic. As such there is a significant window of opportunity for external events to happen. Many of these steps are not destructive to context, so theoretically the core can just "give up" half way (or roll certain actions back) and carry on running. The point in this sequence after which rollback is not possible is called the point of no return.

One of these actions is the checking for RAS errors. It is possible for one to happen during this lengthy sequence, or at least remain undiscovered until that point. If the core were to continue powerdown when that happens, there would be no (easy) way to inform anyone about it. Rejecting the powerdown and letting software handle the error is the best way to implement this. Arm cores since at least the Cortex-A510 have included this exact feature. So far it hasn't been deemed necessary to account for it in firmware due to the low likelihood of this happening.

However, events like GIC wakeup requests are much more probable. Older cores will powerdown and immediately power back up when this happens. Travis and Gelas include a feature similar to the RAS case above, called powerdown abandon. The idea is that this will improve the latency to service the interrupt by saving on the work which the core and software need to do.

So far firmware has relied on the `wfi` being the point of no return: if it doesn't explicitly detect a pending interrupt quite early on, it will embark on a sequence that it expects to end with shutdown. To accommodate `wfi` not being a point of no return, we must undo all of the system management we did, just like in the warm boot entrypoint.

To achieve that, the pwr_domain_pwr_down_wfi hook must not be terminal. Most recent platforms do some platform management and finish on the standard `wfi`, followed by a panic or an endless loop, as this is expected not to return. To make this generic, any platform that wishes to support wakeups must instead let common code call `psci_power_down_wfi()` right after. Besides wakeups, this lets common code handle powerdown errata better as well.

Then, the CPU_OFF case is simple - PSCI does not allow it to return. So the best that can be done is to attempt the `wfi` a few times (the choice of 32 is arbitrary) in the hope that the wakeup is transient. If it isn't, the only choice is to panic, as the system is likely to be in a bad state, e.g. interrupts weren't routed away. The same applies to SYSTEM_OFF, SYSTEM_RESET, and SYSTEM_RESET2. There the panic won't matter, as the system is going offline one way or another. The RAS case will be considered in a separate patch.
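As a rough sketch of that retry-then-panic flow (the wrapper function name is hypothetical; `psci_power_down_wfi()` and the arbitrary count of 32 are the pieces named above, and this is not guaranteed to match the patch verbatim):

```c
/* Illustrative sketch only, assuming TF-A's panic() and __dead2. */
static void __dead2 psci_wfi_until_off(void)
{
	/* 32 is arbitrary; a transient wakeup should clear within a few
	 * attempts. */
	for (unsigned int i = 0U; i < 32U; i++) {
		/* returns only if the powerdown is abandoned, e.g. on a
		 * GIC wakeup request */
		psci_power_down_wfi();
	}

	/* Wakeups keep arriving - interrupts were likely not routed away,
	 * so the system is in a bad state. */
	panic();
}
```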
Now, the CPU_SUSPEND case is more involved. First, to powerdown the core must wipe its context, as it is not written on warm boot. But it cannot be overwritten in case of a wakeup. To avoid the catch-22, save a copy that will only be used if powerdown fails. That is about 500 bytes on the stack, so it hopefully doesn't tip anyone over any limits. In future that can be avoided by having a core manage its own context.

Second, when the core wakes up, it must undo anything it did to prepare for poweroff, which, for the cores we care about, is writing CPUPWRCTLR_EL1.CORE_PWRDN_EN. The least intrusive way of doing this, from the cpu library's point of view, is to simply call the power off hook again and have the hook toggle the bit. If more complex sequences are needed in the future, they can take their direction from the value of this bit (a sketch of such a hook appears after this message).

Third, do the actual "resume". Most of the logic is already there for the retention suspend, so that only needs a small touch-up to apply to the powerdown case as well. The missing bit is the powerdown-specific state management. Luckily, the warm boot entrypoint does exactly that already too, so steal that and we're done.

All of this is hidden behind a FEAT_PABANDON flag, since it has a large memory and runtime cost that we don't want to burden non-pabandon cores with.

Finally, rename some functions to better reflect their purpose and make the names a little more consistent.

Change-Id: I2405b59300c2e24ce02e266f91b7c51474c1145f
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
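The CORE_PWRDN_EN toggle described in the message above could look roughly like the following sketch. The register and bit come from the text; the hook name and the sysreg accessors are hypothetical stand-ins (the real per-cpu hooks live in assembly), so treat this as illustration only:

```c
/* Illustrative pabandon-aware power-off hook: set the bit on the first
 * call (preparing for powerdown), clear it on the second (undoing an
 * abandoned powerdown). Accessor and constant names are assumptions. */
static void pabandon_core_pwr_dwn(void)
{
	u_register_t ctl = read_cpupwrctlr_el1();

	write_cpupwrctlr_el1(ctl ^ CPUPWRCTLR_EL1_CORE_PWRDN_EN_BIT);
}
```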
/*
 * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef PSCI_PRIVATE_H
#define PSCI_PRIVATE_H

#include <stdbool.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <lib/bakery_lock.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/psci/psci.h>
#include <lib/spinlock.h>
/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
#define PSCI_GENERIC_CAP	\
	(define_psci_cap(PSCI_VERSION) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_FEATURES))

/*
 * The PSCI capabilities mask for 64 bit functions.
 */
#define PSCI_CAP_64BIT_MASK	\
	(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
	define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
	define_psci_cap(PSCI_MIG_AARCH64) |		\
	define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
	define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
	define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
	define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
	define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
	define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
	define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
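/*
 * Illustrative addition (not in the original header): psci_caps, declared
 * further below, is a bitmask assembled from define_psci_cap(), so testing
 * whether a given function ID is advertised reduces to a single bit test.
 * The macro name is hypothetical.
 */
#define psci_cap_is_set(_fid)	((psci_caps & define_psci_cap(_fid)) != 0U)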
/*
 * Internally PSCI uses a uint16_t for various cpu indexes so
 * define a limit to the number of CPUs that can be initialised.
 */
#define PSCI_MAX_CPUS_INDEX	0xFFFFU

/* Invalid parent */
#define PSCI_PARENT_NODE_INVALID	0xFFFFFFFFU
/*
 * Helper functions to get/set the fields of PSCI per-cpu data.
 */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}

static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}

static inline aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.aff_info_state);
}

static inline void psci_set_aff_info_state_by_idx(unsigned int idx,
						  aff_info_state_t aff_state)
{
	set_cpu_data_by_index(idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}

static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}

static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}

static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}

static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}

static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
		unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.local_state);
}

/* Helper function to identify a CPU standby request in PSCI Suspend call */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
				      unsigned int retn_lvl)
{
	return (is_power_down_state == 0U) && (retn_lvl == 0U);
}
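/*
 * Usage sketch (illustrative addition, hypothetical helper): the accessors
 * above combine with is_cpu_standby_req() on the suspend path roughly as
 * follows - a standby request keeps context, while a powerdown request
 * stashes the target level in per-cpu data for the warm boot path to read
 * back via psci_get_suspend_pwrlvl().
 */
static inline void psci_note_suspend_req_sketch(unsigned int is_power_down_state,
						unsigned int target_pwrlvl)
{
	if (!is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
		psci_set_suspend_pwrlvl(target_pwrlvl);
	}
}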
/*******************************************************************************
 * The following two data structures implement the power domain tree. The tree
 * is used to track the state of all the nodes i.e. power domain instances
 * described by the platform. The tree consists of nodes that describe CPU power
 * domains i.e. leaf nodes and all other power domains which are parents of a
 * CPU power domain i.e. non-leaf nodes.
 ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	plat_local_state_t local_state;

	unsigned char level;

	/* For indexing the psci_lock array */
	uint16_t lock_index;
} non_cpu_pd_node_t;

typedef struct cpu_pwr_domain_node {
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
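/*
 * Illustrative addition (hypothetical helper, not in the original header):
 * a CPU node's ancestors are found by chasing parent_node indices up the
 * tree, which is what psci_get_parent_pwr_domain_nodes(), declared below,
 * does over the psci_cpu_pd_nodes/psci_non_cpu_pd_nodes arrays.
 */
static inline void psci_collect_parent_nodes_sketch(
		const cpu_pd_node_t *cpu_nodes,
		const non_cpu_pd_node_t *pd_nodes,
		unsigned int cpu_idx, unsigned int end_lvl,
		unsigned int *node_index)
{
	unsigned int parent = cpu_nodes[cpu_idx].parent_node;

	/* one parent index per level, from level 1 up to end_lvl */
	for (unsigned int lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_lvl; lvl++) {
		node_index[lvl - 1U] = parent;
		parent = pd_nodes[parent].parent_node;
	}
}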
#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * The supported power state coordination modes that can be used in CPU_SUSPEND.
 ******************************************************************************/
typedef enum suspend_mode {
	PLAT_COORD = 0,
	OS_INIT = 1
} suspend_mode_t;
#endif
/*******************************************************************************
 * The following are helpers and declarations of locks.
 ******************************************************************************/
#if HW_ASSISTED_COHERENCY
/*
 * On systems where participant CPUs are cache-coherent, we can use spinlocks
 * instead of bakery locks.
 */
#define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
#define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*
 * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
 * as PSCI participants are cache-coherent, and there's no need for explicit
 * cache maintenance operations or barriers to coordinate their state.
 */
static inline void psci_flush_dcache_range(uintptr_t __unused addr,
					   size_t __unused size)
{
	/* Empty */
}

#define psci_flush_cpu_data(member)
#define psci_inv_cpu_data(member)

static inline void psci_dsbish(void)
{
	/* Empty */
}

static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
}

static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
}

#else /* if HW_ASSISTED_COHERENCY == 0 */
/*
 * Use bakery locks for state coordination as not all PSCI participants are
 * cache coherent.
 */
#define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
#define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)

/* One lock is required per non-CPU power domain node */
DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

/*
 * If not all PSCI participants are cache-coherent, perform cache maintenance
 * and issue barriers wherever required to coordinate state.
 */
static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
{
	flush_dcache_range(addr, size);
}

#define psci_flush_cpu_data(member)	flush_cpu_data(member)
#define psci_inv_cpu_data(member)	inv_cpu_data(member)

static inline void psci_dsbish(void)
{
	dsbish();
}

static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
}

static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
}

#endif /* HW_ASSISTED_COHERENCY */
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
				  uint16_t idx)
{
	non_cpu_pd_node[idx].lock_index = idx;
}
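/*
 * Usage sketch (illustrative addition, hypothetical helper): whichever lock
 * type was selected above, state coordination brackets its reads and writes
 * of a non-CPU node's state with that node's lock.
 */
static inline void psci_update_node_state_sketch(non_cpu_pd_node_t *node,
						 plat_local_state_t state)
{
	psci_lock_get(node);
	node->local_state = state;
	psci_lock_release(node);
}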
/*******************************************************************************
 * Data prototypes
 ******************************************************************************/
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;
extern unsigned int psci_plat_core_count;
#if PSCI_OS_INIT_MODE
extern suspend_mode_t psci_suspend_mode;
#endif
/*******************************************************************************
 * SPD's power management hooks registered with PSCI
 ******************************************************************************/
extern const spd_pm_ops_t *psci_spd_pm;
/*******************************************************************************
 * Function prototypes
 ******************************************************************************/
/* Private exported functions from psci_common.c */
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
void psci_init_req_local_pwr_states(void);
#if PSCI_OS_INIT_MODE
void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
				      unsigned int cpu_idx,
				      psci_power_state_t *state_info,
				      plat_local_state_t *prev);
void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
				       plat_local_state_t *prev);
#endif
void psci_get_target_local_pwr_states(unsigned int cpu_idx,
				      unsigned int end_pwrlvl,
				      psci_power_state_t *target_state);
void psci_set_target_local_pwr_states(unsigned int cpu_idx,
				      unsigned int end_pwrlvl,
				      const psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index);
void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
				psci_power_state_t *state_info);
#if PSCI_OS_INIT_MODE
int psci_validate_state_coordination(unsigned int cpu_idx,
				     unsigned int end_pwrlvl,
				     psci_power_state_t *state_info);
#endif
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
bool psci_is_last_on_cpu(unsigned int my_idx);
int psci_spd_migrate_info(u_register_t *mpidr);
/*
 * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
 * available. Otherwise, this needs post-call stack maintenance, which is
 * handled in assembly.
 */
void prepare_cpu_pwr_dwn(unsigned int power_level);

/* This function applies various CPU errata during power down. */
void apply_cpu_pwr_dwn_errata(void);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(u_register_t target_cpu,
		      const entry_point_info_t *ep);

void psci_cpu_on_finish(unsigned int cpu_idx,
			const psci_power_state_t *state_info);

/* Private exported functions from psci_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);

/* Private exported functions from psci_suspend.c */
int psci_cpu_suspend_start(unsigned int idx,
			   const entry_point_info_t *ep,
			   unsigned int end_pwrlvl,
			   psci_power_state_t *state_info,
			   unsigned int is_power_down_state);

void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx,
					  unsigned int max_off_lvl,
					  const psci_power_state_t *state_info);

/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);

/* Private exported functions from psci_system_off.c */
void __dead2 psci_system_off(void);
void __dead2 psci_system_reset(void);
u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);

/* Private exported functions from psci_stat.c */
void psci_stats_update_pwr_down(unsigned int cpu_idx, unsigned int end_pwrlvl,
				const psci_power_state_t *state_info);
void psci_stats_update_pwr_up(unsigned int cpu_idx, unsigned int end_pwrlvl,
			      const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
				 unsigned int power_state);
u_register_t psci_stat_count(u_register_t target_cpu,
			     unsigned int power_state);

/* Private exported functions from psci_mem_protect.c */
u_register_t psci_mem_protect(unsigned int enable);
u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);

#endif /* PSCI_PRIVATE_H */