Merge changes I137f69be,Ia2e7168f,I0e569d12,I614272ec,Ib68293f2 into integration

* changes:
  perf(psci): pass my_core_pos around instead of calling it repeatedly
  refactor(psci): move timestamp collection to psci_pwrdown_cpu
  refactor(psci): factor common code out of the standby finisher
  refactor(psci): don't use PSCI_INVALID_PWR_LVL to signal OFF state
  docs(psci): drop outdated cache maintenance comment
Govindraj Raja, 2025-01-15 17:03:27 +01:00; committed by TrustedFirmware Code Review
commit f532cd3069
12 changed files with 130 additions and 161 deletions
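The common thread of the five changes above, visible throughout the diffs below, is that callers now look up their core position once with plat_my_core_pos() and pass the resulting index down to the PSCI helpers, instead of each helper re-querying it internally. A minimal, self-contained C sketch of that pattern follows; the helper name here is a hypothetical stand-in for illustration, not one of the TF-A functions touched by this series.

    #include <stdio.h>

    /* Stand-in for TF-A's plat_my_core_pos(); real firmware derives the core
     * index from MPIDR, here it is hard-coded purely for illustration. */
    static unsigned int plat_my_core_pos(void)
    {
        return 0U;
    }

    /* Hypothetical per-core helper: after this series the PSCI helpers take
     * the caller's core index as a parameter rather than calling
     * plat_my_core_pos() again themselves. */
    static void do_per_core_work(unsigned int this_cpu_idx)
    {
        printf("operating on core %u\n", this_cpu_idx);
    }

    int main(void)
    {
        unsigned int my_idx = plat_my_core_pos();   /* looked up once */

        do_per_core_work(my_idx);                   /* then passed around */
        return 0;
    }

The same reasoning motivates the new this_cpu_idx/cpu_idx parameters on psci_stop_other_cores(), psci_is_last_on_cpu_safe() and the other helpers changed below.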


@@ -312,7 +312,7 @@ void __dead2 css_scp_system_off(int state)
/*
* Send powerdown request to online secondary core(s)
*/
- ret = psci_stop_other_cores(0, css_raise_pwr_down_interrupt);
+ ret = psci_stop_other_cores(plat_my_core_pos(), 0, css_raise_pwr_down_interrupt);
if (ret != PSCI_E_SUCCESS) {
ERROR("Failed to powerdown secondary core(s)\n");
}


@@ -302,7 +302,7 @@ typedef struct psci_cpu_data {
/*
* Highest power level which takes part in a power management
- * operation.
+ * operation. May be lower while the core is in suspend state.
*/
unsigned int target_pwrlvl;


@@ -89,10 +89,10 @@ void psci_warmboot_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm);
void psci_prepare_next_non_secure_ctx(
entry_point_info_t *next_image_info);
- int psci_stop_other_cores(unsigned int wait_ms,
+ int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
void (*stop_func)(u_register_t mpidr));
- bool psci_is_last_on_cpu_safe(void);
- bool psci_are_all_cpus_on_safe(void);
+ bool psci_is_last_on_cpu_safe(unsigned int this_core);
+ bool psci_are_all_cpus_on_safe(unsigned int this_core);
void psci_pwrdown_cpu(unsigned int power_level);
void psci_do_manage_extensions(void);


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -16,6 +16,8 @@
#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/extensions/spe.h>
+ #include <lib/pmf/pmf.h>
+ #include <lib/runtime_instr.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
@@ -170,9 +172,9 @@ void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
* Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
* otherwise.
******************************************************************************/
- static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int end_pwrlvl)
+ static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int my_idx, unsigned int end_pwrlvl)
{
- unsigned int my_idx, lvl;
+ unsigned int lvl;
unsigned int parent_idx = 0;
unsigned int cpu_start_idx, ncpus, cpu_idx;
plat_local_state_t local_state;
@@ -181,7 +183,6 @@ static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int end_pwrlvl)
return true;
}
- my_idx = plat_my_core_pos();
parent_idx = psci_cpu_pd_nodes[my_idx].parent_node;
for (lvl = PSCI_CPU_PWR_LVL + U(1); lvl < end_pwrlvl; lvl++) {
parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
@@ -212,11 +213,9 @@ static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int end_pwrlvl)
* turned OFF and the current CPU is the last running CPU in the system.
* Returns true, if the current CPU is the last ON CPU or false otherwise.
******************************************************************************/
- bool psci_is_last_on_cpu(void)
+ bool psci_is_last_on_cpu(unsigned int my_idx)
{
- unsigned int cpu_idx, my_idx = plat_my_core_pos();
- for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
+ for (unsigned int cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
if (cpu_idx == my_idx) {
assert(psci_get_aff_info_state() == AFF_STATE_ON);
continue;
@@ -260,13 +259,9 @@ static unsigned int get_power_on_target_pwrlvl(void)
/*
* Assume that this cpu was suspended and retrieve its target power
- * level. If it is invalid then it could only have been turned off
- * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
- * cpu can be turned off to.
+ * level. If it wasn't, the cpu is off so this will be PLAT_MAX_PWR_LVL.
*/
pwrlvl = psci_get_suspend_pwrlvl();
- if (pwrlvl == PSCI_INVALID_PWR_LVL)
- pwrlvl = PLAT_MAX_PWR_LVL;
+ assert(pwrlvl < PSCI_INVALID_PWR_LVL);
return pwrlvl;
}
@@ -428,14 +423,14 @@ static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
* function will be called after a cpu is powered on to find the local state
* each power domain has emerged from.
*****************************************************************************/
- void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+ void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl;
plat_local_state_t *pd_state = target_state->pwr_domain_state;
pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
- parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Copy the local power state from node to state_info */
for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
@@ -454,7 +449,7 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
* enter. This function will be called after coordination of requested power
* states has been done for each power level.
*****************************************************************************/
- void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+ void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *target_state)
{
unsigned int parent_idx, lvl;
@@ -468,7 +463,7 @@ void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
*/
psci_flush_cpu_data(psci_svc_cpu_data.local_state);
- parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
+ parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Copy the local_state from state_info */
for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
@@ -500,9 +495,9 @@ void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
* affinity info state, target power state and requested power state for the
* current CPU and all its ancestor power domains to RUN.
*****************************************************************************/
- void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
+ void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl)
{
- unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
+ unsigned int parent_idx, lvl;
parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
/* Reset the local_state to RUN for the non cpu power domains. */
@@ -544,10 +539,10 @@ void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
* This function will only be invoked with data cache enabled and while
* powering down a core.
*****************************************************************************/
- void psci_do_state_coordination(unsigned int end_pwrlvl,
+ void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
- unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int lvl, parent_idx;
unsigned int start_idx;
unsigned int ncpus;
plat_local_state_t target_state, *req_states;
@@ -620,11 +615,11 @@ void psci_do_state_coordination(unsigned int end_pwrlvl,
* This function will only be invoked with data cache enabled and while
* powering down a core.
*****************************************************************************/
- int psci_validate_state_coordination(unsigned int end_pwrlvl,
+ int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *state_info)
{
int rc = PSCI_E_SUCCESS;
- unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
+ unsigned int lvl, parent_idx;
unsigned int start_idx;
unsigned int ncpus;
plat_local_state_t target_state, *req_states;
@@ -674,7 +669,7 @@ int psci_validate_state_coordination(unsigned int end_pwrlvl,
* specified power level.
*/
lvl = state_info->last_at_pwrlvl;
- if (!psci_is_last_cpu_to_idle_at_pwrlvl(lvl)) {
+ if (!psci_is_last_cpu_to_idle_at_pwrlvl(cpu_idx, lvl)) {
rc = PSCI_E_DENIED;
}
@@ -1004,7 +999,7 @@ void psci_warmboot_entrypoint(void)
*/
psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
- psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
+ psci_get_target_local_pwr_states(cpu_idx, end_pwrlvl, &state_info);
#if ENABLE_PSCI_STAT
plat_psci_stat_accounting_stop(&state_info);
@@ -1025,7 +1020,7 @@ void psci_warmboot_entrypoint(void)
if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
psci_cpu_on_finish(cpu_idx, &state_info);
else
- psci_cpu_suspend_finish(cpu_idx, &state_info);
+ psci_cpu_suspend_to_powerdown_finish(cpu_idx, &state_info);
/*
* Generic management: Now we just need to retrieve the
@@ -1038,16 +1033,10 @@ void psci_warmboot_entrypoint(void)
* Set the requested and target state of this CPU and all the higher
* power domains which are ancestors of this CPU to run.
*/
- psci_set_pwr_domains_to_run(end_pwrlvl);
+ psci_set_pwr_domains_to_run(cpu_idx, end_pwrlvl);
#if ENABLE_PSCI_STAT
- /*
- * Update PSCI stats.
- * Caches are off when writing stats data on the power down path.
- * Since caches are now enabled, it's necessary to do cache
- * maintenance before reading that same data.
- */
- psci_stats_update_pwr_up(end_pwrlvl, &state_info);
+ psci_stats_update_pwr_up(cpu_idx, end_pwrlvl, &state_info);
#endif
/*
@@ -1169,6 +1158,17 @@ int psci_secondaries_brought_up(void)
******************************************************************************/
void psci_pwrdown_cpu(unsigned int power_level)
{
+ #if ENABLE_RUNTIME_INSTRUMENTATION
+ /*
+ * Flush cache line so that even if CPU power down happens
+ * the timestamp update is reflected in memory.
+ */
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_ENTER_CFLUSH,
+ PMF_CACHE_MAINT);
+ #endif
#if HW_ASSISTED_COHERENCY
/*
* With hardware-assisted coherency, the CPU drivers only initiate the
@@ -1188,6 +1188,12 @@ void psci_pwrdown_cpu(unsigned int power_level)
*/
psci_do_pwrdown_cache_maintenance(power_level);
#endif
+ #if ENABLE_RUNTIME_INSTRUMENTATION
+ PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
+ RT_INSTR_EXIT_CFLUSH,
+ PMF_NO_CACHE_MAINT);
+ #endif
}
/*******************************************************************************
@@ -1200,15 +1206,11 @@ void psci_pwrdown_cpu(unsigned int power_level)
* The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
* given timeout.
******************************************************************************/
- int psci_stop_other_cores(unsigned int wait_ms,
+ int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
void (*stop_func)(u_register_t mpidr))
{
- unsigned int idx, this_cpu_idx;
- this_cpu_idx = plat_my_core_pos();
/* Invoke stop_func for each core */
- for (idx = 0U; idx < psci_plat_core_count; idx++) {
+ for (unsigned int idx = 0U; idx < psci_plat_core_count; idx++) {
/* skip current CPU */
if (idx == this_cpu_idx) {
continue;
@@ -1222,11 +1224,11 @@ int psci_stop_other_cores(unsigned int wait_ms,
/* Need to wait for other cores to shutdown */
if (wait_ms != 0U) {
- while ((wait_ms-- != 0U) && (!psci_is_last_on_cpu())) {
+ while ((wait_ms-- != 0U) && (!psci_is_last_on_cpu(this_cpu_idx))) {
mdelay(1U);
}
- if (!psci_is_last_on_cpu()) {
+ if (!psci_is_last_on_cpu(this_cpu_idx)) {
WARN("Failed to stop all cores!\n");
psci_print_power_domain_map();
return PSCI_E_DENIED;
@@ -1244,16 +1246,15 @@ int psci_stop_other_cores(unsigned int wait_ms,
* This API has following differences with psci_is_last_on_cpu
* 1. PSCI states are locked
******************************************************************************/
- bool psci_is_last_on_cpu_safe(void)
+ bool psci_is_last_on_cpu_safe(unsigned int this_core)
{
- unsigned int this_core = plat_my_core_pos();
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
- if (!psci_is_last_on_cpu()) {
+ if (!psci_is_last_on_cpu(this_core)) {
psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
return false;
}
@@ -1270,9 +1271,8 @@ bool psci_is_last_on_cpu_safe(void)
* This API has following differences with psci_are_all_cpus_on
* 1. PSCI states are locked
******************************************************************************/
- bool psci_are_all_cpus_on_safe(void)
+ bool psci_are_all_cpus_on_safe(unsigned int this_core)
{
- unsigned int this_core = plat_my_core_pos();
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);


@@ -59,8 +59,8 @@ int psci_cpu_suspend(unsigned int power_state,
entry_point_info_t ep;
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
plat_local_state_t cpu_pd_state;
- #if PSCI_OS_INIT_MODE
unsigned int cpu_idx = plat_my_core_pos();
+ #if PSCI_OS_INIT_MODE
plat_local_state_t prev[PLAT_MAX_PWR_LVL];
#endif
@@ -145,7 +145,7 @@ int psci_cpu_suspend(unsigned int power_state,
plat_psci_stat_accounting_stop(&state_info);
/* Update PSCI stats */
- psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info);
+ psci_stats_update_pwr_up(cpu_idx, PSCI_CPU_PWR_LVL, &state_info);
#endif
return PSCI_E_SUCCESS;
@@ -167,7 +167,8 @@ int psci_cpu_suspend(unsigned int power_state,
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
- rc = psci_cpu_suspend_start(&ep,
+ rc = psci_cpu_suspend_start(cpu_idx,
+ &ep,
target_pwrlvl,
&state_info,
is_power_down_state);
@@ -181,9 +182,10 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
int rc;
psci_power_state_t state_info;
entry_point_info_t ep;
+ unsigned int cpu_idx = plat_my_core_pos();
/* Check if the current CPU is the last ON CPU in the system */
- if (!psci_is_last_on_cpu())
+ if (!psci_is_last_on_cpu(cpu_idx))
return PSCI_E_DENIED;
/* Validate the entry point and get the entry_point_info */
@@ -212,7 +214,8 @@ int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
- rc = psci_cpu_suspend_start(&ep,
+ rc = psci_cpu_suspend_start(cpu_idx,
+ &ep,
PLAT_MAX_PWR_LVL,
&state_info,
PSTATE_TYPE_POWERDOWN);
@@ -399,9 +402,11 @@ int psci_set_suspend_mode(unsigned int mode)
return PSCI_E_SUCCESS;
}
+ unsigned int this_core = plat_my_core_pos();
if (mode == PLAT_COORD) {
/* Check if the current CPU is the last ON CPU in the system */
- if (!psci_is_last_on_cpu_safe()) {
+ if (!psci_is_last_on_cpu_safe(this_core)) {
return PSCI_E_DENIED;
}
}
@@ -411,8 +416,8 @@ int psci_set_suspend_mode(unsigned int mode)
* Check if all CPUs in the system are ON or if the current
* CPU is the last ON CPU in the system.
*/
- if (!(psci_are_all_cpus_on_safe() ||
- psci_is_last_on_cpu_safe())) {
+ if (!(psci_are_all_cpus_on_safe(this_core) ||
+ psci_is_last_on_cpu_safe(this_core))) {
return PSCI_E_DENIED;
}
}


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2023, NVIDIA Corporation. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -102,25 +102,14 @@ int psci_do_cpu_off(unsigned int end_pwrlvl)
* it returns the negotiated state info for each power level upto
* the end level specified.
*/
- psci_do_state_coordination(end_pwrlvl, &state_info);
+ psci_do_state_coordination(idx, end_pwrlvl, &state_info);
/* Update the target state in the power domain nodes */
- psci_set_target_local_pwr_states(end_pwrlvl, &state_info);
+ psci_set_target_local_pwr_states(idx, end_pwrlvl, &state_info);
#if ENABLE_PSCI_STAT
/* Update the last cpu for each level till end_pwrlvl */
- psci_stats_update_pwr_down(end_pwrlvl, &state_info);
- #endif
- #if ENABLE_RUNTIME_INSTRUMENTATION
- /*
- * Flush cache line so that even if CPU power down happens
- * the timestamp update is reflected in memory.
- */
- PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
- RT_INSTR_ENTER_CFLUSH,
- PMF_CACHE_MAINT);
+ psci_stats_update_pwr_down(idx, end_pwrlvl, &state_info);
#endif
/*
@@ -128,12 +117,6 @@ int psci_do_cpu_off(unsigned int end_pwrlvl)
*/
psci_pwrdown_cpu(psci_find_max_off_lvl(&state_info));
- #if ENABLE_RUNTIME_INSTRUMENTATION
- PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
- RT_INSTR_EXIT_CFLUSH,
- PMF_NO_CACHE_MAINT);
- #endif
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -295,19 +295,19 @@ void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
plat_local_state_t *prev);
#endif
- void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
+ void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *target_state);
- void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
+ void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *target_state);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int *node_index);
- void psci_do_state_coordination(unsigned int end_pwrlvl,
+ void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *state_info);
#if PSCI_OS_INIT_MODE
- int psci_validate_state_coordination(unsigned int end_pwrlvl,
+ int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
psci_power_state_t *state_info);
#endif
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
@@ -318,9 +318,9 @@ int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
- void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
+ void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
- bool psci_is_last_on_cpu(void);
+ bool psci_is_last_on_cpu(unsigned int my_idx);
int psci_spd_migrate_info(u_register_t *mpidr);
/*
@@ -343,12 +343,13 @@ void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_in
int psci_do_cpu_off(unsigned int end_pwrlvl);
/* Private exported functions from psci_suspend.c */
- int psci_cpu_suspend_start(const entry_point_info_t *ep,
+ int psci_cpu_suspend_start(unsigned int idx,
+ const entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state);
- void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
+ void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
@@ -360,9 +361,9 @@ void __dead2 psci_system_reset(void);
u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);
/* Private exported functions from psci_stat.c */
- void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+ void psci_stats_update_pwr_down(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *state_info);
- void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+ void psci_stats_update_pwr_up(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *state_info);
u_register_t psci_stat_residency(u_register_t target_cpu,
unsigned int power_state);


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -68,8 +68,8 @@ static void __init psci_init_pwr_domain_node(uint16_t node_idx,
/* Set the Affinity Info for the cores as OFF */
svc_cpu_data->aff_info_state = AFF_STATE_OFF;
- /* Invalidate the suspend level for the cpu */
- svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
+ /* Default to the highest power level when the cpu is not suspending */
+ svc_cpu_data->target_pwrlvl = PLAT_MAX_PWR_LVL;
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
@@ -202,6 +202,7 @@ static unsigned int __init populate_power_domain_tree(const unsigned char
int __init psci_setup(const psci_lib_args_t *lib_args)
{
const unsigned char *topology_tree;
+ unsigned int cpu_idx = plat_my_core_pos();
assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));
@@ -218,7 +219,7 @@ int __init psci_setup(const psci_lib_args_t *lib_args)
psci_update_pwrlvl_limits();
/* Populate the mpidr field of cpu node for this CPU */
- psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
+ psci_cpu_pd_nodes[cpu_idx].mpidr =
read_mpidr() & MPIDR_AFFINITY_MASK;
psci_init_req_local_pwr_states();
@@ -227,7 +228,7 @@ int __init psci_setup(const psci_lib_args_t *lib_args)
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
- psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
+ psci_set_pwr_domains_to_run(cpu_idx, PLAT_MAX_PWR_LVL);
(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
&psci_plat_pm_ops);


@@ -73,11 +73,10 @@ static int get_stat_idx(plat_local_state_t local_state, unsigned int pwr_lvl)
* This function will only be invoked with data cache enabled and while
* powering down a core.
******************************************************************************/
- void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
+ void psci_stats_update_pwr_down(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx;
- unsigned int cpu_idx = plat_my_core_pos();
assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
assert(state_info != NULL);
@@ -106,11 +105,10 @@ void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
* and NON-CPU power domains.
* It is called with caches enabled and locks acquired(for NON-CPU domain)
******************************************************************************/
- void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
+ void psci_stats_update_pwr_up(unsigned int cpu_idx, unsigned int end_pwrlvl,
const psci_power_state_t *state_info)
{
unsigned int lvl, parent_idx;
- unsigned int cpu_idx = plat_my_core_pos();
int stat_idx;
plat_local_state_t local_state;
u_register_t residency;


@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
+ * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -25,42 +25,18 @@
* This function does generic and platform specific operations after a wake-up
* from standby/retention states at multiple power levels.
******************************************************************************/
- static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
- unsigned int end_pwrlvl)
+ static void psci_cpu_suspend_to_standby_finish(unsigned int cpu_idx,
+ unsigned int end_pwrlvl,
+ psci_power_state_t *state_info)
{
- unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
- psci_power_state_t state_info;
- /* Get the parent nodes */
- psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
- psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
- /*
- * Find out which retention states this CPU has exited from until the
- * 'end_pwrlvl'. The exit retention state could be deeper than the entry
- * state as a result of state coordination amongst other CPUs post wfi.
- */
- psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
- #if ENABLE_PSCI_STAT
- plat_psci_stat_accounting_stop(&state_info);
- psci_stats_update_pwr_up(end_pwrlvl, &state_info);
- #endif
/*
* Plat. management: Allow the platform to do operations
* on waking up from retention.
*/
- psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);
+ psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
- /*
- * Set the requested and target state of this CPU and all the higher
- * power domain levels for this CPU to run.
- */
- psci_set_pwr_domains_to_run(end_pwrlvl);
- psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
+ /* This loses its meaning when not suspending, reset so it's correct for OFF */
+ psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL);
}
/*******************************************************************************
@@ -116,29 +92,12 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
*/
cm_init_my_context(ep);
- #if ENABLE_RUNTIME_INSTRUMENTATION
- /*
- * Flush cache line so that even if CPU power down happens
- * the timestamp update is reflected in memory.
- */
- PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
- RT_INSTR_ENTER_CFLUSH,
- PMF_CACHE_MAINT);
- #endif
/*
* Arch. management. Initiate power down sequence.
* TODO : Introduce a mechanism to query the cache level to flush
* and the cpu-ops power down to perform from the platform.
*/
psci_pwrdown_cpu(max_off_lvl);
- #if ENABLE_RUNTIME_INSTRUMENTATION
- PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
- RT_INSTR_EXIT_CFLUSH,
- PMF_NO_CACHE_MAINT);
- #endif
}
/*******************************************************************************
@@ -159,14 +118,14 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
- int psci_cpu_suspend_start(const entry_point_info_t *ep,
+ int psci_cpu_suspend_start(unsigned int idx,
+ const entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
int rc = PSCI_E_SUCCESS;
bool skip_wfi = false;
- unsigned int idx = plat_my_core_pos();
unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
/*
@@ -202,7 +161,7 @@ int psci_cpu_suspend_start(const entry_point_info_t *ep,
* This function validates the requested state info for
* OS-initiated mode.
*/
- rc = psci_validate_state_coordination(end_pwrlvl, state_info);
+ rc = psci_validate_state_coordination(idx, end_pwrlvl, state_info);
if (rc != PSCI_E_SUCCESS) {
skip_wfi = true;
goto exit;
@@ -214,7 +173,7 @@ int psci_cpu_suspend_start(const entry_point_info_t *ep,
* it returns the negotiated state info for each power level upto
* the end level specified.
*/
- psci_do_state_coordination(end_pwrlvl, state_info);
+ psci_do_state_coordination(idx, end_pwrlvl, state_info);
#if PSCI_OS_INIT_MODE
}
#endif
@@ -230,11 +189,11 @@ int psci_cpu_suspend_start(const entry_point_info_t *ep,
#endif
/* Update the target state in the power domain nodes */
- psci_set_target_local_pwr_states(end_pwrlvl, state_info);
+ psci_set_target_local_pwr_states(idx, end_pwrlvl, state_info);
#if ENABLE_PSCI_STAT
/* Update the last cpu for each level till end_pwrlvl */
- psci_stats_update_pwr_down(end_pwrlvl, state_info);
+ psci_stats_update_pwr_down(idx, end_pwrlvl, state_info);
#endif
if (is_power_down_state != 0U)
@@ -304,11 +263,32 @@ exit:
PMF_NO_CACHE_MAINT);
#endif
+ psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
+ /*
+ * Find out which retention states this CPU has exited from until the
+ * 'end_pwrlvl'. The exit retention state could be deeper than the entry
+ * state as a result of state coordination amongst other CPUs post wfi.
+ */
+ psci_get_target_local_pwr_states(idx, end_pwrlvl, state_info);
+ #if ENABLE_PSCI_STAT
+ plat_psci_stat_accounting_stop(state_info);
+ psci_stats_update_pwr_up(idx, end_pwrlvl, state_info);
+ #endif
/*
* After we wake up from context retaining suspend, call the
* context retaining suspend finisher.
*/
- psci_suspend_to_standby_finisher(idx, end_pwrlvl);
+ psci_cpu_suspend_to_standby_finish(idx, end_pwrlvl, state_info);
+ /*
+ * Set the requested and target state of this CPU and all the higher
+ * power domain levels for this CPU to run.
+ */
+ psci_set_pwr_domains_to_run(idx, end_pwrlvl);
+ psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
return rc;
}
@@ -318,7 +298,7 @@ exit:
* are called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up from.
******************************************************************************/
- void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
+ void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx, const psci_power_state_t *state_info)
{
unsigned int counter_freq;
unsigned int max_off_lvl;
@@ -363,8 +343,8 @@ void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *sta
psci_spd_pm->svc_suspend_finish(max_off_lvl);
}
- /* Invalidate the suspend level for the cpu */
- psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
+ /* This loses its meaning when not suspending, reset so it's correct for OFF */
+ psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL);
PUBLISH_EVENT(psci_suspend_pwrdown_finish);
}


@@ -112,7 +112,8 @@ void request_cpu_pwrdwn(void)
VERBOSE("CPU power down request received\n");
/* Send powerdown request to online secondary core(s) */
- ret = psci_stop_other_cores(PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
+ ret = psci_stop_other_cores(plat_my_core_pos(), PWRDWN_WAIT_TIMEOUT,
+ raise_pwr_down_interrupt);
if (ret != (uint32_t)PSCI_E_SUCCESS) {
ERROR("Failed to powerdown secondary core(s)\n");
}


@@ -208,7 +208,7 @@ static enum drtm_retc drtm_dl_check_cores(void)
return DENIED;
}
- running_on_single_core = psci_is_last_on_cpu_safe();
+ running_on_single_core = psci_is_last_on_cpu_safe(plat_my_core_pos());
if (!running_on_single_core) {
ERROR("DRTM: invalid launch due to non-boot PE not being turned off\n");
return SECONDARY_PE_NOT_OFF;