mirror of https://github.com/ARM-software/arm-trusted-firmware.git
synced 2025-05-07 12:19:26 +00:00
PSCI: Introduce cache and barrier wrappers
The PSCI implementation performs cache maintenance operations on its data
structures to ensure their visibility to both cache-coherent and
non-cache-coherent participants. These cache maintenance operations can be
skipped if all PSCI participants are cache-coherent. When the
HW_ASSISTED_COHERENCY build option is enabled, we assume that PSCI
participants are cache-coherent.

For usage abstraction, this patch introduces wrappers for the PSCI cache
maintenance and barrier operations used for state coordination: they are
effectively NOPs when HW_ASSISTED_COHERENCY is enabled, but are applied
otherwise.

Also refactor local state usage and the associated cache operations to make
them clearer.

Change-Id: I77f17a90cba41085b7188c1345fe5731c99fad87
Signed-off-by: Jeenu Viswambharan <jeenu.viswambharan@arm.com>
parent d4593e4713
commit a10d3632ac

5 changed files with 85 additions and 39 deletions
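Before the diff, a minimal standalone sketch of the pattern the patch
introduces: cache maintenance wrappers that compile away when all observers
are coherent. The demo_* names are hypothetical, not TF-A source; build with
-DHW_ASSISTED_COHERENCY=1 to make the maintenance a no-op.

    /* Sketch of the NOP-wrapper pattern (hypothetical demo_* namespace). */
    #include <stddef.h>
    #include <stdint.h>

    #if HW_ASSISTED_COHERENCY
    /* All observers are cache-coherent: the wrapper expands to nothing. */
    #define demo_flush_dcache_range(addr, size)
    #else
    /* Some observers may read main memory directly: perform a real flush. */
    #define demo_flush_dcache_range(addr, size)  demo_flush_impl((addr), (size))
    static void demo_flush_impl(uintptr_t addr, size_t size)
    {
            /* A platform clean+invalidate-by-VA loop (e.g. DC CIVAC) would
             * go here; stubbed out so the sketch stays self-contained. */
            (void)addr;
            (void)size;
    }
    #endif

    static uint8_t demo_shared_state;

    void demo_publish(uint8_t v)
    {
            demo_shared_state = v;
            /* Empty statement with HW_ASSISTED_COHERENCY=1, real flush otherwise. */
            demo_flush_dcache_range((uintptr_t)&demo_shared_state,
                                    sizeof(demo_shared_state));
    }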
--- a/lib/psci/psci_common.c
+++ b/lib/psci/psci_common.c
@@ -247,6 +247,50 @@ static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
 	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
 }
 
+/*
+ * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
+ * memory.
+ *
+ * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
+ * it's accessed by both cached and non-cached participants. To serve the common
+ * minimum, perform a cache flush before read and after write so that non-cached
+ * participants operate on latest data in main memory.
+ *
+ * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
+ * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
+ * In both cases, no cache operations are required.
+ */
+
+/*
+ * Retrieve local state of non-CPU power domain node from a non-cached CPU,
+ * after any required cache maintenance operation.
+ */
+static plat_local_state_t get_non_cpu_pd_node_local_state(
+		unsigned int parent_idx)
+{
+#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+	return psci_non_cpu_pd_nodes[parent_idx].local_state;
+}
+
+/*
+ * Update local state of non-CPU power domain node from a cached CPU; perform
+ * any required cache maintenance operation afterwards.
+ */
+static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
+		plat_local_state_t state)
+{
+	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
+#if !USE_COHERENT_MEM || !HW_ASSISTED_COHERENCY
+	flush_dcache_range(
+			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
+			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
+#endif
+}
+
 /******************************************************************************
  * Helper function to return the current local power state of each power domain
  * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
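The comment block in the hunk above describes a flush-before-read /
flush-after-write protocol for data shared between cached and non-cached
observers. A generic illustration of that protocol follows, under the
assumption of a hypothetical platform primitive shared_flush() that performs
a clean+invalidate by VA range (not TF-A source):

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed platform primitive: clean+invalidate data cache by VA range. */
    extern void shared_flush(uintptr_t addr, size_t size);

    static uint8_t shared_var;

    uint8_t shared_read(void)
    {
            /* Clean+invalidate first: any stale local copy is written back
             * and discarded, so the load below observes main memory. */
            shared_flush((uintptr_t)&shared_var, sizeof(shared_var));
            return shared_var;
    }

    void shared_write(uint8_t v)
    {
            shared_var = v;
            /* Write back so that non-cached observers see the update. */
            shared_flush((uintptr_t)&shared_var, sizeof(shared_var));
    }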
@@ -264,18 +308,7 @@ void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
 
 	/* Copy the local power state from node to state_info */
 	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-#if !USE_COHERENT_MEM
-		/*
-		 * If using normal memory for psci_non_cpu_pd_nodes, we need
-		 * to flush before reading the local power state as another
-		 * cpu in the same power domain could have updated it and this
-		 * code runs before caches are enabled.
-		 */
-		flush_dcache_range(
-				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
-		pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
+		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
 
@@ -299,21 +332,16 @@ static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
 	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
 
 	/*
-	 * Need to flush as local_state will be accessed with Data Cache
+	 * Need to flush as local_state might be accessed with Data Cache
 	 * disabled during power on
 	 */
-	flush_cpu_data(psci_svc_cpu_data.local_state);
+	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
 
 	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;
 
 	/* Copy the local_state from state_info */
 	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
-		psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
-#if !USE_COHERENT_MEM
-		flush_dcache_range(
-				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
 	}
 }
@@ -347,13 +375,8 @@ void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
 
 	/* Reset the local_state to RUN for the non cpu power domains. */
 	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
-		psci_non_cpu_pd_nodes[parent_idx].local_state =
-				PSCI_LOCAL_STATE_RUN;
-#if !USE_COHERENT_MEM
-		flush_dcache_range(
-				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
-				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
-#endif
+		set_non_cpu_pd_node_local_state(parent_idx,
+				PSCI_LOCAL_STATE_RUN);
 		psci_set_req_local_pwr_state(lvl,
 					     cpu_idx,
 					     PSCI_LOCAL_STATE_RUN);
@@ -364,7 +387,7 @@ void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
 	psci_set_aff_info_state(AFF_STATE_ON);
 
 	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
-	flush_cpu_data(psci_svc_cpu_data);
+	psci_flush_cpu_data(psci_svc_cpu_data);
 }
 
 /******************************************************************************
--- a/lib/psci/psci_off.c
+++ b/lib/psci/psci_off.c
@@ -154,17 +154,17 @@ exit:
 	 */
 	if (rc == PSCI_E_SUCCESS) {
 		/*
-		 * Set the affinity info state to OFF. This writes directly to
-		 * main memory as caches are disabled, so cache maintenance is
+		 * Set the affinity info state to OFF. When caches are disabled,
+		 * this writes directly to main memory, so cache maintenance is
 		 * required to ensure that later cached reads of aff_info_state
 		 * return AFF_STATE_OFF. A dsbish() ensures ordering of the
 		 * update to the affinity info state prior to cache line
 		 * invalidation.
 		 */
-		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_flush_cpu_data(psci_svc_cpu_data.aff_info_state);
 		psci_set_aff_info_state(AFF_STATE_OFF);
-		dsbish();
-		inv_cpu_data(psci_svc_cpu_data.aff_info_state);
+		psci_dsbish();
+		psci_inv_cpu_data(psci_svc_cpu_data.aff_info_state);
 
 #if ENABLE_RUNTIME_INSTRUMENTATION
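The sequence in the hunk above — flush, update, barrier, invalidate — is
worth spelling out. A hedged sketch of the same publish pattern, with the
hypothetical dc_civac_line()/dc_ivac_line()/dsb_ish() standing in for the
real cache and barrier primitives (not TF-A source):

    #include <stdint.h>

    /* Assumed platform primitives (hypothetical names): clean+invalidate or
     * invalidate one cache line by VA, and a DSB ISH barrier. */
    extern void dc_civac_line(uintptr_t addr);
    extern void dc_ivac_line(uintptr_t addr);
    extern void dsb_ish(void);

    /* Publish a flag from a CPU whose data cache is disabled, so that CPUs
     * reading through their caches later observe the new value. */
    void publish_flag(volatile uint8_t *flag, uint8_t value)
    {
            dc_civac_line((uintptr_t)flag); /* write back/drop any stale dirty line */
            *flag = value;                  /* store goes straight to main memory */
            dsb_ish();                      /* order the store before invalidation */
            dc_ivac_line((uintptr_t)flag);  /* later cached reads must refetch */
    }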
--- a/lib/psci/psci_private.h
+++ b/lib/psci/psci_private.h
@@ -38,6 +38,29 @@
 #include <psci.h>
 #include <spinlock.h>
 
+#if HW_ASSISTED_COHERENCY
+/*
+ * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
+ * as PSCI participants are cache-coherent, and there's no need for explicit
+ * cache maintenance operations or barriers to coordinate their state.
+ */
+#define psci_flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)
+#define psci_inv_cpu_data(member)
+
+#define psci_dsbish()
+#else
+/*
+ * If not all PSCI participants are cache-coherent, perform cache maintenance
+ * and issue barriers wherever required to coordinate state.
+ */
+#define psci_flush_dcache_range(addr, size)	flush_dcache_range(addr, size)
+#define psci_flush_cpu_data(member)		flush_cpu_data(member)
+#define psci_inv_cpu_data(member)		inv_cpu_data(member)
+
+#define psci_dsbish()				dsbish()
+#endif
+
 /*
  * The following helper macros abstract the interface to the Bakery
  * Lock API.
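Because the wrappers above are function-like macros, call sites are textually
identical in both configurations; only the expansion changes. An illustrative
fragment:

    /* With HW_ASSISTED_COHERENCY=1 this preprocesses to an empty statement;
     * otherwise it expands to flush_cpu_data(psci_svc_cpu_data.local_state); */
    psci_flush_cpu_data(psci_svc_cpu_data.local_state);

One consequence of the empty expansion is that the macro arguments are not
evaluated at all in the coherent build, which appears safe here since the
call sites in this patch pass plain lvalues with no side effects.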
--- a/lib/psci/psci_setup.c
+++ b/lib/psci/psci_setup.c
@@ -86,7 +86,7 @@ static void psci_init_pwr_domain_node(unsigned int node_idx,
 		/* Set the power state to OFF state */
 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
 
-		flush_dcache_range((uintptr_t)svc_cpu_data,
+		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
 						   sizeof(*svc_cpu_data));
 
 		cm_set_context_by_index(node_idx,
@@ -242,9 +242,9 @@ int psci_setup(const psci_lib_args_t *lib_args)
 
 	/*
 	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
-	 * during warm boot before data cache is enabled.
+	 * during warm boot, possibly before data cache is enabled.
 	 */
-	flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
+	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
 			sizeof(psci_plat_pm_ops));
 
 	/* Initialize the psci capability */
--- a/lib/psci/psci_suspend.c
+++ b/lib/psci/psci_suspend.c
@@ -91,10 +91,10 @@ static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
 	psci_set_suspend_pwrlvl(end_pwrlvl);
 
 	/*
-	 * Flush the target power level as it will be accessed on power up with
+	 * Flush the target power level as it might be accessed on power up with
 	 * Data cache disabled.
 	 */
-	flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
+	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
 
 	/*
 	 * Call the cpu suspend handler registered by the Secure Payload