Merge changes from topic "bk/smccc_feature" into integration

* changes:
  fix(trbe): add a tsb before context switching
  fix(spe): add a psb before updating context and remove context saving
Manish Pandey 2024-12-19 12:34:42 +01:00 committed by TrustedFirmware Code Review
commit b41b9997ca
6 changed files with 21 additions and 136 deletions


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2024, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@@ -224,13 +224,6 @@
.space SPINLOCK_ASM_SIZE
.endm
/*
* With RAS extension executes esb instruction, else NOP
*/
.macro esb
.inst 0xd503221f
.endm
/*
* Helper macro to read system register value into x0
*/
@@ -265,6 +258,14 @@
msr SYSREG_SB, xzr
.endm
.macro psb_csync
hint #17 /* use the hint synonym for compatibility */
.endm
.macro tsb_csync
hint #18 /* use the hint synonym for compatibility */
.endm
/*
* Macro for using speculation barrier instruction introduced by
* FEAT_SB, if it's enabled.

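Note on the two new macros: hint #17 and hint #18 are the architected synonyms of psb csync and tsb csync, so the macros assemble even with toolchains that do not recognise the named mnemonics, and both instructions execute as NOPs on cores without FEAT_SPE / FEAT_TRBE. A minimal C-side sketch of the same barriers, mirroring the helpers this change deletes from the SPE and TRBE drivers further down:

/* Sketch only: C equivalents of the psb_csync/tsb_csync macros added above.
 * "hint #17" is the synonym of "psb csync", "hint #18" of "tsb csync". */
static inline void psb_csync(void)
{
	__asm__ volatile("hint #17");
}

static inline void tsb_csync(void)
{
	__asm__ volatile("hint #18");
}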

@@ -14,7 +14,6 @@
void spe_enable(cpu_context_t *ctx);
void spe_disable(cpu_context_t *ctx);
void spe_init_el2_unused(void);
void spe_stop(void);
#else
static inline void spe_enable(cpu_context_t *ctx)
{
@@ -25,9 +24,6 @@ static inline void spe_disable(cpu_context_t *ctx)
static inline void spe_init_el2_unused(void)
{
}
static inline void spe_stop(void)
{
}
#endif /* ENABLE_SPE_FOR_NS */
#endif /* SPE_H */


@@ -400,7 +400,6 @@ no_mpam:
/* PMUv3 is presumed to be always present */
mrs x9, pmcr_el0
str x9, [sp, #CTX_EL3STATE_OFFSET + CTX_PMCR_EL0]
isb
#if CTX_INCLUDE_PAUTH_REGS
/* ----------------------------------------------------------
* Save the ARMv8.3-PAuth keys as they are not banked
@@ -440,6 +439,18 @@
* -----------------------------------------------------------------
*/
func prepare_el3_entry
/*
* context is about to mutate, so make sure we don't affect any still
* in-flight profiling operations. We don't care that they actually
* finish, that can still be later. NOP if not present
*/
#if ENABLE_SPE_FOR_NS
psb_csync
#endif
#if ENABLE_TRBE_FOR_NS
tsb_csync
#endif
isb
save_gp_pmcr_pauth_regs
setup_el3_execution_context
ret

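Read as C-like pseudocode, the prologue added to prepare_el3_entry above amounts to the following. This is illustrative only: the real code stays in assembly, and the function name here is hypothetical.

/* Hypothetical C rendering of the new prepare_el3_entry prologue. */
static inline void drain_profiling_before_context_mutation(void)
{
#if ENABLE_SPE_FOR_NS
	psb_csync();			/* order in-flight SPE sample writes */
#endif
#if ENABLE_TRBE_FOR_NS
	tsb_csync();			/* order in-flight trace buffer writes */
#endif
	__asm__ volatile("isb");	/* synchronize before the context save/update */
}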

@@ -9,26 +9,10 @@
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/spe.h>
#include <plat/common/platform.h>
typedef struct spe_ctx {
u_register_t pmblimitr_el1;
} spe_ctx_t;
static struct spe_ctx spe_ctxs[PLATFORM_CORE_COUNT];
static inline void psb_csync(void)
{
/*
* The assembler does not yet understand the psb csync mnemonic
* so use the equivalent hint instruction.
*/
__asm__ volatile("hint #17");
}
void spe_enable(cpu_context_t *ctx)
{
el3_state_t *state = get_el3state_ctx(ctx);
@@ -90,63 +74,3 @@ void spe_init_el2_unused(void)
v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
write_mdcr_el2(v);
}
void spe_stop(void)
{
uint64_t v;
/* Drain buffered data */
psb_csync();
dsbnsh();
/* Disable profiling buffer */
v = read_pmblimitr_el1();
v &= ~(1ULL << 0);
write_pmblimitr_el1(v);
isb();
}
static void *spe_drain_buffers_hook(const void *arg)
{
if (!is_feat_spe_supported())
return (void *)-1;
/* Drain buffered data */
psb_csync();
dsbnsh();
return (void *)0;
}
static void *spe_context_save(const void *arg)
{
unsigned int core_pos;
struct spe_ctx *ctx;
if (is_feat_spe_supported()) {
core_pos = plat_my_core_pos();
ctx = &spe_ctxs[core_pos];
ctx->pmblimitr_el1 = read_pmblimitr_el1();
}
return NULL;
}
static void *spe_context_restore(const void *arg)
{
unsigned int core_pos;
struct spe_ctx *ctx;
if (is_feat_spe_supported()) {
core_pos = plat_my_core_pos();
ctx = &spe_ctxs[core_pos];
write_pmblimitr_el1(ctx->pmblimitr_el1);
}
return NULL;
}
SUBSCRIBE_TO_EVENT(cm_entering_secure_world, spe_drain_buffers_hook);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, spe_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, spe_context_restore);


@@ -7,18 +7,8 @@
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/el3_runtime/pubsub.h>
#include <lib/extensions/trbe.h>
static void tsb_csync(void)
{
/*
* The assembler does not yet understand the tsb csync mnemonic
* so use the equivalent hint instruction.
*/
__asm__ volatile("hint #18");
}
void trbe_enable(cpu_context_t *ctx)
{
el3_state_t *state = get_el3state_ctx(ctx);
@@ -68,21 +58,3 @@ void trbe_init_el2_unused(void)
*/
write_mdcr_el2(read_mdcr_el2() & ~MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
}
static void *trbe_drain_trace_buffers_hook(const void *arg __unused)
{
if (is_feat_trbe_supported()) {
/*
* Before switching from normal world to secure world
* the trace buffers need to be drained out to memory. This is
* required to avoid an invalid memory access when TTBR is switched
* for entry to S-EL1.
*/
tsb_csync();
dsbnsh();
}
return (void *)0;
}
SUBSCRIBE_TO_EVENT(cm_entering_secure_world, trbe_drain_trace_buffers_hook);


@@ -1169,8 +1169,6 @@ int psci_secondaries_brought_up(void)
******************************************************************************/
void psci_pwrdown_cpu(unsigned int power_level)
{
psci_do_manage_extensions();
#if HW_ASSISTED_COHERENCY
/*
* With hardware-assisted coherency, the CPU drivers only initiate the
@@ -1290,20 +1288,3 @@ bool psci_are_all_cpus_on_safe(void)
return true;
}
/*******************************************************************************
* This function performs architectural feature specific management.
* It ensures the architectural features are disabled during cpu
* power off/suspend operations.
******************************************************************************/
void psci_do_manage_extensions(void)
{
/*
* On power down we need to disable statistical profiling extensions
* before exiting coherency.
*/
if (is_feat_spe_supported()) {
spe_stop();
}
}
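For context: the deleted call sat at the top of psci_pwrdown_cpu, i.e. before the caches are flushed and coherency is exited, which is why the removed comment insists on stopping SPE first. A condensed C view of the flow this hunk removes (simplified sketch; names taken from the diff, coherency handling omitted):

/* Simplified sketch of the removed power-down ordering. */
void psci_pwrdown_cpu(unsigned int power_level)
{
	/* spe_stop(): psb csync; dsb nsh; clear PMBLIMITR_EL1.E; isb */
	psci_do_manage_extensions();

	/* ... then flush caches / exit coherency for this power level ... */
}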