
The current code is incredibly resilient to updates to the spec and has worked
quite well so far. However, recent implementations expose a weakness: it is
rather slow. A large part of it is written in assembly, making it opaque to
the compiler for optimisations. The future-proofing requires reading registers
that are effectively `volatile`, making it even harder for the compiler, as
well as adding lots of implicit barriers, making it hard for the
microarchitecture to optimise as well.

We can make a few assumptions, checked by a few well placed asserts, and
remove a lot of this burden. For a start, at the moment there are 4 group 0
counters with static assignments. Contexting them is a trivial affair that
doesn't need a loop. Similarly, there can only be up to 16 group 1 counters.
Contexting them is a bit harder, but we can do it with a single branch and a
fall-through switch. If/when either of these changes, we have a pair of
asserts and the feature detection mechanism to guard us against pretending
that we support something we don't.

We can drop contexting of the offset registers. They are fully accessible by
EL2 and as such are its responsibility to preserve on powerdown. Another small
thing we can do is pass the core_pos into the hook; the caller already knows
which core we're running on, so we don't need to call this non-trivial
function again.

Finally, knowing this, we don't really need the auxiliary AMUs to be described
by the device tree. Linux doesn't care at the moment, and any information we
need for EL3 can be neatly placed in a simple array.

All of this, combined with lifting the actual saving out of assembly, reduces
the instructions to save the context from 180 to 40, including a lot fewer
branches. The code is also much shorter and easier to read.

Also propagate to aarch32 so that the two don't diverge too much.

Change-Id: Ib62e6e9ba5be7fb9fb8965c8eee148d5598a5361
Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
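Note on `get_amu_aux_enables()`: this is the "simple array" lookup mentioned
above. Its definition lives in platform code rather than in this file; purely
as an illustration, a platform-side implementation could look something like
the sketch below, where the array name `plat_amu_aux_enables`, the `uint16_t`
type, and the example values are all hypothetical assumptions, not the real
interface:

#include <assert.h>
#include <stdint.h>

#include <platform_def.h>

/* Hypothetical per-core bitmaps: bit n set means auxiliary (group 1)
 * counter n is enabled on that core. */
static const uint16_t plat_amu_aux_enables[PLATFORM_CORE_COUNT] = {
	[0] = 0x000f, /* e.g. core 0 enables auxiliary counters 0-3 */
	/* ...one entry per core... */
};

uint16_t get_amu_aux_enables(unsigned int core_pos)
{
	assert(core_pos < PLATFORM_CORE_COUNT);
	return plat_amu_aux_enables[core_pos];
}

Keeping this as a plain per-core array means EL3 never has to parse the device
tree for auxiliary AMU information.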
/*
 * Copyright (c) 2017-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <cdefs.h>
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>

#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <common/debug.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/extensions/amu.h>
#include <lib/utils_def.h>
#include <platform_def.h>

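/* Per-core buffer used to hold the AMU counter context across powerdown. */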
amu_regs_t amu_ctx[PLATFORM_CORE_COUNT];

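/* AMCGCR_EL0.CG1NC: the number of implemented group 1 (auxiliary) counters. */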
static inline uint8_t read_amcgcr_el0_cg1nc(void)
{
	return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
		AMCGCR_EL0_CG1NC_MASK;
}

void amu_enable(cpu_context_t *ctx)
{
	/* Initialize FEAT_AMUv1p1 features if present. */
	if (is_feat_amuv1p1_supported()) {
		el3_state_t *state = get_el3state_ctx(ctx);
		u_register_t reg;

		/*
		 * Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
		 * offset registers at EL2 do not trap to EL3.
		 */
		reg = read_ctx_reg(state, CTX_SCR_EL3);
		reg |= SCR_AMVOFFEN_BIT;
		write_ctx_reg(state, CTX_SCR_EL3, reg);
	}
}

void amu_enable_per_world(per_world_context_t *per_world_ctx)
{
	/*
	 * Set CPTR_EL3.TAM to zero so that any accesses to the Activity
	 * Monitor registers do not trap to EL3.
	 */
	uint64_t cptr_el3 = per_world_ctx->ctx_cptr_el3;

	cptr_el3 &= ~TAM_BIT;
	per_world_ctx->ctx_cptr_el3 = cptr_el3;
}

void amu_init_el3(unsigned int core_pos)
{
	/* The architecture currently pins group 0 to four counters. */
	assert((read_amcgcr_el0() & AMCGCR_EL0_CG0NC_MASK) == CTX_AMU_GRP0_ALL);

	/* Enable all architected counters by default. */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		/* Something went wrong if we're trying to write higher bits. */
		assert((get_amu_aux_enables(core_pos) & ~AMCNTENSET1_EL0_Pn_MASK) == 0);
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	if (is_feat_amuv1p1_supported()) {
#if AMU_RESTRICT_COUNTERS
		/*
		 * FEAT_AMUv1p1 adds a register field to restrict access to
		 * group 1 counters at all but the highest implemented EL. This
		 * is controlled with the `AMU_RESTRICT_COUNTERS` compile time
		 * flag; when set, system register reads at lower ELs return
		 * zero. Reads from the memory mapped view are unaffected.
		 */
		VERBOSE("AMU group 1 counter access restricted.\n");
		write_amcr_el0(AMCR_CG1RZ_BIT);
#else
		/* HDBG = 0 in both cases. */
		write_amcr_el0(0);
#endif
	}
}

void amu_init_el2_unused(void)
{
	/*
	 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
	 * registers do not trap to EL2.
	 */
	write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TAM_BIT);

	if (is_feat_amuv1p1_supported()) {
		/* Make sure virtual offsets are disabled. */
		write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
	}
}

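/*
 * Powerdown hooks, registered with the PSCI pubsub events at the bottom of
 * this file: save the counters just before a core powers down and restore
 * them when it comes back up.
 */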
static void *amu_context_save(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

	/* Disable all counters so we can write them safely later. */
	write_amcntenclr0_el0(AMCNTENCLR0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenclr1_el0(get_amu_aux_enables(core_pos));
	}

	isb();

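	/*
	 * Group 0 is architecturally pinned to four counters (asserted in
	 * amu_init_el3()), so save them with straight-line code instead of
	 * a loop.
	 */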
	write_amu_grp0_ctx_reg(ctx, 0, read_amevcntr00_el0());
	write_amu_grp0_ctx_reg(ctx, 1, read_amevcntr01_el0());
	write_amu_grp0_ctx_reg(ctx, 2, read_amevcntr02_el0());
	write_amu_grp0_ctx_reg(ctx, 3, read_amevcntr03_el0());

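	/*
	 * Save the implemented group 1 counters with a fall-through switch:
	 * enter at the case matching the implemented count and fall through,
	 * saving every lower-numbered counter on the way down.
	 */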
	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

		switch (num_counters) {
		case 0x10:
			write_amu_grp1_ctx_reg(ctx, 0xf, read_amevcntr1f_el0());
			__fallthrough;
		case 0x0f:
			write_amu_grp1_ctx_reg(ctx, 0xe, read_amevcntr1e_el0());
			__fallthrough;
		case 0x0e:
			write_amu_grp1_ctx_reg(ctx, 0xd, read_amevcntr1d_el0());
			__fallthrough;
		case 0x0d:
			write_amu_grp1_ctx_reg(ctx, 0xc, read_amevcntr1c_el0());
			__fallthrough;
		case 0x0c:
			write_amu_grp1_ctx_reg(ctx, 0xb, read_amevcntr1b_el0());
			__fallthrough;
		case 0x0b:
			write_amu_grp1_ctx_reg(ctx, 0xa, read_amevcntr1a_el0());
			__fallthrough;
		case 0x0a:
			write_amu_grp1_ctx_reg(ctx, 0x9, read_amevcntr19_el0());
			__fallthrough;
		case 0x09:
			write_amu_grp1_ctx_reg(ctx, 0x8, read_amevcntr18_el0());
			__fallthrough;
		case 0x08:
			write_amu_grp1_ctx_reg(ctx, 0x7, read_amevcntr17_el0());
			__fallthrough;
		case 0x07:
			write_amu_grp1_ctx_reg(ctx, 0x6, read_amevcntr16_el0());
			__fallthrough;
		case 0x06:
			write_amu_grp1_ctx_reg(ctx, 0x5, read_amevcntr15_el0());
			__fallthrough;
		case 0x05:
			write_amu_grp1_ctx_reg(ctx, 0x4, read_amevcntr14_el0());
			__fallthrough;
		case 0x04:
			write_amu_grp1_ctx_reg(ctx, 0x3, read_amevcntr13_el0());
			__fallthrough;
		case 0x03:
			write_amu_grp1_ctx_reg(ctx, 0x2, read_amevcntr12_el0());
			__fallthrough;
		case 0x02:
			write_amu_grp1_ctx_reg(ctx, 0x1, read_amevcntr11_el0());
			__fallthrough;
		case 0x01:
			write_amu_grp1_ctx_reg(ctx, 0x0, read_amevcntr10_el0());
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	return (void *)0;
}

static void *amu_context_restore(const void *arg)
{
	if (!is_feat_amu_supported()) {
		return (void *)0;
	}

	unsigned int core_pos = *(unsigned int *)arg;
	amu_regs_t *ctx = &amu_ctx[core_pos];

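	/*
	 * Mirror of the save path: write group 0 back, then the implemented
	 * group 1 counters via the same fall-through switch, and only turn
	 * the enable bits back on once every counter has been written.
	 */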
	write_amevcntr00_el0(read_amu_grp0_ctx_reg(ctx, 0));
	write_amevcntr01_el0(read_amu_grp0_ctx_reg(ctx, 1));
	write_amevcntr02_el0(read_amu_grp0_ctx_reg(ctx, 2));
	write_amevcntr03_el0(read_amu_grp0_ctx_reg(ctx, 3));

	if (is_feat_amu_aux_supported()) {
		uint8_t num_counters = read_amcgcr_el0_cg1nc();

		switch (num_counters) {
		case 0x10:
			write_amevcntr1f_el0(read_amu_grp1_ctx_reg(ctx, 0xf));
			__fallthrough;
		case 0x0f:
			write_amevcntr1e_el0(read_amu_grp1_ctx_reg(ctx, 0xe));
			__fallthrough;
		case 0x0e:
			write_amevcntr1d_el0(read_amu_grp1_ctx_reg(ctx, 0xd));
			__fallthrough;
		case 0x0d:
			write_amevcntr1c_el0(read_amu_grp1_ctx_reg(ctx, 0xc));
			__fallthrough;
		case 0x0c:
			write_amevcntr1b_el0(read_amu_grp1_ctx_reg(ctx, 0xb));
			__fallthrough;
		case 0x0b:
			write_amevcntr1a_el0(read_amu_grp1_ctx_reg(ctx, 0xa));
			__fallthrough;
		case 0x0a:
			write_amevcntr19_el0(read_amu_grp1_ctx_reg(ctx, 0x9));
			__fallthrough;
		case 0x09:
			write_amevcntr18_el0(read_amu_grp1_ctx_reg(ctx, 0x8));
			__fallthrough;
		case 0x08:
			write_amevcntr17_el0(read_amu_grp1_ctx_reg(ctx, 0x7));
			__fallthrough;
		case 0x07:
			write_amevcntr16_el0(read_amu_grp1_ctx_reg(ctx, 0x6));
			__fallthrough;
		case 0x06:
			write_amevcntr15_el0(read_amu_grp1_ctx_reg(ctx, 0x5));
			__fallthrough;
		case 0x05:
			write_amevcntr14_el0(read_amu_grp1_ctx_reg(ctx, 0x4));
			__fallthrough;
		case 0x04:
			write_amevcntr13_el0(read_amu_grp1_ctx_reg(ctx, 0x3));
			__fallthrough;
		case 0x03:
			write_amevcntr12_el0(read_amu_grp1_ctx_reg(ctx, 0x2));
			__fallthrough;
		case 0x02:
			write_amevcntr11_el0(read_amu_grp1_ctx_reg(ctx, 0x1));
			__fallthrough;
		case 0x01:
			write_amevcntr10_el0(read_amu_grp1_ctx_reg(ctx, 0x0));
			__fallthrough;
		case 0x00:
			break;
		default:
			assert(0); /* something is wrong */
		}
	}

	/* Now enable them again. */
	write_amcntenset0_el0(AMCNTENSET0_EL0_Pn_MASK);
	if (is_feat_amu_aux_supported()) {
		write_amcntenset1_el0(get_amu_aux_enables(core_pos));
	}

	isb();
	return (void *)0;
}

SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_start, amu_context_save);
SUBSCRIBE_TO_EVENT(psci_suspend_pwrdown_finish, amu_context_restore);