Merge "fix(amu): limit virtual offset register access to NS world" into integration

This commit is contained in:
Joanna Farley 2022-05-10 15:55:05 +02:00 committed by TrustedFirmware Code Review
commit 926224e22e
3 changed files with 21 additions and 13 deletions

View file

@@ -488,7 +488,8 @@
#define SCR_HXEn_BIT (UL(1) << 38)
#define SCR_ENTP2_SHIFT U(41)
#define SCR_ENTP2_BIT (UL(1) << SCR_ENTP2_SHIFT)
#define SCR_AMVOFFEN_BIT (UL(1) << 35)
#define SCR_AMVOFFEN_SHIFT U(35)
#define SCR_AMVOFFEN_BIT (UL(1) << SCR_AMVOFFEN_SHIFT)
#define SCR_TWEDEn_BIT (UL(1) << 29)
#define SCR_ECVEN_BIT (UL(1) << 28)
#define SCR_FGTEN_BIT (UL(1) << 27)

View file

@@ -283,16 +283,6 @@ static void setup_context_common(cpu_context_t *ctx, const entry_point_info_t *e
}
}
/*
* FEAT_AMUv1p1 virtual offset registers are only accessible from EL3
* and EL2, when clear, this bit traps accesses from EL2 so we set it
* to 1 when EL2 is present.
*/
if (is_armv8_6_feat_amuv1p1_present() &&
(el_implemented(2) != EL_IMPL_NONE)) {
scr_el3 |= SCR_AMVOFFEN_BIT;
}
/*
* Initialise SCTLR_EL1 to the reset value corresponding to the target
* execution state setting all fields rather than relying of the hw.

View file

@@ -75,7 +75,7 @@ static inline __unused void write_cptr_el2_tam(uint64_t value)
((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}
static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
static inline __unused void ctx_write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
@@ -85,6 +85,16 @@ static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}
/* Program the SCR_EL3.AMVOFFEN field in the saved EL3 context 'ctx'. */
static inline __unused void ctx_write_scr_el3_amvoffen(cpu_context_t *ctx, uint64_t amvoffen)
{
	uint64_t scr = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);

	/* Clear the field, then insert the new value at its shift position. */
	scr = (scr & ~SCR_AMVOFFEN_BIT) |
	      ((amvoffen << SCR_AMVOFFEN_SHIFT) & SCR_AMVOFFEN_BIT);
	write_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3, scr);
}
static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
@@ -226,7 +236,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
* in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
* the Activity Monitor registers do not trap to EL3.
*/
write_cptr_el3_tam(ctx, 0U);
ctx_write_cptr_el3_tam(ctx, 0U);
/*
* Retrieve the number of architected counters. All of these counters
@@ -285,6 +295,13 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
* used.
*/
write_hcr_el2_amvoffen(0U);
} else {
/*
* Virtual offset registers are only accessible from EL3
* and EL2. When SCR_EL3.AMVOFFEN is clear, accesses from
* EL2 are trapped, so set the bit to 1 when EL2 is present.
*/
ctx_write_scr_el3_amvoffen(ctx, 1U);
}
#if AMU_RESTRICT_COUNTERS