Merge changes from topic "bk/context_refactor" into integration

* changes:
  refactor(amu): separate the EL2 and EL3 enablement code
  refactor(cpufeat): separate the EL2 and EL3 enablement code
Manish Pandey 2023-07-17 18:55:52 +02:00 committed by TrustedFirmware Code Review
commit a2d4363791
25 changed files with 325 additions and 257 deletions
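
In outline, both topics apply the same split: each extension's single `*_enable()` entry point, which branched at runtime on an `el2_unused` flag, becomes a set of hooks with one fixed responsibility each. A minimal sketch of the pattern (the `xyz` names are illustrative, not from the tree):

/* Before: one function, branching internally on el2_unused. */
void xyz_enable(bool el2_unused, cpu_context_t *ctx);

/* After: one hook per concern. */
void xyz_enable(cpu_context_t *ctx);  /* per-world context bits */
void xyz_init_el3(void);              /* EL3 registers, written once */
void xyz_init_el2_unused(void);       /* EL2 implemented but unused */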

View file

@ -16,13 +16,21 @@
#if ENABLE_FEAT_AMU
#if __aarch64__
void amu_enable(bool el2_unused, cpu_context_t *ctx);
void amu_enable(cpu_context_t *ctx);
void amu_init_el3(void);
void amu_init_el2_unused(void);
#else
void amu_enable(bool el2_unused);
#endif
#else
#if __aarch64__
static inline void amu_enable(bool el2_unused, cpu_context_t *ctx)
static inline void amu_enable(cpu_context_t *ctx)
{
}
static inline void amu_init_el3(void)
{
}
static inline void amu_init_el2_unused(void)
{
}
#else

View file

@ -8,9 +8,9 @@
#define BRBE_H
#if ENABLE_BRBE_FOR_NS
void brbe_enable(void);
void brbe_init_el3(void);
#else
static inline void brbe_enable(void)
static inline void brbe_init_el3(void)
{
}
#endif /* ENABLE_BRBE_FOR_NS */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,11 +10,15 @@
#include <stdbool.h>
#if ENABLE_MPAM_FOR_LOWER_ELS
void mpam_enable(bool el2_unused);
void mpam_init_el3(void);
void mpam_init_el2_unused(void);
#else
static inline void mpam_enable(bool el2_unused)
static inline void mpam_init_el3(void)
{
}
#endif
static inline void mpam_init_el2_unused(void)
{
}
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#endif /* MPAM_H */

View file

@ -9,7 +9,7 @@
#include <context.h>
void pmuv3_disable_el3(void);
void pmuv3_init_el3(void);
#ifdef __aarch64__
void pmuv3_enable(cpu_context_t *ctx);

View file

@ -22,11 +22,19 @@
#if ENABLE_SME_FOR_NS
void sme_enable(cpu_context_t *context);
void sme_init_el3(void);
void sme_init_el2_unused(void);
void sme_disable(cpu_context_t *context);
#else
static inline void sme_enable(cpu_context_t *context)
{
}
static inline void sme_init_el3(void)
{
}
static inline void sme_init_el2_unused(void)
{
}
static inline void sme_disable(cpu_context_t *context)
{
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2018, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,15 +10,19 @@
#include <stdbool.h>
#if ENABLE_SPE_FOR_NS
void spe_enable(bool el2_unused);
void spe_init_el3(void);
void spe_init_el2_unused(void);
void spe_disable(void);
#else
static inline void spe_enable(bool el2_unused)
static inline void spe_init_el3(void)
{
}
static inline void spe_init_el2_unused(void)
{
}
static inline void spe_disable(void)
{
}
#endif
#endif /* ENABLE_SPE_FOR_NS */
#endif /* SPE_H */

View file

@ -11,11 +11,15 @@
#if (ENABLE_SME_FOR_NS || ENABLE_SVE_FOR_NS)
void sve_enable(cpu_context_t *context);
void sve_init_el2_unused(void);
void sve_disable(cpu_context_t *context);
#else
static inline void sve_enable(cpu_context_t *context)
{
}
static inline void sve_init_el2_unused(void)
{
}
static inline void sve_disable(cpu_context_t *context)
{
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,10 +10,12 @@
#include <context.h>
#if ENABLE_SYS_REG_TRACE_FOR_NS
#if __aarch64__
void sys_reg_trace_enable(cpu_context_t *context);
void sys_reg_trace_init_el2_unused(void);
#else
void sys_reg_trace_enable(void);
void sys_reg_trace_init_el3(void);
#endif /* __aarch64__ */
#else /* !ENABLE_SYS_REG_TRACE_FOR_NS */
@ -22,11 +24,18 @@ void sys_reg_trace_enable(void);
static inline void sys_reg_trace_enable(cpu_context_t *context)
{
}
static inline void sys_reg_trace_disable(cpu_context_t *context)
{
}
static inline void sys_reg_trace_init_el2_unused(void)
{
}
#else
static inline void sys_reg_trace_enable(void)
static inline void sys_reg_trace_init_el3(void)
{
}
#endif /* __aarch64__ */
#endif /* ENABLE_SYS_REG_TRACE_FOR_NS */
#endif /* SYS_REG_TRACE_H */

View file

@ -8,9 +8,13 @@
#define TRBE_H
#if ENABLE_TRBE_FOR_NS
void trbe_enable(void);
void trbe_init_el3(void);
void trbe_init_el2_unused(void);
#else
static inline void trbe_enable(void)
static inline void trbe_init_el3(void)
{
}
static inline void trbe_init_el2_unused(void)
{
}
#endif /* ENABLE_TRBE_FOR_NS */

View file

@ -8,9 +8,13 @@
#define TRF_H
#if ENABLE_TRF_FOR_NS
void trf_enable(void);
void trf_init_el3(void);
void trf_init_el2_unused(void);
#else
static inline void trf_enable(void)
static inline void trf_init_el3(void)
{
}
static inline void trf_init_el2_unused(void)
{
}
#endif /* ENABLE_TRF_FOR_NS */
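
A note on the ten header diffs above: each feature keeps the usual TF-A arrangement of real declarations behind its ENABLE_* build flag and empty static inline stubs otherwise, so call sites compile unguarded whether or not the feature is built in. For example (a sketch using the trf hook; the same shape applies to the others):

/* With ENABLE_TRF_FOR_NS=0 this resolves to an empty static inline
 * stub and the call compiles away; no #if at the call site. */
if (is_feat_trf_supported()) {
	trf_init_el3();
}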

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016-2023, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2016-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -142,19 +142,19 @@ static void enable_extensions_nonsecure(bool el2_unused)
}
if (is_feat_sys_reg_trace_supported()) {
sys_reg_trace_enable();
sys_reg_trace_init_el3();
}
if (is_feat_trf_supported()) {
trf_enable();
trf_init_el3();
}
/*
* Also applies to PMU < v3. The PMU is only disabled for EL3 and Secure
* state execution. This does not affect lower NS ELs.
*/
pmuv3_disable_el3();
#endif
pmuv3_init_el3();
#endif /* IMAGE_BL32 */
}
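
One rename in the hunk above is worth flagging: pmuv3_disable_el3() becomes pmuv3_init_el3() with its body unchanged (see the pmuv3 diffs further down). The new name presumably reflects that disabling the PMU for EL3 and Secure execution is now treated as one-time EL3 initialisation rather than a toggle.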
/*******************************************************************************

View file

@ -496,21 +496,53 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
}
/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
* When EL2 is implemented but unused `el2_unused` is non-zero, otherwise
* it is zero. This function updates some registers in-place and its contents
* are being prepared to be moved to cm_manage_extensions_el3 and
* cm_manage_extensions_nonsecure.
* Enable architecture extensions for EL3 execution. This function only updates
* registers in-place which are expected to either never change or be
* overwritten by el3_exit.
******************************************************************************/
static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ctx)
{
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
if (is_feat_spe_supported()) {
spe_enable(el2_unused);
spe_init_el3();
}
if (is_feat_amu_supported()) {
amu_enable(el2_unused, ctx);
amu_init_el3();
}
if (is_feat_sme_supported()) {
sme_init_el3();
}
if (is_feat_mpam_supported()) {
mpam_init_el3();
}
if (is_feat_trbe_supported()) {
trbe_init_el3();
}
if (is_feat_brbe_supported()) {
brbe_init_el3();
}
if (is_feat_trf_supported()) {
trf_init_el3();
}
pmuv3_init_el3();
}
#endif /* IMAGE_BL31 */
/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
if (is_feat_amu_supported()) {
amu_enable(ctx);
}
/* Enable SVE and FPU/SIMD */
@ -522,46 +554,10 @@ static void manage_extensions_nonsecure_mixed(bool el2_unused, cpu_context_t *ct
sme_enable(ctx);
}
if (is_feat_mpam_supported()) {
mpam_enable(el2_unused);
}
if (is_feat_trbe_supported()) {
trbe_enable();
}
if (is_feat_brbe_supported()) {
brbe_enable();
}
if (is_feat_sys_reg_trace_supported()) {
sys_reg_trace_enable(ctx);
}
if (is_feat_trf_supported()) {
trf_enable();
}
#endif
}
/*******************************************************************************
* Enable architecture extensions for EL3 execution. This function only updates
* registers in-place which are expected to either never change or be
* overwritten by el3_exit.
******************************************************************************/
#if IMAGE_BL31
void cm_manage_extensions_el3(void)
{
pmuv3_disable_el3();
}
#endif /* IMAGE_BL31 */
/*******************************************************************************
* Enable architecture extensions on first entry to Non-secure world.
******************************************************************************/
static void manage_extensions_nonsecure(cpu_context_t *ctx)
{
#if IMAGE_BL31
pmuv3_enable(ctx);
#endif /* IMAGE_BL31 */
}
@ -573,7 +569,39 @@ static void manage_extensions_nonsecure(cpu_context_t *ctx)
static void manage_extensions_nonsecure_el2_unused(void)
{
#if IMAGE_BL31
if (is_feat_spe_supported()) {
spe_init_el2_unused();
}
if (is_feat_amu_supported()) {
amu_init_el2_unused();
}
if (is_feat_mpam_supported()) {
mpam_init_el2_unused();
}
if (is_feat_trbe_supported()) {
trbe_init_el2_unused();
}
if (is_feat_sys_reg_trace_supported()) {
sys_reg_trace_init_el2_unused();
}
if (is_feat_trf_supported()) {
trf_init_el2_unused();
}
pmuv3_init_el2_unused();
if (is_feat_sve_supported()) {
sve_init_el2_unused();
}
if (is_feat_sme_supported()) {
sme_init_el2_unused();
}
#endif /* IMAGE_BL31 */
}
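
The three functions above give the split its shape. As a rough sketch of who now does what (callers of the first two sit outside this diff; cm_prepare_el3_exit() calling the third is visible in a later hunk):

cm_manage_extensions_el3();               /* EL3-only state: *_init_el3() */
manage_extensions_nonsecure(ctx);         /* per-context: *_enable(ctx) */
manage_extensions_nonsecure_el2_unused(); /* direct EL2 writes, only when
                                           * EL2 is present but unused */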
@ -606,6 +634,7 @@ static void manage_extensions_secure(cpu_context_t *ctx)
* Enable SME, SVE, FPU/SIMD in secure context, secure manager
* must ensure SME, SVE, and FPU/SIMD context properly managed.
*/
sme_init_el3();
sme_enable(ctx);
} else {
/*
@ -656,7 +685,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
{
u_register_t sctlr_elx, scr_el3, mdcr_el2;
cpu_context_t *ctx = cm_get_context(security_state);
bool el2_unused = false;
uint64_t hcr_el2 = 0U;
assert(ctx != NULL);
@ -694,8 +722,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
#endif
write_sctlr_el2(sctlr_elx);
} else if (el2_implemented != EL_IMPL_NONE) {
el2_unused = true;
/*
* EL2 present but unused, need to disable safely.
* SCTLR_EL2 can be ignored in this case.
@ -719,24 +745,8 @@ void cm_prepare_el3_exit(uint32_t security_state)
* Initialise CPTR_EL2 setting all fields rather than
* relying on the hw. All fields have architecturally
* UNKNOWN reset values.
*
* CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1
* accesses to the CPACR_EL1 or CPACR from both
* Execution states do not trap to EL2.
*
* CPTR_EL2.TTA: Set to zero so that Non-secure System
* register accesses to the trace registers from both
* Execution states do not trap to EL2.
* If PE trace unit System registers are not implemented
* then this bit is reserved, and must be set to zero.
*
* CPTR_EL2.TFP: Set to zero so that Non-secure accesses
* to SIMD and floating-point functionality from both
* Execution states do not trap to EL2.
*/
write_cptr_el2(CPTR_EL2_RESET_VAL &
~(CPTR_EL2_TCPAC_BIT | CPTR_EL2_TTA_BIT
| CPTR_EL2_TFP_BIT));
write_cptr_el2(CPTR_EL2_RESET_VAL);
/*
* Initialise CNTHCTL_EL2. All fields are
@ -787,16 +797,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
* relying on hw. Some fields are architecturally
* UNKNOWN on reset.
*
* MDCR_EL2.TTRF: Set to zero so that access to Trace
* Filter Control register TRFCR_EL1 at EL1 is not
* trapped to EL2. This bit is RES0 in versions of
* the architecture earlier than ARMv8.4.
*
* MDCR_EL2.TPMS: Set to zero so that accesses to
* Statistical Profiling control registers from EL1
* do not trap to EL2. This bit is RES0 when SPE is
* not implemented.
*
* MDCR_EL2.TDRA: Set to zero so that Non-secure EL0 and
* EL1 System register accesses to the Debug ROM
* registers are not trapped to EL2.
@ -810,16 +810,10 @@ void cm_prepare_el3_exit(uint32_t security_state)
*
* MDCR_EL2.TDE: Set to zero so that debug exceptions
* are not routed to EL2.
*
* MDCR_EL2.E2TB: Set to zero so that the trace Buffer
* owning exception level is NS-EL1 and, tracing is
* prohibited at NS-EL2. These bits are RES0 when
* FEAT_TRBE is not implemented.
*/
mdcr_el2 = ((MDCR_EL2_RESET_VAL) & ~(MDCR_EL2_TTRF |
MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT |
MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1)));
mdcr_el2 = ((MDCR_EL2_RESET_VAL) &
~(MDCR_EL2_TDRA_BIT | MDCR_EL2_TDOSA_BIT |
MDCR_EL2_TDA_BIT | MDCR_EL2_TDE_BIT));
write_mdcr_el2(mdcr_el2);
@ -844,7 +838,6 @@ void cm_prepare_el3_exit(uint32_t security_state)
manage_extensions_nonsecure_el2_unused();
}
manage_extensions_nonsecure_mixed(el2_unused, ctx);
}
cm_el1_sysregs_context_restore(security_state);
@ -1149,23 +1142,15 @@ void cm_el2_sysregs_context_restore(uint32_t security_state)
void cm_prepare_el3_exit_ns(void)
{
#if CTX_INCLUDE_EL2_REGS
#if ENABLE_ASSERTIONS
cpu_context_t *ctx = cm_get_context(NON_SECURE);
assert(ctx != NULL);
/* Assert that EL2 is used. */
#if ENABLE_ASSERTIONS
el3_state_t *state = get_el3state_ctx(ctx);
u_register_t scr_el3 = read_ctx_reg(state, CTX_SCR_EL3);
#endif
u_register_t scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
assert(((scr_el3 & SCR_HCE_BIT) != 0UL) &&
(el_implemented(2U) != EL_IMPL_NONE));
/*
* Currently some extensions are configured using
* direct register updates. Therefore, do this here
* instead of when setting up context.
*/
manage_extensions_nonsecure_mixed(0, ctx);
#endif /* ENABLE_ASSERTIONS */
/*
* Set the NS bit to be able to access the ICC_SRE_EL2

View file

@ -188,95 +188,66 @@ static __unused bool amu_group1_supported(void)
* Enable counters. This function is meant to be invoked by the context
* management library before exiting from EL3.
*/
void amu_enable(bool el2_unused, cpu_context_t *ctx)
void amu_enable(cpu_context_t *ctx)
{
uint64_t amcfgr_el0_ncg; /* Number of counter groups */
uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
uint64_t amcntenset0_el0_px = 0x0; /* Group 0 enable mask */
uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
if (el2_unused) {
/*
* CPTR_EL2.TAM: Set to zero so any accesses to the Activity
* Monitor registers do not trap to EL2.
*/
write_cptr_el2_tam(0U);
}
/*
* Retrieve and update the CPTR_EL3 value from the context mentioned
* in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
* the Activity Monitor registers do not trap to EL3.
* Set CPTR_EL3.TAM to zero so that any accesses to the Activity Monitor
* registers do not trap to EL3.
*/
ctx_write_cptr_el3_tam(ctx, 0U);
/*
* Retrieve the number of architected counters. All of these counters
* are enabled by default.
*/
/* Initialize FEAT_AMUv1p1 features if present. */
if (is_feat_amuv1p1_supported()) {
/*
* Set SCR_EL3.AMVOFFEN to one so that accesses to virtual
* offset registers at EL2 do not trap to EL3
*/
ctx_write_scr_el3_amvoffen(ctx, 1U);
}
}
amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
amcntenset0_el0_px = (UINT64_C(1) << (amcgcr_el0_cg0nc)) - 1U;
void amu_init_el3(void)
{
uint64_t group0_impl_ctr = read_amcgcr_el0_cg0nc();
uint64_t group0_en_mask = (1 << (group0_impl_ctr)) - 1U;
uint64_t num_ctr_groups = read_amcfgr_el0_ncg();
assert(amcgcr_el0_cg0nc <= AMU_AMCGCR_CG0NC_MAX);
/*
* The platform may opt to enable specific auxiliary counters. This can
* be done via the common FCONF getter, or via the platform-implemented
* function.
*/
/* Enable all architected counters by default */
write_amcntenset0_el0_px(group0_en_mask);
#if ENABLE_AMU_AUXILIARY_COUNTERS
const struct amu_topology *topology;
if (num_ctr_groups > 0U) {
uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
const struct amu_topology *topology;
/*
* The platform may opt to enable specific auxiliary counters.
* This can be done via the common FCONF getter, or via the
* platform-implemented function.
*/
#if ENABLE_AMU_FCONF
topology = FCONF_GET_PROPERTY(amu, config, topology);
topology = FCONF_GET_PROPERTY(amu, config, topology);
#else
topology = plat_amu_topology();
topology = plat_amu_topology();
#endif /* ENABLE_AMU_FCONF */
if (topology != NULL) {
unsigned int core_pos = plat_my_core_pos();
if (topology != NULL) {
unsigned int core_pos = plat_my_core_pos();
amcntenset1_el0_px = topology->cores[core_pos].enable;
} else {
ERROR("AMU: failed to generate AMU topology\n");
amcntenset1_el0_px = topology->cores[core_pos].enable;
} else {
ERROR("AMU: failed to generate AMU topology\n");
}
write_amcntenset1_el0_px(amcntenset1_el0_px);
}
#else /* ENABLE_AMU_AUXILIARY_COUNTERS */
if (num_ctr_groups > 0U) {
VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
}
#endif /* ENABLE_AMU_AUXILIARY_COUNTERS */
/*
* Enable the requested counters.
*/
write_amcntenset0_el0_px(amcntenset0_el0_px);
amcfgr_el0_ncg = read_amcfgr_el0_ncg();
if (amcfgr_el0_ncg > 0U) {
write_amcntenset1_el0_px(amcntenset1_el0_px);
#if !ENABLE_AMU_AUXILIARY_COUNTERS
VERBOSE("AMU: auxiliary counters detected but support is disabled\n");
#endif
}
/* Initialize FEAT_AMUv1p1 features if present. */
if (is_feat_amuv1p1_supported()) {
if (el2_unused) {
/*
* Make sure virtual offsets are disabled if EL2 not
* used.
*/
write_hcr_el2_amvoffen(0U);
} else {
/*
* Virtual offset registers are only accessible from EL3
* and EL2, when clear, this bit traps accesses from EL2
* so we set it to 1 when EL2 is present.
*/
ctx_write_scr_el3_amvoffen(ctx, 1U);
}
#if AMU_RESTRICT_COUNTERS
/*
* FEAT_AMUv1p1 adds a register field to restrict access to
@ -297,6 +268,21 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
#endif
}
void amu_init_el2_unused(void)
{
/*
* CPTR_EL2.TAM: Set to zero so any accesses to the Activity Monitor
* registers do not trap to EL2.
*/
write_cptr_el2_tam(0U);
/* Initialize FEAT_AMUv1p1 features if present. */
if (is_feat_amuv1p1_supported()) {
/* Make sure virtual offsets are disabled if EL2 not used. */
write_hcr_el2_amvoffen(0U);
}
}
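
The group 0 enable mask in amu_init_el3() deserves a worked example: AMCGCR_EL0.CG0NC reports the number of architected (group 0) counters, and the mask enables all of them. Assuming four counters (values illustrative):

uint64_t group0_impl_ctr = 4U;                         /* AMCGCR_EL0.CG0NC */
uint64_t group0_en_mask = (1 << group0_impl_ctr) - 1U; /* 0b1111 */
write_amcntenset0_el0_px(group0_en_mask);              /* counters 0-3 on */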
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
@ -526,10 +512,10 @@ static void *amu_context_restore(const void *arg)
uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */
uint64_t amcfgr_el0_ncg; /* Number of counter groups */
uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
#if ENABLE_AMU_AUXILIARY_COUNTERS
uint64_t amcfgr_el0_ncg; /* Number of counter groups */
uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
#endif
@ -541,7 +527,6 @@ static void *amu_context_restore(const void *arg)
core_pos = plat_my_core_pos();
ctx = &amu_ctxs_[core_pos];
amcfgr_el0_ncg = read_amcfgr_el0_ncg();
amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
if (is_feat_amuv1p1_supported()) {
@ -549,21 +534,11 @@ static void *amu_context_restore(const void *arg)
}
#if ENABLE_AMU_AUXILIARY_COUNTERS
amcfgr_el0_ncg = read_amcfgr_el0_ncg();
amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
amcg1idr_el0_voff = (hcr_el2_amvoffen != 0U) ? read_amcg1idr_el0_voff() : 0U;
#endif
/*
* Sanity check that all counters were disabled when the context was
* previously saved.
*/
assert(read_amcntenset0_el0_px() == 0U);
if (amcfgr_el0_ncg > 0U) {
assert(read_amcntenset1_el0_px() == 0U);
}
/*
* Restore the counter values from the local context.
*/

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Arm Limited. All rights reserved.
* Copyright (c) 2022-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -7,8 +7,9 @@
#include <arch.h>
#include <arch_features.h>
#include <arch_helpers.h>
#include <lib/extensions/brbe.h>
void brbe_enable(void)
void brbe_init_el3(void)
{
uint64_t val;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018-2022, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -11,7 +11,7 @@
#include <arch_helpers.h>
#include <lib/extensions/mpam.h>
void mpam_enable(bool el2_unused)
void mpam_init_el3(void)
{
/*
* Enable MPAM, and disable trapping to EL3 when lower ELs access their
@ -19,15 +19,18 @@ void mpam_enable(bool el2_unused)
*/
write_mpam3_el3(MPAM3_EL3_MPAMEN_BIT);
/*
* If EL2 is implemented but unused, disable trapping to EL2 when lower
* ELs access their own MPAM registers.
*/
if (el2_unused) {
write_mpam2_el2(0ULL);
if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
write_mpamhcr_el2(0ULL);
}
}
}
/*
* If EL2 is implemented but unused, disable trapping to EL2 when lower ELs
* access their own MPAM registers.
*/
void mpam_init_el2_unused(void)
{
write_mpam2_el2(0ULL);
if ((read_mpamidr_el1() & MPAMIDR_HAS_HCR_BIT) != 0U) {
write_mpamhcr_el2(0ULL);
}
}

View file

@ -29,7 +29,7 @@ static u_register_t mtpmu_disable_el3(u_register_t sdcr)
* Applies to all PMU versions. Name is PMUv3 for compatibility with aarch64 and
* to not clash with platforms which reuse the PMU name
*/
void pmuv3_disable_el3(void)
void pmuv3_init_el3(void)
{
u_register_t sdcr = read_sdcr();

View file

@ -48,7 +48,7 @@ static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
return mdcr_el3;
}
void pmuv3_disable_el3(void)
void pmuv3_init_el3(void)
{
u_register_t mdcr_el3 = read_mdcr_el3();

View file

@ -17,7 +17,6 @@
void sme_enable(cpu_context_t *context)
{
u_register_t reg;
u_register_t cptr_el3;
el3_state_t *state;
/* Get the context state. */
@ -32,9 +31,14 @@ void sme_enable(cpu_context_t *context)
reg = read_ctx_reg(state, CTX_SCR_EL3);
reg |= SCR_ENTP2_BIT;
write_ctx_reg(state, CTX_SCR_EL3, reg);
}
/* Set CPTR_EL3.ESM bit so we can write SMCR_EL3 without trapping. */
cptr_el3 = read_cptr_el3();
void sme_init_el3(void)
{
u_register_t cptr_el3 = read_cptr_el3();
u_register_t smcr_el3;
/* Set CPTR_EL3.ESM bit so we can access SMCR_EL3 without trapping. */
write_cptr_el3(cptr_el3 | ESM_BIT);
isb();
@ -43,11 +47,10 @@ void sme_enable(cpu_context_t *context)
* to be the least restrictive, then lower ELs can restrict as needed
* using SMCR_EL2 and SMCR_EL1.
*/
reg = SMCR_ELX_LEN_MAX;
smcr_el3 = SMCR_ELX_LEN_MAX;
if (read_feat_sme_fa64_id_field() != 0U) {
VERBOSE("[SME] FA64 enabled\n");
reg |= SMCR_ELX_FA64_BIT;
smcr_el3 |= SMCR_ELX_FA64_BIT;
}
/*
@ -58,15 +61,24 @@ void sme_enable(cpu_context_t *context)
*/
if (is_feat_sme2_supported()) {
VERBOSE("SME2 enabled\n");
reg |= SMCR_ELX_EZT0_BIT;
smcr_el3 |= SMCR_ELX_EZT0_BIT;
}
write_smcr_el3(reg);
write_smcr_el3(smcr_el3);
/* Reset CPTR_EL3 value. */
write_cptr_el3(cptr_el3);
isb();
}
void sme_init_el2_unused(void)
{
/*
* CPTR_EL2.TCPAC: Set to zero so that Non-secure EL1 accesses to the
* CPACR_EL1 or CPACR from both Execution states do not trap to EL2.
*/
write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TCPAC_BIT);
}
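
Note the ordering sme_init_el3() has to respect: SMCR_EL3 accesses themselves trap to EL3 until CPTR_EL3.ESM is set, hence the set/isb/configure/restore sequence. Condensed from the hunk above:

u_register_t cptr_el3 = read_cptr_el3();
u_register_t smcr_el3 = SMCR_ELX_LEN_MAX;  /* plus FA64/EZT0 bits as probed */

write_cptr_el3(cptr_el3 | ESM_BIT); /* lift the SMCR_EL3 trap */
isb();                              /* make the new CPTR_EL3 visible */
write_smcr_el3(smcr_el3);           /* max vector length, FA64, ZT0 */
write_cptr_el3(cptr_el3);           /* put the traps back as they were */
isb();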
void sme_disable(cpu_context_t *context)
{
u_register_t reg;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017-2022, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2017-2023, Arm Limited and Contributors. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -21,25 +21,10 @@ static inline void psb_csync(void)
__asm__ volatile("hint #17");
}
void spe_enable(bool el2_unused)
void spe_init_el3(void)
{
uint64_t v;
if (el2_unused) {
/*
* MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
* profiling controls to EL2.
*
* MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
* state. Accesses to profiling buffer controls at
* Non-secure EL1 are not trapped to EL2.
*/
v = read_mdcr_el2();
v &= ~MDCR_EL2_TPMS;
v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
write_mdcr_el2(v);
}
/*
* MDCR_EL2.NSPB (ARM v8.2): SPE enabled in Non-secure state
* and disabled in secure state. Accesses to SPE registers at
@ -55,6 +40,24 @@ void spe_enable(bool el2_unused)
write_mdcr_el3(v);
}
void spe_init_el2_unused(void)
{
uint64_t v;
/*
* MDCR_EL2.TPMS (ARM v8.2): Do not trap statistical
* profiling controls to EL2.
*
* MDCR_EL2.E2PB (ARM v8.2): SPE enabled in Non-secure
* state. Accesses to profiling buffer controls at
* Non-secure EL1 are not trapped to EL2.
*/
v = read_mdcr_el2();
v &= ~MDCR_EL2_TPMS;
v |= MDCR_EL2_E2PB(MDCR_EL2_E2PB_EL1);
write_mdcr_el2(v);
}
void spe_disable(void)
{
uint64_t v;

View file

@ -37,6 +37,16 @@ void sve_enable(cpu_context_t *context)
(ZCR_EL3_LEN_MASK & CONVERT_SVE_LENGTH(SVE_VECTOR_LEN)));
}
void sve_init_el2_unused(void)
{
/*
* CPTR_EL2.TFP: Set to zero so that Non-secure accesses to Advanced
* SIMD and floating-point functionality from both Execution states do
* not trap to EL2.
*/
write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TFP_BIT);
}
void sve_disable(cpu_context_t *context)
{
u_register_t reg;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,7 +10,7 @@
#include <arch_helpers.h>
#include <lib/extensions/sys_reg_trace.h>
void sys_reg_trace_enable(void)
void sys_reg_trace_init_el3(void)
{
uint32_t val;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -24,3 +24,14 @@ void sys_reg_trace_enable(cpu_context_t *ctx)
val &= ~TTA_BIT;
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, val);
}
void sys_reg_trace_init_el2_unused(void)
{
/*
* CPTR_EL2.TTA: Set to zero so that Non-secure System register accesses
* to the trace registers from both Execution states do not trap to
* EL2. If PE trace unit System registers are not implemented then this
* bit is reserved, and must be set to zero.
*/
write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TTA_BIT);
}
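
Most of the new *_init_el2_unused() hooks share the same read-modify-write idiom: clear only the trap bit the feature owns and leave the rest of the (now shared) EL2 register alone. The shape, as it appears in the sve, sys_reg_trace and trf hooks in this commit:

write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TTA_BIT); /* sys_reg_trace */
write_cptr_el2(read_cptr_el2() & ~CPTR_EL2_TFP_BIT); /* sve */
write_mdcr_el2(read_mdcr_el2() & ~MDCR_EL2_TTRF);    /* trf */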

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021-2022, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -19,9 +19,9 @@ static void tsb_csync(void)
__asm__ volatile("hint #18");
}
void trbe_enable(void)
void trbe_init_el3(void)
{
uint64_t val;
u_register_t val;
/*
* MDCR_EL3.NSTB = 0b11
@ -34,6 +34,17 @@ void trbe_enable(void)
write_mdcr_el3(val);
}
void trbe_init_el2_unused(void)
{
/*
* MDCR_EL2.E2TB: Set to zero so that the trace Buffer
* owning exception level is NS-EL1 and, tracing is
* prohibited at NS-EL2. These bits are RES0 when
* FEAT_TRBE is not implemented.
*/
write_mdcr_el2(read_mdcr_el2() & ~MDCR_EL2_E2TB(MDCR_EL2_E2TB_EL1));
}
static void *trbe_drain_trace_buffers_hook(const void *arg __unused)
{
if (is_feat_trbe_supported()) {

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -10,7 +10,7 @@
#include <arch_helpers.h>
#include <lib/extensions/trf.h>
void trf_enable(void)
void trf_init_el3(void)
{
uint32_t val;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2021, Arm Limited. All rights reserved.
* Copyright (c) 2021-2023, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -9,9 +9,9 @@
#include <arch_helpers.h>
#include <lib/extensions/trf.h>
void trf_enable(void)
void trf_init_el3(void)
{
uint64_t val;
u_register_t val;
/*
* MDCR_EL3.TTRF = b0
@ -22,3 +22,15 @@ void trf_enable(void)
val &= ~MDCR_TTRF_BIT;
write_mdcr_el3(val);
}
void trf_init_el2_unused(void)
{
/*
* MDCR_EL2.TTRF: Set to zero so that access to Trace
* Filter Control register TRFCR_EL1 at EL1 is not
* trapped to EL2. This bit is RES0 in versions of
* the architecture earlier than ARMv8.4.
*/
write_mdcr_el2(read_mdcr_el2() & ~MDCR_EL2_TTRF);
}