refactor(pmu): convert FEAT_MTPMU to C and move to persistent register init

The FEAT_MTPMU feature disable runs very early after reset. This means
it has to be written in assembly, since the C runtime has not been
initialised yet.

However, there is no need for it to happen so early. The PMU state is
only relevant after TF-A has relinquished control, and the assembly
implementation is verbose and difficult to read. Delaying the
initialisation allows it to happen alongside the rest of the PMU setup,
and aligns DISABLE_MTPMU with the FEAT_STATE convention in the process.

BREAKING CHANGE: This patch explicitly breaks the EL2 entry path. It is
currently unsupported.

Signed-off-by: Boyan Karatotev <boyan.karatotev@arm.com>
Change-Id: I2aa659d026fbdb75152469f6d19812ece3488c6f
Authored by Boyan Karatotev on 2023-02-16 09:45:29 +00:00; committed by Jayanth Dodderi Chidanand
parent c73686a11c
commit 83a4dae1af
19 changed files with 106 additions and 234 deletions
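
For context on "Align with FEAT_STATE": TF-A feature flags follow a three-state
convention, and DISABLE_MTPMU now takes those values instead of a plain boolean.
The C sketch below is a reading aid rather than part of this patch; the
FEAT_STATE_* values shown are the usual TF-A definitions and are reproduced here
as an assumption.

/*
 * Reading aid (not part of this patch): the FEAT_STATE convention that
 * DISABLE_MTPMU now follows.
 */
#define FEAT_STATE_DISABLED	0	/* leave FEAT_MTPMU alone; handling is compiled out */
#define FEAT_STATE_ALWAYS	1	/* assume FEAT_MTPMU is present and disable it */
#define FEAT_STATE_CHECK	2	/* disable it only if the ID registers report it */

/*
 * Because is_feat_mtpmu_supported() compares DISABLE_MTPMU against these
 * constants, values 0 and 1 fold to compile-time false/true, letting the
 * compiler drop or keep the MTPMU code outright; only value 2 reads
 * ID_AA64DFR0_EL1.MTPMU (or ID_DFR1.MTPMU on AArch32) at runtime.
 */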

@@ -1148,7 +1148,6 @@ $(eval $(call assert_booleans,\
 	CTX_INCLUDE_FPREGS \
 	CTX_INCLUDE_EL2_REGS \
 	DEBUG \
-	DISABLE_MTPMU \
 	DYN_DISABLE_AUTH \
 	EL3_EXCEPTION_HANDLING \
 	ENABLE_AMU_AUXILIARY_COUNTERS \
@@ -1226,6 +1225,7 @@ $(eval $(call assert_numerics,\
 	CTX_INCLUDE_MTE_REGS \
 	CTX_INCLUDE_NEVE_REGS \
 	CRYPTO_SUPPORT \
+	DISABLE_MTPMU \
 	ENABLE_BRBE_FOR_NS \
 	ENABLE_TRBE_FOR_NS \
 	ENABLE_BTI \

@@ -16,10 +16,6 @@ BL1_SOURCES += bl1/${ARCH}/bl1_arch_setup.c \
 	plat/common/${ARCH}/platform_up_stack.S \
 	${MBEDTLS_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL1_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL1_SOURCES += lib/cpus/aarch64/dsu_helpers.S \
 	lib/el3_runtime/aarch64/context.S

@@ -43,10 +43,6 @@ BL2_SOURCES += bl2/${ARCH}/bl2_el3_entrypoint.S \
 	bl2/${ARCH}/bl2_run_next_image.S \
 	lib/cpus/${ARCH}/cpu_helpers.S
 
-ifeq (${DISABLE_MTPMU},1)
-BL2_SOURCES += lib/extensions/mtpmu/${ARCH}/mtpmu.S
-endif
-
 ifeq (${ARCH},aarch64)
 BL2_SOURCES += lib/cpus/aarch64/dsu_helpers.S
 endif

@@ -54,10 +54,6 @@ BL31_SOURCES += bl31/bl31_main.c \
 	${SPMC_SOURCES} \
 	${SPM_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL31_SOURCES += lib/extensions/mtpmu/aarch64/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL31_SOURCES += lib/pmf/pmf_main.c
 endif

@@ -20,10 +20,6 @@ BL32_SOURCES += bl32/sp_min/sp_min_main.c \
 	services/std_svc/std_svc_setup.c \
 	${PSCI_LIB_SOURCES}
 
-ifeq (${DISABLE_MTPMU},1)
-BL32_SOURCES += lib/extensions/mtpmu/aarch32/mtpmu.S
-endif
-
 ifeq (${ENABLE_PMF}, 1)
 BL32_SOURCES += lib/pmf/pmf_main.c
 endif

@@ -192,6 +192,13 @@ void detect_arch_features(void)
 	check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
 		      "TWED", 1, 1);
 
+	/*
+	 * even though this is a "DISABLE" it does confusingly perform feature
+	 * enablement duties like all other flags here. Check it against the HW
+	 * feature when we intend to diverge from the default behaviour
+	 */
+	check_feature(DISABLE_MTPMU, read_feat_mtpmu_id_field(), "MTPMU", 1, 1);
+
 	/* v8.7 features */
 	check_feature(ENABLE_FEAT_HCX, read_feat_hcx_id_field(), "HCX", 1, 1);
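
For reviewers unfamiliar with the FEATURE_DETECTION mechanism: check_feature()
cross-checks a build-time flag against the corresponding hardware ID field, here
with an expected range of [1, 1]. The snippet below is only an illustration of
what that amounts to for DISABLE_MTPMU=1; it is not the real check_feature()
body, and example_verify_mtpmu() is a name invented for this sketch.

/*
 * Illustration only: roughly what the check above amounts to when
 * DISABLE_MTPMU == 1 (FEAT_STATE_ALWAYS). The helper name is hypothetical;
 * the real logic lives in check_feature().
 */
#include <arch.h>
#include <arch_features.h>
#include <common/debug.h>

static void example_verify_mtpmu(void)
{
	/* ID_AA64DFR0_EL1.MTPMU, as read by the helper added in this patch */
	unsigned int mtpmu = read_feat_mtpmu_id_field();

	/* The expected range passed to check_feature() above is [1, 1]. */
	if (mtpmu != ID_AA64DFR0_MTPMU_SUPPORTED) {
		ERROR("DISABLE_MTPMU=1 expects FEAT_MTPMU, but the MTPMU ID field is %u\n",
		      mtpmu);
	}
}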

@@ -207,10 +207,10 @@ Common build options
   of the binary image. If set to 1, then only the ELF image is built.
   0 is the default.
 
-- ``DISABLE_MTPMU``: Boolean option to disable FEAT_MTPMU if implemented
-  (Armv8.6 onwards). Its default value is 0 to keep consistency with platforms
-  that do not implement FEAT_MTPMU. For more information on FEAT_MTPMU,
-  check the latest Arm ARM.
+- ``DISABLE_MTPMU``: Numeric option to disable ``FEAT_MTPMU`` (Multi Threaded
+  PMU). ``FEAT_MTPMU`` is an optional feature available on Armv8.6 onwards.
+  This flag can take values 0 to 2, to align with the ``FEATURE_DETECTION``
+  mechanism. Default is ``0``.
 
 - ``DYN_DISABLE_AUTH``: Provides the capability to dynamically disable Trusted
   Board Boot authentication at runtime. This option is meant to be enabled only

@@ -122,6 +122,7 @@
 #define ID_DFR1_MTPMU_SHIFT U(0)
 #define ID_DFR1_MTPMU_MASK U(0xf)
 #define ID_DFR1_MTPMU_SUPPORTED U(1)
+#define ID_DFR1_MTPMU_DISABLED U(15)
 
 /* ID_MMFR3 definitions */
 #define ID_MMFR3_PAN_SHIFT U(16)

@@ -167,4 +167,24 @@ static inline unsigned int read_feat_pmuv3_id_field(void)
 	return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_PERFMON);
 }
 
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_dfr1(), ID_DFR1_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return mtpmu != 0U && mtpmu != ID_DFR1_MTPMU_DISABLED;
+}
+
 #endif /* ARCH_FEATURES_H */

@@ -221,6 +221,7 @@ DEFINE_COPROCR_READ_FUNC(midr, MIDR)
 DEFINE_COPROCR_READ_FUNC(id_mmfr3, ID_MMFR3)
 DEFINE_COPROCR_READ_FUNC(id_mmfr4, ID_MMFR4)
 DEFINE_COPROCR_READ_FUNC(id_dfr0, ID_DFR0)
+DEFINE_COPROCR_READ_FUNC(id_dfr1, ID_DFR1)
 DEFINE_COPROCR_READ_FUNC(id_pfr0, ID_PFR0)
 DEFINE_COPROCR_READ_FUNC(id_pfr1, ID_PFR1)
 DEFINE_COPROCR_READ_FUNC(isr, ISR)

@@ -277,10 +277,6 @@
 	cps	#MODE32_mon
 	isb
 
-#if DISABLE_MTPMU
-	bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 	/* -------------------------------------------------------------
 	 * This code will be executed for both warm and cold resets.

@@ -243,6 +243,7 @@
 #define ID_AA64DFR0_MTPMU_SHIFT U(48)
 #define ID_AA64DFR0_MTPMU_MASK ULL(0xf)
 #define ID_AA64DFR0_MTPMU_SUPPORTED ULL(1)
+#define ID_AA64DFR0_MTPMU_DISABLED ULL(15)
 
 /* ID_AA64DFR0_EL1.BRBE definitions */
 #define ID_AA64DFR0_BRBE_SHIFT U(52)

@@ -705,4 +705,24 @@ static inline unsigned int read_feat_pmuv3_id_field(void)
 	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_PMUVER);
 }
 
+static inline unsigned int read_feat_mtpmu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_aa64dfr0_el1(), ID_AA64DFR0_MTPMU);
+}
+
+static inline bool is_feat_mtpmu_supported(void)
+{
+	if (DISABLE_MTPMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (DISABLE_MTPMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	unsigned int mtpmu = read_feat_mtpmu_id_field();
+
+	return (mtpmu != 0U) && (mtpmu != ID_AA64DFR0_MTPMU_DISABLED);
+}
+
 #endif /* ARCH_FEATURES_H */

@@ -244,10 +244,6 @@
 	isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-	bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 	/* -------------------------------------------------------------
 	 * This code will be executed for both warm and cold resets.

@@ -293,10 +293,6 @@
 	isb
 	.endif /* _init_sctlr */
 
-#if DISABLE_MTPMU
-	bl	mtpmu_disable
-#endif
-
 	.if \_warm_boot_mailbox
 	/* -------------------------------------------------------------
 	 * This code will be executed for both warm and cold resets.

@@ -1,105 +0,0 @@
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
.global mtpmu_disable
/* -------------------------------------------------------------
* The functions in this file are called at entrypoint, before
* the CPU has decided whether this is a cold or a warm boot.
* Therefore there are no stack yet to rely on for a C function
* call.
* -------------------------------------------------------------
*/
/*
* bool mtpmu_supported(void)
*
* Return a boolean indicating whether FEAT_MTPMU is supported or not.
*
* Trash registers: r0.
*/
func mtpmu_supported
ldcopr r0, ID_DFR1
and r0, r0, #(ID_DFR1_MTPMU_MASK >> ID_DFR1_MTPMU_SHIFT)
cmp r0, #ID_DFR1_MTPMU_SUPPORTED
mov r0, #0
addeq r0, r0, #1
bx lr
endfunc mtpmu_supported
/*
* bool el_implemented(unsigned int el)
*
* Return a boolean indicating if the specified EL (2 or 3) is implemented.
*
* Trash registers: r0
*/
func el_implemented
cmp r0, #3
ldcopr r0, ID_PFR1
lsreq r0, r0, #ID_PFR1_SEC_SHIFT
lsrne r0, r0, #ID_PFR1_VIRTEXT_SHIFT
/*
* ID_PFR1_VIRTEXT_MASK is the same as ID_PFR1_SEC_MASK
* so use any one of them
*/
and r0, r0, #ID_PFR1_VIRTEXT_MASK
cmp r0, #ID_PFR1_ELx_ENABLED
mov r0, #0
addeq r0, r0, #1
bx lr
endfunc el_implemented
/*
* void mtpmu_disable(void)
*
* Disable mtpmu feature if supported.
*
* Trash register: r0, r1, r2
*/
func mtpmu_disable
mov r2, lr
bl mtpmu_supported
cmp r0, #0
bxeq r2 /* FEAT_MTPMU not supported */
/* FEAT_MTMPU Supported */
mov r0, #3
bl el_implemented
cmp r0, #0
beq 1f
/* EL3 implemented */
ldcopr r0, SDCR
ldr r1, =SDCR_MTPME_BIT
bic r0, r0, r1
stcopr r0, SDCR
/*
* If EL3 is implemented, HDCR.MTPME is implemented as Res0 and
* FEAT_MTPMU is controlled only from EL3, so no need to perform
* any operations for EL2.
*/
isb
bx r2
1:
/* EL3 not implemented */
mov r0, #2
bl el_implemented
cmp r0, #0
bxeq r2 /* No EL2 or EL3 implemented */
/* EL2 implemented */
ldcopr r0, HDCR
ldr r1, =HDCR_MTPME_BIT
orr r0, r0, r1
stcopr r0, HDCR
isb
bx r2
endfunc mtpmu_disable

@@ -1,96 +0,0 @@
/*
* Copyright (c) 2020, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <arch.h>
#include <asm_macros.S>
.global mtpmu_disable
/* -------------------------------------------------------------
* The functions in this file are called at entrypoint, before
* the CPU has decided whether this is a cold or a warm boot.
* Therefore there are no stack yet to rely on for a C function
* call.
* -------------------------------------------------------------
*/
/*
* bool mtpmu_supported(void)
*
* Return a boolean indicating whether FEAT_MTPMU is supported or not.
*
* Trash registers: x0, x1
*/
func mtpmu_supported
mrs x0, id_aa64dfr0_el1
mov_imm x1, ID_AA64DFR0_MTPMU_MASK
and x0, x1, x0, LSR #ID_AA64DFR0_MTPMU_SHIFT
cmp x0, ID_AA64DFR0_MTPMU_SUPPORTED
cset x0, eq
ret
endfunc mtpmu_supported
/*
* bool el_implemented(unsigned int el_shift)
*
* Return a boolean indicating if the specified EL is implemented.
* The EL is represented as the bitmask shift on id_aa64pfr0_el1 register.
*
* Trash registers: x0, x1
*/
func el_implemented
mrs x1, id_aa64pfr0_el1
lsr x1, x1, x0
cmp x1, #ID_AA64PFR0_ELX_MASK
cset x0, eq
ret
endfunc el_implemented
/*
* void mtpmu_disable(void)
*
* Disable mtpmu feature if supported.
*
* Trash register: x0, x1, x30
*/
func mtpmu_disable
mov x10, x30
bl mtpmu_supported
cbz x0, exit_disable
/* FEAT_MTMPU Supported */
mov_imm x0, ID_AA64PFR0_EL3_SHIFT
bl el_implemented
cbz x0, 1f
/* EL3 implemented */
mrs x0, mdcr_el3
mov_imm x1, MDCR_MTPME_BIT
bic x0, x0, x1
msr mdcr_el3, x0
/*
* If EL3 is implemented, MDCR_EL2.MTPME is implemented as Res0 and
* FEAT_MTPMU is controlled only from EL3, so no need to perform
* any operations for EL2.
*/
isb
exit_disable:
ret x10
1:
/* EL3 not implemented */
mov_imm x0, ID_AA64PFR0_EL2_SHIFT
bl el_implemented
cbz x0, exit_disable
/* EL2 implemented */
mrs x0, mdcr_el2
mov_imm x1, MDCR_EL2_MTPME
bic x0, x0, x1
msr mdcr_el2, x0
isb
ret x10
endfunc mtpmu_disable

@@ -9,6 +9,22 @@
 #include <arch_helpers.h>
 #include <lib/extensions/pmuv3.h>
 
+static u_register_t mtpmu_disable_el3(u_register_t sdcr)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return sdcr;
+	}
+
+	/*
+	 * SDCR.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>.MT is
+	 * zero.
+	 */
+	sdcr &= ~SDCR_MTPME_BIT;
+
+	return sdcr;
+}
+
 /*
  * Applies to all PMU versions. Name is PMUv3 for compatibility with aarch64 and
  * to not clash with platforms which reuse the PMU name
@@ -32,6 +48,7 @@ void pmuv3_disable_el3(void)
 	 * ---------------------------------------------------------------------
 	 */
 	sdcr = (sdcr | SDCR_SCCD_BIT) & ~SDCR_SPME_BIT;
+	sdcr = mtpmu_disable_el3(sdcr);
 	write_sdcr(sdcr);
 
 	/* ---------------------------------------------------------------------

@@ -32,6 +32,22 @@ void pmuv3_enable(cpu_context_t *ctx)
 #endif /* CTX_INCLUDE_EL2_REGS */
 }
 
+static u_register_t mtpmu_disable_el3(u_register_t mdcr_el3)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el3;
+	}
+
+	/*
+	 * MDCR_EL3.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el3 &= ~MDCR_MTPME_BIT;
+
+	return mdcr_el3;
+}
+
 void pmuv3_disable_el3(void)
 {
 	u_register_t mdcr_el3 = read_mdcr_el3();
@@ -69,6 +85,7 @@ void pmuv3_disable_el3(void)
 	 */
 	mdcr_el3 = (mdcr_el3 | MDCR_SCCD_BIT | MDCR_MCCD_BIT) &
 		   ~(MDCR_MPMX_BIT | MDCR_SPME_BIT);
+	mdcr_el3 = mtpmu_disable_el3(mdcr_el3);
 	write_mdcr_el3(mdcr_el3);
 
 	/* ---------------------------------------------------------------------
@@ -94,6 +111,22 @@ void pmuv3_disable_el3(void)
 		  PMCR_EL0_P_BIT) & ~(PMCR_EL0_X_BIT | PMCR_EL0_E_BIT));
 }
 
+static u_register_t mtpmu_disable_el2(u_register_t mdcr_el2)
+{
+	if (!is_feat_mtpmu_supported()) {
+		return mdcr_el2;
+	}
+
+	/*
+	 * MDCR_EL2.MTPME = 0
+	 * FEAT_MTPMU is disabled. The Effective value of PMEVTYPER<n>_EL0.MT is
+	 * zero.
+	 */
+	mdcr_el2 &= ~MDCR_EL2_MTPME;
+
+	return mdcr_el2;
+}
+
 void pmuv3_init_el2_unused(void)
 {
 	u_register_t mdcr_el2 = read_mdcr_el2();
@@ -132,5 +165,6 @@ void pmuv3_init_el2_unused(void)
 		   MDCR_EL2_HCCD_BIT) &
 		   ~(MDCR_EL2_HPME_BIT | MDCR_EL2_TPM_BIT | MDCR_EL2_TPMCR_BIT);
 	mdcr_el2 = init_mdcr_el2_hpmn(mdcr_el2);
+	mdcr_el2 = mtpmu_disable_el2(mdcr_el2);
 	write_mdcr_el2(mdcr_el2);
 }