refactor(mpam): enable FEAT_MPAM for FEAT_STATE_CHECKED

At the moment FEAT_MPAM can only be either unconditionally compiled
in, or not supported at all.

Add support for runtime detection (ENABLE_MPAM_FOR_LOWER_ELS=2) by
splitting get_mpam_version() into an ID-register-reading function and a
second function that reports the support status. The latter considers
both build-time settings and runtime information (where needed), and is
consulted before any MPAM-related registers are accessed.
Also move the context saving code from assembly to C, and use the new
is_feat_mpam_supported() function to guard its execution.

ENABLE_MPAM_FOR_LOWER_ELS defaults to 0, so add a stub enable function
to cover builds with compiler optimisations turned off. The unused
mpam_enable() call would normally be optimised away (since it can never
be executed), but at -O0 the compiler leaves a reference to the symbol
in the object file.

Change-Id: I531d87cb855a7c43471f861f625b5a6d4bc61313
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Andre Przywara 2022-11-17 16:42:09 +00:00
parent 023f1bed1d
commit 9448f2b88e
11 changed files with 142 additions and 248 deletions
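
For reference, ENABLE_MPAM_FOR_LOWER_ELS now follows the three-way
FEAT_STATE convention used by the other is_feat_xxx_supported()
helpers. A minimal sketch of the assumed values (the real constants
live in TF-A's include/common/feat_detect.h):

    /* FEAT_STATE convention assumed throughout this patch (sketch) */
    #define FEAT_STATE_DISABLED  0  /* build-time disabled, code compiled out */
    #define FEAT_STATE_ALWAYS    1  /* unconditionally enabled, no runtime check */
    #define FEAT_STATE_CHECK     2  /* enabled only if detected via ID registers */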

bl31/bl31.mk

@@ -108,7 +108,7 @@ BL31_SOURCES += lib/extensions/sve/sve.c
endif
endif
ifeq (${ENABLE_MPAM_FOR_LOWER_ELS},1)
ifneq (${ENABLE_MPAM_FOR_LOWER_ELS},0)
BL31_SOURCES += lib/extensions/mpam/mpam.c
endif
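With the ifneq form, mpam.c is now built for both
ENABLE_MPAM_FOR_LOWER_ELS=1 and =2; in the dynamic case the runtime
check added below decides whether its code actually runs.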

common/feat_detect.c

@@ -130,16 +130,6 @@ static void read_feat_dit(void)
#endif
}
/****************************************************************************
* Feature : FEAT_MPAM (Memory Partitioning and Monitoring (MPAM) Extension)
***************************************************************************/
static void read_feat_mpam(void)
{
#if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_ALWAYS)
feat_detect_panic(get_mpam_version() != 0U, "MPAM");
#endif
}
/**************************************************************
* Feature : FEAT_NV2 (Enhanced Nested Virtualization Support)
*************************************************************/
@@ -293,7 +283,8 @@ void detect_arch_features(void)
read_feat_dit();
check_feature(ENABLE_FEAT_AMUv1, read_feat_amu_id_field(),
"AMUv1", 1, 2);
read_feat_mpam();
check_feature(ENABLE_MPAM_FOR_LOWER_ELS, read_feat_mpam_version(),
"MPAM", 1, 1);
read_feat_nv2();
read_feat_sel2();
check_feature(ENABLE_TRF_FOR_NS, read_feat_trf_id_field(),
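
The check_feature() call replaces the bespoke read_feat_mpam() removed
above. A rough sketch of the semantics this relies on, assuming the
helper, the ERROR() macro and the file-scope "tainted" flag from
common/feat_detect.c (exact messages are illustrative; min/max bound
the accepted ID register version):

    static void check_feature(int state, unsigned long field,
                              const char *feat_name, unsigned int min,
                              unsigned int max)
    {
        if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
            ERROR("FEAT_%s not supported by the PE\n", feat_name);
            tainted = true; /* triggers a panic once all checks have run */
        }
        if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
            ERROR("FEAT_%s is version %lu, only known up to version %u\n",
                  feat_name, field, max);
            tainted = true;
        }
    }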

include/arch/aarch64/arch.h

@@ -1077,10 +1077,8 @@
#define MPAMHCR_EL2 S3_4_C10_C4_0
#define MPAM3_EL3 S3_6_C10_C5_0
#define MPAMIDR_EL1_HAS_HCR_SHIFT ULL(0x11)
#define MPAMIDR_EL1_VPMR_MAX_SHIFT ULL(0x12)
#define MPAMIDR_EL1_VPMR_MAX_WIDTH ULL(0x3)
#define MPAMIDR_EL1_VPMR_MAX_POSSIBLE ULL(0x7)
#define MPAMIDR_HAS_HCR_BIT (ULL(0x1) << 17)
#define MPAMIDR_EL1_VPMR_MAX_SHIFT ULL(18)
#define MPAMIDR_EL1_VPMR_MAX_MASK ULL(0x7)
/*******************************************************************************
* Definitions for system register interface to AMU for FEAT_AMUv1
******************************************************************************/

include/arch/aarch64/arch_features.h

@@ -184,7 +184,7 @@ static inline bool is_armv8_6_feat_amuv1p1_present(void)
* 0x11: v1.1 Armv8.4 or later
*
*/
static inline unsigned int get_mpam_version(void)
static inline unsigned int read_feat_mpam_version(void)
{
return (unsigned int)((((read_id_aa64pfr0_el1() >>
ID_AA64PFR0_MPAM_SHIFT) & ID_AA64PFR0_MPAM_MASK) << 4) |
@@ -192,6 +192,19 @@ static inline unsigned int get_mpam_version(void)
ID_AA64PFR1_MPAM_FRAC_SHIFT) & ID_AA64PFR1_MPAM_FRAC_MASK));
}
static inline bool is_feat_mpam_supported(void)
{
if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_DISABLED) {
return false;
}
if (ENABLE_MPAM_FOR_LOWER_ELS == FEAT_STATE_ALWAYS) {
return true;
}
return read_feat_mpam_version() != 0U;
}
static inline unsigned int read_feat_hcx_id_field(void)
{
return ISOLATE_FIELD(read_id_aa64mmfr1_el1(), ID_AA64MMFR1_EL1_HCX);
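
For reference, a worked example of the value read_feat_mpam_version()
returns: on a PE implementing MPAM v1.1, ID_AA64PFR0_EL1.MPAM reads 1
and ID_AA64PFR1_EL1.MPAM_frac reads 1, so:

    version = (1U << 4) | 1U;    /* 0x11 -> MPAM v1.1 */

A result of 0 (both fields zero) means MPAM is not implemented, which
is exactly what the FEAT_STATE_CHECK path of is_feat_mpam_supported()
tests.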

include/arch/aarch64/arch_helpers.h

@@ -522,11 +522,6 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset0_el0, AMCNTENSET0_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenclr1_el0, AMCNTENCLR1_EL0)
DEFINE_RENAME_SYSREG_RW_FUNCS(amcntenset1_el0, AMCNTENSET1_EL0)
DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(pmblimitr_el1, PMBLIMITR_EL1)
DEFINE_RENAME_SYSREG_WRITE_FUNC(zcr_el3, ZCR_EL3)
@@ -545,9 +540,24 @@ DEFINE_RENAME_SYSREG_READ_FUNC(erxaddr_el1, ERXADDR_EL1)
DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc0_el1, ERXMISC0_EL1)
DEFINE_RENAME_SYSREG_READ_FUNC(erxmisc1_el1, ERXMISC1_EL1)
/* Armv8.2 Registers */
/* Armv8.2 ID Registers */
DEFINE_RENAME_IDREG_READ_FUNC(id_aa64mmfr2_el1, ID_AA64MMFR2_EL1)
/* Armv8.2 MPAM Registers */
DEFINE_RENAME_SYSREG_READ_FUNC(mpamidr_el1, MPAMIDR_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpam3_el3, MPAM3_EL3)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpam2_el2, MPAM2_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamhcr_el2, MPAMHCR_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm0_el2, MPAMVPM0_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm1_el2, MPAMVPM1_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm2_el2, MPAMVPM2_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm3_el2, MPAMVPM3_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm4_el2, MPAMVPM4_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm5_el2, MPAMVPM5_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm6_el2, MPAMVPM6_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpm7_el2, MPAMVPM7_EL2)
DEFINE_RENAME_SYSREG_RW_FUNCS(mpamvpmv_el2, MPAMVPMV_EL2)
/* Armv8.3 Pointer Authentication Registers */
DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeyhi_el1, APIAKeyHi_EL1)
DEFINE_RENAME_SYSREG_RW_FUNCS(apiakeylo_el1, APIAKeyLo_EL1)

include/lib/el3_runtime/aarch64/context.h

@@ -193,7 +193,6 @@
// Only if MTE registers in use
#define CTX_TFSR_EL2 U(0x100)
// Only if ENABLE_MPAM_FOR_LOWER_ELS==1
#define CTX_MPAM2_EL2 U(0x108)
#define CTX_MPAMHCR_EL2 U(0x110)
#define CTX_MPAMVPM0_EL2 U(0x118)
@@ -518,10 +517,6 @@ void el2_sysregs_context_restore_common(el2_sysregs_t *regs);
void el2_sysregs_context_save_mte(el2_sysregs_t *regs);
void el2_sysregs_context_restore_mte(el2_sysregs_t *regs);
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
void el2_sysregs_context_save_mpam(el2_sysregs_t *regs);
void el2_sysregs_context_restore_mpam(el2_sysregs_t *regs);
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_ECV
void el2_sysregs_context_save_ecv(el2_sysregs_t *regs);
void el2_sysregs_context_restore_ecv(el2_sysregs_t *regs);

include/lib/extensions/mpam.h

@@ -9,6 +9,12 @@
#include <stdbool.h>
#if ENABLE_MPAM_FOR_LOWER_ELS
void mpam_enable(bool el2_unused);
#else
static inline void mpam_enable(bool el2_unused)
{
}
#endif
#endif /* MPAM_H */
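
The #else stub exists purely for -O0 builds: with
ENABLE_MPAM_FOR_LOWER_ELS=0, is_feat_mpam_supported() folds to a
compile-time false, so an optimising compiler drops the guarded call
and never references mpam_enable(); without optimisation the reference
survives and the empty inline body satisfies it. The call site (from
context_mgmt.c below) looks like:

    if (is_feat_mpam_supported()) {  /* constant false when disabled */
        mpam_enable(el2_unused);     /* dead code at -O1+, stub at -O0 */
    }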

lib/el3_runtime/aarch64/context.S

@@ -17,10 +17,6 @@
.global el2_sysregs_context_save_mte
.global el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
.global el2_sysregs_context_save_mpam
.global el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_ECV
.global el2_sysregs_context_save_ecv
.global el2_sysregs_context_restore_ecv
@@ -230,208 +226,6 @@ func el2_sysregs_context_restore_mte
endfunc el2_sysregs_context_restore_mte
#endif /* CTX_INCLUDE_MTE_REGS */
#if ENABLE_MPAM_FOR_LOWER_ELS
func el2_sysregs_context_save_mpam
mrs x10, MPAM2_EL2
str x10, [x0, #CTX_MPAM2_EL2]
mrs x10, MPAMIDR_EL1
/*
* The context registers that we intend to save would be part of the
* PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
*/
tbz w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
/*
* MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
* system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to save
* the context of these registers.
*/
mrs x11, MPAMHCR_EL2
mrs x12, MPAMVPM0_EL2
stp x11, x12, [x0, #CTX_MPAMHCR_EL2]
mrs x13, MPAMVPMV_EL2
str x13, [x0, #CTX_MPAMVPMV_EL2]
/*
* MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
* VPMR value. Proceed to save the context of registers from
* MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. From MPAM spec,
* VPMR_MAX should not be zero if HAS_HCR == 1.
*/
ubfx x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
#MPAMIDR_EL1_VPMR_MAX_WIDTH
/*
* Once VPMR_MAX has been identified, calculate the offset relative to
* PC to jump to so that relevant context can be saved. The offset is
* calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
* saving one VPM register) + (absolute address of label "1").
*/
mov w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
sub w10, w11, w10
/* Calculate the size of one block of MPAMVPM*_EL2 save */
adr x11, 1f
adr x12, 2f
sub x12, x12, x11
madd x10, x10, x12, x11
br x10
/*
* The branch above would land properly on one of the blocks following
* label "1". Make sure that the order of save is retained.
*/
1:
#if ENABLE_BTI
bti j
#endif
mrs x10, MPAMVPM7_EL2
str x10, [x0, #CTX_MPAMVPM7_EL2]
2:
#if ENABLE_BTI
bti j
#endif
mrs x11, MPAMVPM6_EL2
str x11, [x0, #CTX_MPAMVPM6_EL2]
#if ENABLE_BTI
bti j
#endif
mrs x12, MPAMVPM5_EL2
str x12, [x0, #CTX_MPAMVPM5_EL2]
#if ENABLE_BTI
bti j
#endif
mrs x13, MPAMVPM4_EL2
str x13, [x0, #CTX_MPAMVPM4_EL2]
#if ENABLE_BTI
bti j
#endif
mrs x14, MPAMVPM3_EL2
str x14, [x0, #CTX_MPAMVPM3_EL2]
#if ENABLE_BTI
bti j
#endif
mrs x15, MPAMVPM2_EL2
str x15, [x0, #CTX_MPAMVPM2_EL2]
#if ENABLE_BTI
bti j
#endif
mrs x16, MPAMVPM1_EL2
str x16, [x0, #CTX_MPAMVPM1_EL2]
3: ret
endfunc el2_sysregs_context_save_mpam
func el2_sysregs_context_restore_mpam
ldr x10, [x0, #CTX_MPAM2_EL2]
msr MPAM2_EL2, x10
mrs x10, MPAMIDR_EL1
/*
* The context registers that we intend to restore would be part of the
* PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
*/
tbz w10, #MPAMIDR_EL1_HAS_HCR_SHIFT, 3f
/*
* MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 would be present in the
* system register frame if MPAMIDR_EL1.HAS_HCR == 1. Proceed to restore
* the context of these registers
*/
ldp x11, x12, [x0, #CTX_MPAMHCR_EL2]
msr MPAMHCR_EL2, x11
msr MPAMVPM0_EL2, x12
ldr x13, [x0, #CTX_MPAMVPMV_EL2]
msr MPAMVPMV_EL2, x13
/*
* MPAMIDR_EL1.VPMR_MAX has to be probed to obtain the maximum supported
* VPMR value. Proceed to restore the context of registers from
* MPAMVPM1_EL2 to MPAMVPM<x>_EL2 where x is VPMR_MAX. from MPAM spec,
* VPMR_MAX should not be zero if HAS_HCR == 1.
*/
ubfx x10, x10, #MPAMIDR_EL1_VPMR_MAX_SHIFT, \
#MPAMIDR_EL1_VPMR_MAX_WIDTH
/*
* Once VPMR_MAX has been identified, calculate the offset relative to
* PC to jump to so that relevant context can be restored. The offset is
* calculated as (VPMR_POSSIBLE_MAX - VPMR_MAX) * (instruction size for
* restoring one VPM register) + (absolute address of label "1").
*/
mov w11, #MPAMIDR_EL1_VPMR_MAX_POSSIBLE
sub w10, w11, w10
/* Calculate the size of one block of MPAMVPM*_EL2 restore */
adr x11, 1f
adr x12, 2f
sub x12, x12, x11
madd x10, x10, x12, x11
br x10
/*
* The branch above would land properly on one of the blocks following
* label "1". Make sure that the order of restore is retained.
*/
1:
#if ENABLE_BTI
bti j
#endif
ldr x10, [x0, #CTX_MPAMVPM7_EL2]
msr MPAMVPM7_EL2, x10
2:
#if ENABLE_BTI
bti j
#endif
ldr x11, [x0, #CTX_MPAMVPM6_EL2]
msr MPAMVPM6_EL2, x11
#if ENABLE_BTI
bti j
#endif
ldr x12, [x0, #CTX_MPAMVPM5_EL2]
msr MPAMVPM5_EL2, x12
#if ENABLE_BTI
bti j
#endif
ldr x13, [x0, #CTX_MPAMVPM4_EL2]
msr MPAMVPM4_EL2, x13
#if ENABLE_BTI
bti j
#endif
ldr x14, [x0, #CTX_MPAMVPM3_EL2]
msr MPAMVPM3_EL2, x14
#if ENABLE_BTI
bti j
#endif
ldr x15, [x0, #CTX_MPAMVPM2_EL2]
msr MPAMVPM2_EL2, x15
#if ENABLE_BTI
bti j
#endif
ldr x16, [x0, #CTX_MPAMVPM1_EL2]
msr MPAMVPM1_EL2, x16
3: ret
endfunc el2_sysregs_context_restore_mpam
#endif /* ENABLE_MPAM_FOR_LOWER_ELS */
#if ENABLE_FEAT_ECV
func el2_sysregs_context_save_ecv
mrs x11, CNTPOFF_EL2

lib/el3_runtime/aarch64/context_mgmt.c

@@ -498,9 +498,9 @@ static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
sve_enable(ctx);
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
mpam_enable(el2_unused);
#endif
if (is_feat_mpam_supported()) {
mpam_enable(el2_unused);
}
if (is_feat_trbe_supported()) {
trbe_enable();
@@ -834,6 +834,96 @@ static void el2_sysregs_context_restore_fgt(el2_sysregs_t *ctx)
write_hfgwtr_el2(read_ctx_reg(ctx, CTX_HFGWTR_EL2));
}
static void el2_sysregs_context_save_mpam(el2_sysregs_t *ctx)
{
u_register_t mpam_idr = read_mpamidr_el1();
write_ctx_reg(ctx, CTX_MPAM2_EL2, read_mpam2_el2());
/*
* The context registers that we intend to save would be part of the
* PE's system register frame only if MPAMIDR_EL1.HAS_HCR == 1.
*/
if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
return;
}
/*
* MPAMHCR_EL2, MPAMVPMV_EL2 and MPAMVPM0_EL2 are always present if
* MPAMIDR_HAS_HCR_BIT == 1.
*/
write_ctx_reg(ctx, CTX_MPAMHCR_EL2, read_mpamhcr_el2());
write_ctx_reg(ctx, CTX_MPAMVPM0_EL2, read_mpamvpm0_el2());
write_ctx_reg(ctx, CTX_MPAMVPMV_EL2, read_mpamvpmv_el2());
/*
* The number of MPAMVPM registers is implementation defined, their
* number is stored in the MPAMIDR_EL1 register.
*/
switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
case 7:
write_ctx_reg(ctx, CTX_MPAMVPM7_EL2, read_mpamvpm7_el2());
__fallthrough;
case 6:
write_ctx_reg(ctx, CTX_MPAMVPM6_EL2, read_mpamvpm6_el2());
__fallthrough;
case 5:
write_ctx_reg(ctx, CTX_MPAMVPM5_EL2, read_mpamvpm5_el2());
__fallthrough;
case 4:
write_ctx_reg(ctx, CTX_MPAMVPM4_EL2, read_mpamvpm4_el2());
__fallthrough;
case 3:
write_ctx_reg(ctx, CTX_MPAMVPM3_EL2, read_mpamvpm3_el2());
__fallthrough;
case 2:
write_ctx_reg(ctx, CTX_MPAMVPM2_EL2, read_mpamvpm2_el2());
__fallthrough;
case 1:
write_ctx_reg(ctx, CTX_MPAMVPM1_EL2, read_mpamvpm1_el2());
break;
}
}
static void el2_sysregs_context_restore_mpam(el2_sysregs_t *ctx)
{
u_register_t mpam_idr = read_mpamidr_el1();
write_mpam2_el2(read_ctx_reg(ctx, CTX_MPAM2_EL2));
if ((mpam_idr & MPAMIDR_HAS_HCR_BIT) == 0U) {
return;
}
write_mpamhcr_el2(read_ctx_reg(ctx, CTX_MPAMHCR_EL2));
write_mpamvpm0_el2(read_ctx_reg(ctx, CTX_MPAMVPM0_EL2));
write_mpamvpmv_el2(read_ctx_reg(ctx, CTX_MPAMVPMV_EL2));
switch ((mpam_idr >> MPAMIDR_EL1_VPMR_MAX_SHIFT) & MPAMIDR_EL1_VPMR_MAX_MASK) {
case 7:
write_mpamvpm7_el2(read_ctx_reg(ctx, CTX_MPAMVPM7_EL2));
__fallthrough;
case 6:
write_mpamvpm6_el2(read_ctx_reg(ctx, CTX_MPAMVPM6_EL2));
__fallthrough;
case 5:
write_mpamvpm5_el2(read_ctx_reg(ctx, CTX_MPAMVPM5_EL2));
__fallthrough;
case 4:
write_mpamvpm4_el2(read_ctx_reg(ctx, CTX_MPAMVPM4_EL2));
__fallthrough;
case 3:
write_mpamvpm3_el2(read_ctx_reg(ctx, CTX_MPAMVPM3_EL2));
__fallthrough;
case 2:
write_mpamvpm2_el2(read_ctx_reg(ctx, CTX_MPAMVPM2_EL2));
__fallthrough;
case 1:
write_mpamvpm1_el2(read_ctx_reg(ctx, CTX_MPAMVPM1_EL2));
break;
}
}
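
The descending switch mirrors what the assembly jump table did:
entering at case VPMR_MAX and falling through handles exactly
MPAMVPM<VPMR_MAX>_EL2 down to MPAMVPM1_EL2, while MPAMVPM0_EL2 is
covered unconditionally above (per the removed comments, the MPAM spec
requires VPMR_MAX to be non-zero when HAS_HCR == 1). As a worked
example, VPMR_MAX == 3 means:

    /* enters at case 3, falls through cases 2 and 1:            */
    /* handles MPAMVPM3_EL2, MPAMVPM2_EL2, MPAMVPM1_EL2 in order */

and since there is no computed branch any more, no BTI landing pads
are needed.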
/*******************************************************************************
* Save EL2 sysreg context
******************************************************************************/
@@ -859,9 +949,9 @@ void cm_el2_sysregs_context_save(uint32_t security_state)
#if CTX_INCLUDE_MTE_REGS
el2_sysregs_context_save_mte(el2_sysregs_ctx);
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
el2_sysregs_context_save_mpam(el2_sysregs_ctx);
#endif
if (is_feat_mpam_supported()) {
el2_sysregs_context_save_mpam(el2_sysregs_ctx);
}
if (is_feat_fgt_supported()) {
el2_sysregs_context_save_fgt(el2_sysregs_ctx);
@@ -919,9 +1009,9 @@ void cm_el2_sysregs_context_restore(uint32_t security_state)
#if CTX_INCLUDE_MTE_REGS
el2_sysregs_context_restore_mte(el2_sysregs_ctx);
#endif
#if ENABLE_MPAM_FOR_LOWER_ELS
el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
#endif
if (is_feat_mpam_supported()) {
el2_sysregs_context_restore_mpam(el2_sysregs_ctx);
}
if (is_feat_fgt_supported()) {
el2_sysregs_context_restore_fgt(el2_sysregs_ctx);

lib/extensions/mpam/mpam.c

@@ -13,11 +13,6 @@
void mpam_enable(bool el2_unused)
{
/* Check if MPAM is implemented */
if (get_mpam_version() == 0U) {
return;
}
/*
* Enable MPAM, and disable trapping to EL3 when lower ELs access their
* own MPAM registers.

plat/arm/board/fvp/platform.mk

@@ -469,6 +469,8 @@ ENABLE_FEAT_FGT := 2
ENABLE_FEAT_HCX := 2
ENABLE_FEAT_TCR2 := 2
ENABLE_MPAM_FOR_LOWER_ELS := 2
ifeq (${SPMC_AT_EL3}, 1)
PLAT_BL_COMMON_SOURCES += plat/arm/board/fvp/fvp_el3_spmc.c
endif
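
With this, the FVP platform switches MPAM to runtime detection by
default, matching the other FEAT_xxx options set to 2 above.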