Mirror of https://github.com/ARM-software/arm-trusted-firmware.git
refactor(amu): use new AMU feature check routines
The AMU extension code was using its own feature detection routines.
Replace them with the generic CPU feature handlers (defined in
arch_features.h), which get updated to cover the v1p1 variant as well.

Change-Id: I8540f1e745d7b02a25a6c6cdf2a39d6f5e21f2aa
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
parent d23acc9e4f
commit b57e16a4f9
9 changed files with 89 additions and 102 deletions
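The new handlers follow TF-A's FEAT_STATE convention for the ENABLE_FEAT_* build options: 0 compiles the feature support out, 1 assumes the feature is always present, and 2 keeps the support in but detects the feature at runtime from the ID registers. A minimal sketch of the handler pattern, written for a hypothetical FEAT_XYZ (the real FEAT_AMU/FEAT_AMUv1p1 handlers appear in the arch_features.h hunks below; read_feat_xyz_id_field() is a stand-in for an ID register accessor):

#include <stdbool.h>

#define FEAT_STATE_DISABLED     0       /* ENABLE_FEAT_XYZ = 0 */
#define FEAT_STATE_ALWAYS       1       /* ENABLE_FEAT_XYZ = 1 */
#define FEAT_STATE_CHECK        2       /* ENABLE_FEAT_XYZ = 2 */

#ifndef ENABLE_FEAT_XYZ
#define ENABLE_FEAT_XYZ         FEAT_STATE_CHECK
#endif

/* Stand-in for reading the feature's ID register field. */
static unsigned int read_feat_xyz_id_field(void)
{
        return 1U;
}

static inline bool is_feat_xyz_supported(void)
{
        if (ENABLE_FEAT_XYZ == FEAT_STATE_DISABLED) {
                return false;   /* constant: callers' branches fold away */
        }

        if (ENABLE_FEAT_XYZ == FEAT_STATE_ALWAYS) {
                return true;    /* constant: no runtime ID register read */
        }

        return read_feat_xyz_id_field() != 0U;  /* FEAT_STATE_CHECK */
}

Because ENABLE_FEAT_XYZ is a compile-time constant, the handler costs nothing in the 0 and 1 configurations and a single ID register read in the 2 configuration.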
common/feat_detect.c:

@@ -112,16 +112,6 @@ static void read_feat_bti(void)
 #endif
 }
 
-/***********************************************
- * Feature : FEAT_AMUv1p1 (AMU Extensions v1.1)
- **********************************************/
-static void read_feat_amuv1p1(void)
-{
-#if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS)
-	feat_detect_panic(is_armv8_6_feat_amuv1p1_present(), "AMUv1p1");
-#endif
-}
-
 /**************************************************
  * Feature : FEAT_RME (Realm Management Extension)
  *************************************************/
@@ -205,7 +195,8 @@ void detect_arch_features(void)
 	read_feat_rng_trap();
 
 	/* v8.6 features */
-	read_feat_amuv1p1();
+	check_feature(ENABLE_FEAT_AMUv1p1, read_feat_amu_id_field(),
+		      "AMUv1p1", 2, 2);
 	check_feature(ENABLE_FEAT_FGT, read_feat_fgt_id_field(), "FGT", 1, 1);
 	check_feature(ENABLE_FEAT_ECV, read_feat_ecv_id_field(), "ECV", 1, 2);
 	check_feature(ENABLE_FEAT_TWED, read_feat_twed_id_field(),
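check_feature() is the generic verifier used by detect_arch_features(): it compares the tri-state build option against the ID field value read from hardware and records an error when the build's expectations cannot be met. A simplified sketch of its semantics, assuming the helper behaves as its call sites suggest (the real implementation lives in common/feat_detect.c; ERROR() is TF-A's logging macro and `tainted` triggers a panic once all features have been reported):

/* Simplified sketch; not the exact upstream implementation. */
static bool tainted;

static void check_feature(int state, unsigned long field,
                          const char *feat_name, unsigned int min,
                          unsigned int max)
{
        /* Build assumes the feature is always there, hardware disagrees. */
        if ((state == FEAT_STATE_ALWAYS) && (field < min)) {
                ERROR("FEAT_%s not supported by the PE\n", feat_name);
                tainted = true;
        }

        /* Hardware reports a newer version than this build understands. */
        if ((state >= FEAT_STATE_ALWAYS) && (field > max)) {
                ERROR("FEAT_%s is version %lu, expected at most %u\n",
                      feat_name, field, max);
                tainted = true;
        }
}

For the AMUv1p1 call above, min and max are both 2 because FEAT_AMUv1p1 is reported as an AMU ID field value of 0b0010.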
include/arch/aarch32/arch_features.h:

@@ -25,6 +25,37 @@ static inline bool is_armv8_2_ttcnp_present(void)
 	return ISOLATE_FIELD(read_id_mmfr4(), ID_MMFR4_CNP) != 0U;
 }
 
+static unsigned int read_feat_amu_id_field(void)
+{
+	return ISOLATE_FIELD(read_id_pfr0(), ID_PFR0_AMU);
+}
+
+static inline bool is_feat_amu_supported(void)
+{
+	if (ENABLE_FEAT_AMU == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_FEAT_AMU == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	return read_feat_amu_id_field() >= ID_PFR0_AMU_V1;
+}
+
+static inline bool is_feat_amuv1p1_supported(void)
+{
+	if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
+	return read_feat_amu_id_field() >= ID_PFR0_AMU_V1P1;
+}
+
 static inline unsigned int read_feat_trf_id_field(void)
 {
 	return ISOLATE_FIELD(read_id_dfr0(), ID_DFR0_TRACEFILT);
include/arch/aarch64/arch_features.h:

@@ -255,8 +255,16 @@ static inline bool is_feat_amu_supported(void)
 	return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1;
 }
 
-static inline bool is_armv8_6_feat_amuv1p1_present(void)
+static inline bool is_feat_amuv1p1_supported(void)
 {
+	if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_DISABLED) {
+		return false;
+	}
+
+	if (ENABLE_FEAT_AMUv1p1 == FEAT_STATE_ALWAYS) {
+		return true;
+	}
+
 	return read_feat_amu_id_field() >= ID_AA64PFR0_AMU_V1P1;
 }
 
include/lib/extensions/amu.h:

@@ -14,11 +14,23 @@
 
 #include <platform_def.h>
 
+#if ENABLE_FEAT_AMU
 #if __aarch64__
 void amu_enable(bool el2_unused, cpu_context_t *ctx);
 #else
 void amu_enable(bool el2_unused);
 #endif
+#else
+#if __aarch64__
+static inline void amu_enable(bool el2_unused, cpu_context_t *ctx)
+{
+}
+#else
+static inline void amu_enable(bool el2_unused)
+{
+}
+#endif
+#endif
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 /*
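The empty static inline stubs added here pair with the FEAT_STATE handlers: when ENABLE_FEAT_AMU is 0, is_feat_amu_supported() is a compile-time false, the call site becomes dead code, and the stub satisfies the reference without an #ifdef at every caller. A sketch of the resulting call-site pattern, mirroring the context_mgmt.c hunks below:

/* No preprocessor guard needed at the call site. */
if (is_feat_amu_supported()) {          /* constant false when disabled */
        amu_enable(el2_unused);         /* resolves to the empty stub */
}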
lib/el3_runtime/aarch32/context_mgmt.c:

@@ -136,9 +136,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep)
 static void enable_extensions_nonsecure(bool el2_unused)
 {
 #if IMAGE_BL32
-#if ENABLE_FEAT_AMU
-	amu_enable(el2_unused);
-#endif
+	if (is_feat_amu_supported()) {
+		amu_enable(el2_unused);
+	}
 
 	if (is_feat_sys_reg_trace_supported()) {
 		sys_reg_trace_enable();
lib/el3_runtime/aarch64/context_mgmt.c:

@@ -485,9 +485,9 @@ static void manage_extensions_nonsecure(bool el2_unused, cpu_context_t *ctx)
 		spe_enable(el2_unused);
 	}
 
-#if ENABLE_FEAT_AMU
-	amu_enable(el2_unused, ctx);
-#endif
+	if (is_feat_amu_supported()) {
+		amu_enable(el2_unused, ctx);
+	}
 
 #if ENABLE_SME_FOR_NS
 	/* Enable SME, SVE, and FPU/SIMD for non-secure world. */
lib/extensions/amu/aarch32/amu.c:

@@ -10,6 +10,7 @@
 
 #include "../amu_private.h"
 #include <arch.h>
+#include <arch_features.h>
 #include <arch_helpers.h>
 #include <common/debug.h>
 #include <lib/el3_runtime/pubsub_events.h>
@@ -39,12 +40,6 @@ CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
 	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
 #endif
 
-static inline __unused uint32_t read_id_pfr0_amu(void)
-{
-	return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
-		ID_PFR0_AMU_MASK;
-}
-
 static inline __unused void write_hcptr_tam(uint32_t value)
 {
 	write_hcptr((read_hcptr() & ~TAM_BIT) |
@@ -129,11 +124,6 @@ static inline __unused void write_amcntenclr1_px(uint32_t px)
 	write_amcntenclr1(value);
 }
 
-static __unused bool amu_supported(void)
-{
-	return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
-}
-
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 static __unused bool amu_group1_supported(void)
 {
@@ -147,23 +137,12 @@ static __unused bool amu_group1_supported(void)
  */
 void amu_enable(bool el2_unused)
 {
-	uint32_t id_pfr0_amu; /* AMU version */
-
 	uint32_t amcfgr_ncg; /* Number of counter groups */
 	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
 
 	uint32_t amcntenset0_px = 0x0; /* Group 0 enable mask */
 	uint32_t amcntenset1_px = 0x0; /* Group 1 enable mask */
 
-	id_pfr0_amu = read_id_pfr0_amu();
-	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
-		/*
-		 * If the AMU is unsupported, nothing needs to be done.
-		 */
-
-		return;
-	}
-
 	if (el2_unused) {
 		/*
 		 * HCPTR.TAM: Set to zero so any accesses to the Activity
@@ -221,8 +200,8 @@ void amu_enable(bool el2_unused)
 #endif
 	}
 
-	/* Initialize FEAT_AMUv1p1 features if present. */
-	if (id_pfr0_amu < ID_PFR0_AMU_V1P1) {
+	/* Bail out if FEAT_AMUv1p1 features are not present. */
+	if (!is_feat_amuv1p1_supported()) {
 		return;
 	}
 
@@ -244,7 +223,7 @@ void amu_enable(bool el2_unused)
 /* Read the group 0 counter identified by the given `idx`. */
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(idx < read_amcgcr_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
@@ -253,7 +232,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(idx < read_amcgcr_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
@@ -264,7 +243,7 @@ static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 static uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_cg1nc());
 
@@ -274,7 +253,7 @@ static uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_cg1nc());
 
@@ -290,7 +269,6 @@ static void *amu_context_save(const void *arg)
 	unsigned int core_pos;
 	struct amu_ctx *ctx;
 
-	uint32_t id_pfr0_amu; /* AMU version */
 	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
@@ -298,8 +276,7 @@ static void *amu_context_save(const void *arg)
 	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
 #endif
 
-	id_pfr0_amu = read_id_pfr0_amu();
-	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+	if (!is_feat_amu_supported()) {
 		return (void *)0;
 	}
 
@@ -353,8 +330,6 @@ static void *amu_context_restore(const void *arg)
 	unsigned int core_pos;
 	struct amu_ctx *ctx;
 
-	uint32_t id_pfr0_amu; /* AMU version */
-
 	uint32_t amcfgr_ncg; /* Number of counter groups */
 	uint32_t amcgcr_cg0nc; /* Number of group 0 counters */
 
@@ -362,8 +337,7 @@ static void *amu_context_restore(const void *arg)
 	uint32_t amcgcr_cg1nc; /* Number of group 1 counters */
 #endif
 
-	id_pfr0_amu = read_id_pfr0_amu();
-	if (id_pfr0_amu == ID_PFR0_AMU_NOT_SUPPORTED) {
+	if (!is_feat_amu_supported()) {
 		return (void *)0;
 	}
 
lib/extensions/amu/aarch64/amu.c:

@@ -57,12 +57,6 @@ CASSERT((sizeof(amu_ctxs_[0].group1_enable) * CHAR_BIT) <= AMU_GROUP1_MAX_COUNTERS,
 	amu_ctx_group1_enable_cannot_represent_all_group1_counters);
 #endif
 
-static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
-{
-	return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
-		ID_AA64PFR0_AMU_MASK;
-}
-
 static inline __unused uint64_t read_hcr_el2_amvoffen(void)
 {
 	return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
@@ -183,16 +177,6 @@ static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
 	write_amcntenclr1_el0(value);
 }
 
-static __unused bool amu_supported(void)
-{
-	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
-}
-
-static __unused bool amu_v1p1_supported(void)
-{
-	return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
-}
-
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 static __unused bool amu_group1_supported(void)
 {
@@ -206,23 +190,12 @@ static __unused bool amu_group1_supported(void)
  */
 void amu_enable(bool el2_unused, cpu_context_t *ctx)
 {
-	uint64_t id_aa64pfr0_el1_amu; /* AMU version */
-
 	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
 	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
 
 	uint64_t amcntenset0_el0_px = 0x0; /* Group 0 enable mask */
 	uint64_t amcntenset1_el0_px = 0x0; /* Group 1 enable mask */
 
-	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
-		/*
-		 * If the AMU is unsupported, nothing needs to be done.
-		 */
-
-		return;
-	}
-
 	if (el2_unused) {
 		/*
 		 * CPTR_EL2.TAM: Set to zero so any accesses to the Activity
@@ -288,7 +261,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
 	}
 
 	/* Initialize FEAT_AMUv1p1 features if present. */
-	if (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) {
+	if (is_feat_amuv1p1_supported()) {
 		if (el2_unused) {
 			/*
 			 * Make sure virtual offsets are disabled if EL2 not
@@ -327,7 +300,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
 /* Read the group 0 counter identified by the given `idx`. */
 static uint64_t amu_group0_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(idx < read_amcgcr_el0_cg0nc());
 
 	return amu_group0_cnt_read_internal(idx);
@@ -336,7 +309,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
 /* Write the group 0 counter identified by the given `idx` with `val` */
 static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(idx < read_amcgcr_el0_cg0nc());
 
 	amu_group0_cnt_write_internal(idx, val);
@@ -376,7 +349,7 @@ static bool amu_group0_voffset_supported(uint64_t idx)
  */
 static uint64_t amu_group0_voffset_read(unsigned int idx)
 {
-	assert(amu_v1p1_supported());
+	assert(is_feat_amuv1p1_supported());
 	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
@@ -391,7 +364,7 @@ static uint64_t amu_group0_voffset_read(unsigned int idx)
  */
 static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_v1p1_supported());
+	assert(is_feat_amuv1p1_supported());
 	assert(idx < read_amcgcr_el0_cg0nc());
 	assert(idx != 1U);
 
@@ -403,7 +376,7 @@ static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
 /* Read the group 1 counter identified by the given `idx` */
 static uint64_t amu_group1_cnt_read(unsigned int idx)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_el0_cg1nc());
 
@@ -413,7 +386,7 @@ static uint64_t amu_group1_cnt_read(unsigned int idx)
 /* Write the group 1 counter identified by the given `idx` with `val` */
 static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_supported());
+	assert(is_feat_amu_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_el0_cg1nc());
 
@@ -428,7 +401,7 @@ static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
  */
 static uint64_t amu_group1_voffset_read(unsigned int idx)
 {
-	assert(amu_v1p1_supported());
+	assert(is_feat_amuv1p1_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_el0_cg1nc());
 	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
@@ -443,7 +416,7 @@ static uint64_t amu_group1_voffset_read(unsigned int idx)
  */
 static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
 {
-	assert(amu_v1p1_supported());
+	assert(is_feat_amuv1p1_supported());
 	assert(amu_group1_supported());
 	assert(idx < read_amcgcr_el0_cg1nc());
 	assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
@@ -460,8 +433,7 @@ static void *amu_context_save(const void *arg)
 	unsigned int core_pos;
 	struct amu_ctx *ctx;
 
-	uint64_t id_aa64pfr0_el1_amu; /* AMU version */
-	uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */
+	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */
 	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
@@ -470,8 +442,7 @@ static void *amu_context_save(const void *arg)
 	uint64_t amcgcr_el0_cg1nc; /* Number of group 1 counters */
 #endif
 
-	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+	if (!is_feat_amu_supported()) {
 		return (void *)0;
 	}
 
@@ -479,8 +450,9 @@ static void *amu_context_save(const void *arg)
 	ctx = &amu_ctxs_[core_pos];
 
 	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
-	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
-		read_hcr_el2_amvoffen() : 0U;
+	if (is_feat_amuv1p1_supported()) {
+		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+	}
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
@@ -552,9 +524,7 @@ static void *amu_context_restore(const void *arg)
 	unsigned int core_pos;
 	struct amu_ctx *ctx;
 
-	uint64_t id_aa64pfr0_el1_amu; /* AMU version */
-
-	uint64_t hcr_el2_amvoffen; /* AMU virtual offsets enabled */
+	uint64_t hcr_el2_amvoffen = 0; /* AMU virtual offsets enabled */
 
 	uint64_t amcfgr_el0_ncg; /* Number of counter groups */
 	uint64_t amcgcr_el0_cg0nc; /* Number of group 0 counters */
@@ -564,8 +534,7 @@ static void *amu_context_restore(const void *arg)
 	uint64_t amcg1idr_el0_voff; /* Auxiliary counters with virtual offsets */
 #endif
 
-	id_aa64pfr0_el1_amu = read_id_aa64pfr0_el1_amu();
-	if (id_aa64pfr0_el1_amu == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
+	if (!is_feat_amu_supported()) {
 		return (void *)0;
 	}
 
@@ -575,8 +544,9 @@ static void *amu_context_restore(const void *arg)
 	amcfgr_el0_ncg = read_amcfgr_el0_ncg();
 	amcgcr_el0_cg0nc = read_amcgcr_el0_cg0nc();
 
-	hcr_el2_amvoffen = (id_aa64pfr0_el1_amu >= ID_AA64PFR0_AMU_V1P1) ?
-		read_hcr_el2_amvoffen() : 0U;
+	if (is_feat_amuv1p1_supported()) {
+		hcr_el2_amvoffen = read_hcr_el2_amvoffen();
+	}
 
 #if ENABLE_AMU_AUXILIARY_COUNTERS
 	amcgcr_el0_cg1nc = (amcfgr_el0_ncg > 0U) ? read_amcgcr_el0_cg1nc() : 0U;
make_helpers/defaults.mk:

@@ -320,6 +320,7 @@ endif
 
 # Enable Activity Monitor Unit extensions by default
 ENABLE_FEAT_AMU := 2
+ENABLE_FEAT_AMUv1p1 := 2
 
 # Enable dynamic mitigation support by default
 DYNAMIC_WORKAROUND_CVE_2018_3639 := 1
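Both options default to 2, i.e. FEAT_STATE_CHECK: the AMU code stays in the image and the feature is detected at runtime. Platforms that know their hardware can still force either behaviour on the build command line, for example (the platform name is illustrative): make PLAT=fvp ENABLE_FEAT_AMU=1 ENABLE_FEAT_AMUv1p1=1 all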