refactor(amu): factor out register accesses

This change introduces a small set of register getters and setters to
avoid having to repeatedly mask and shift in complex code.
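
For illustration, the before/after shape of the change is roughly the following — a minimal standalone C sketch, where `reg` and the `set_tam_*`/`write_reg_tam` names are hypothetical stand-ins for a real system register and its accessors:

#include <assert.h>
#include <stdint.h>

/* Hypothetical 32-bit register with a single-bit field at bit 30. */
#define DEMO_TAM_SHIFT	30U
#define DEMO_TAM_BIT	(UINT32_C(1) << DEMO_TAM_SHIFT)

static uint32_t reg;	/* stands in for a real read_xxx()/write_xxx() pair */

/* Before: every call site open-codes the read-modify-write. */
static void set_tam_open_coded(uint32_t value)
{
	uint32_t v = reg;

	v &= ~DEMO_TAM_BIT;
	v |= (value << DEMO_TAM_SHIFT) & DEMO_TAM_BIT;
	reg = v;
}

/* After: a named setter hides the mask-and-shift. */
static void write_reg_tam(uint32_t value)
{
	reg = (reg & ~DEMO_TAM_BIT) |
		((value << DEMO_TAM_SHIFT) & DEMO_TAM_BIT);
}

int main(void)
{
	set_tam_open_coded(1U);
	assert(reg == DEMO_TAM_BIT);

	write_reg_tam(0U);
	assert(reg == 0U);

	return 0;
}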

Change-Id: Ia372f60c5efb924cd6eeceb75112e635ad13d942
Signed-off-by: Chris Kay <chris.kay@arm.com>
Chris Kay 2021-05-26 11:58:23 +01:00
parent b4b726ea86
commit 33b9be6d75
4 changed files with 317 additions and 115 deletions

View file

@@ -253,7 +253,8 @@
/* HCPTR definitions */
#define HCPTR_RES1 ((U(1) << 13) | (U(1) << 12) | U(0x3ff))
#define TCPAC_BIT (U(1) << 31)
#define TAM_BIT (U(1) << 30)
#define TAM_SHIFT U(30)
#define TAM_BIT (U(1) << TAM_SHIFT)
#define TTA_BIT (U(1) << 20)
#define TCP11_BIT (U(1) << 11)
#define TCP10_BIT (U(1) << 10)
@@ -727,8 +728,25 @@
#define AMEVTYPER1E p15, 0, c13, c15, 6
#define AMEVTYPER1F p15, 0, c13, c15, 7
/* AMCNTENSET0 definitions */
#define AMCNTENSET0_Pn_SHIFT U(0)
#define AMCNTENSET0_Pn_MASK U(0xffff)
/* AMCNTENSET1 definitions */
#define AMCNTENSET1_Pn_SHIFT U(0)
#define AMCNTENSET1_Pn_MASK U(0xffff)
/* AMCNTENCLR0 definitions */
#define AMCNTENCLR0_Pn_SHIFT U(0)
#define AMCNTENCLR0_Pn_MASK U(0xffff)
/* AMCNTENCLR1 definitions */
#define AMCNTENCLR1_Pn_SHIFT U(0)
#define AMCNTENCLR1_Pn_MASK U(0xffff)
/* AMCR definitions */
#define AMCR_CG1RZ_BIT (ULL(1) << 17)
#define AMCR_CG1RZ_SHIFT U(17)
#define AMCR_CG1RZ_BIT (ULL(1) << AMCR_CG1RZ_SHIFT)
/* AMCFGR definitions */
#define AMCFGR_NCG_SHIFT U(28)

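The `_SHIFT`/`_MASK` pairs added above are consumed with the usual extract/insert idiom; a standalone sketch of both directions, with a plain `uint32_t` standing in for the CP15 register:

#include <assert.h>
#include <stdint.h>

#define PN_SHIFT	0U
#define PN_MASK		UINT32_C(0xffff)

/* Extract a field: shift down, then mask. */
static uint32_t get_pn(uint32_t amcntenset0)
{
	return (amcntenset0 >> PN_SHIFT) & PN_MASK;
}

/* Insert a field: clear it, then OR in the shifted, masked value. */
static uint32_t set_pn(uint32_t amcntenset0, uint32_t pn)
{
	amcntenset0 &= ~(PN_MASK << PN_SHIFT);
	amcntenset0 |= (pn << PN_SHIFT) & (PN_MASK << PN_SHIFT);
	return amcntenset0;
}

int main(void)
{
	uint32_t r = set_pn(UINT32_C(0xffff0000), UINT32_C(0x00ff));

	assert(r == UINT32_C(0xffff00ff));
	assert(get_pn(r) == UINT32_C(0x00ff));

	return 0;
}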
View file

@@ -532,7 +532,8 @@
/* HCR definitions */
#define HCR_RESET_VAL ULL(0x0)
#define HCR_AMVOFFEN_BIT (ULL(1) << 51)
#define HCR_AMVOFFEN_SHIFT U(51)
#define HCR_AMVOFFEN_BIT (ULL(1) << HCR_AMVOFFEN_SHIFT)
#define HCR_TEA_BIT (ULL(1) << 47)
#define HCR_API_BIT (ULL(1) << 41)
#define HCR_APK_BIT (ULL(1) << 40)
@@ -570,7 +571,8 @@
/* CPTR_EL3 definitions */
#define TCPAC_BIT (U(1) << 31)
#define TAM_BIT (U(1) << 30)
#define TAM_SHIFT U(30)
#define TAM_BIT (U(1) << TAM_SHIFT)
#define TTA_BIT (U(1) << 20)
#define TFP_BIT (U(1) << 10)
#define CPTR_EZ_BIT (U(1) << 8)
@@ -579,7 +581,8 @@
/* CPTR_EL2 definitions */
#define CPTR_EL2_RES1 ((U(1) << 13) | (U(1) << 12) | (U(0x3ff)))
#define CPTR_EL2_TCPAC_BIT (U(1) << 31)
#define CPTR_EL2_TAM_BIT (U(1) << 30)
#define CPTR_EL2_TAM_SHIFT U(30)
#define CPTR_EL2_TAM_BIT (U(1) << CPTR_EL2_TAM_SHIFT)
#define CPTR_EL2_TTA_BIT (U(1) << 20)
#define CPTR_EL2_TFP_BIT (U(1) << 10)
#define CPTR_EL2_TZ_BIT (U(1) << 8)
@@ -1043,6 +1046,22 @@
#define AMEVTYPER1E_EL0 S3_3_C13_C15_6
#define AMEVTYPER1F_EL0 S3_3_C13_C15_7
/* AMCNTENSET0_EL0 definitions */
#define AMCNTENSET0_EL0_Pn_SHIFT U(0)
#define AMCNTENSET0_EL0_Pn_MASK ULL(0xffff)
/* AMCNTENSET1_EL0 definitions */
#define AMCNTENSET1_EL0_Pn_SHIFT U(0)
#define AMCNTENSET1_EL0_Pn_MASK ULL(0xffff)
/* AMCNTENCLR0_EL0 definitions */
#define AMCNTENCLR0_EL0_Pn_SHIFT U(0)
#define AMCNTENCLR0_EL0_Pn_MASK ULL(0xffff)
/* AMCNTENCLR1_EL0 definitions */
#define AMCNTENCLR1_EL0_Pn_SHIFT U(0)
#define AMCNTENCLR1_EL0_Pn_MASK ULL(0xffff)
/* AMCFGR_EL0 definitions */
#define AMCFGR_EL0_NCG_SHIFT U(28)
#define AMCFGR_EL0_NCG_MASK U(0xf)
@@ -1074,7 +1093,8 @@
#define AMCG1IDR_VOFF_SHIFT U(16)
/* New bit added to AMCR_EL0 */
#define AMCR_CG1RZ_BIT (ULL(0x1) << 17)
#define AMCR_CG1RZ_SHIFT U(17)
#define AMCR_CG1RZ_BIT (ULL(0x1) << AMCR_CG1RZ_SHIFT)
/*
* Definitions for virtual offset registers for architected activity monitor

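Since each `_BIT` is now derived from its `_SHIFT`, a compile-time consistency check is cheap to add; a hedged sketch using C11 `_Static_assert` (not something this commit adds):

#include <stdint.h>

#define HCR_AMVOFFEN_SHIFT	51U
#define HCR_AMVOFFEN_BIT	(UINT64_C(1) << HCR_AMVOFFEN_SHIFT)

/* Fails to compile if a derived bit ever drifts from its shift. */
_Static_assert(HCR_AMVOFFEN_BIT == (UINT64_C(1) << 51),
	"HCR_EL2.AMVOFFEN bit/shift mismatch");
_Static_assert((HCR_AMVOFFEN_BIT & (HCR_AMVOFFEN_BIT - 1U)) == 0U,
	"expected a single-bit field");

int main(void)
{
	return 0;
}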
View file

@@ -5,6 +5,7 @@
*/
#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>
#include <arch.h>
@@ -18,26 +19,104 @@
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/*
* Get AMU version value from pfr0.
* Return values
* ID_PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
* ID_PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
* ID_PFR0_AMU_NOT_SUPPORTED: not supported
*/
static unsigned int amu_get_version(void)
static inline __unused uint32_t read_id_pfr0_amu(void)
{
return (unsigned int)(read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
return (read_id_pfr0() >> ID_PFR0_AMU_SHIFT) &
ID_PFR0_AMU_MASK;
}
#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
static inline __unused void write_hcptr_tam(uint32_t value)
{
write_hcptr((read_hcptr() & ~TAM_BIT) |
((value << TAM_SHIFT) & TAM_BIT));
}
static inline __unused void write_amcr_cg1rz(uint32_t value)
{
write_amcr((read_amcr() & ~AMCR_CG1RZ_BIT) |
((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}
static inline __unused uint32_t read_amcfgr_ncg(void)
{
return (read_amcfgr() >> AMCFGR_NCG_SHIFT) &
AMCFGR_NCG_MASK;
}
static inline __unused uint32_t read_amcgcr_cg1nc(void)
{
return (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
AMCGCR_CG1NC_MASK;
}
static inline __unused uint32_t read_amcntenset0_px(void)
{
return (read_amcntenset0() >> AMCNTENSET0_Pn_SHIFT) &
AMCNTENSET0_Pn_MASK;
}
static inline __unused uint32_t read_amcntenset1_px(void)
{
return (read_amcntenset1() >> AMCNTENSET1_Pn_SHIFT) &
AMCNTENSET1_Pn_MASK;
}
static inline __unused void write_amcntenset0_px(uint32_t px)
{
uint32_t value = read_amcntenset0();
value &= ~AMCNTENSET0_Pn_MASK;
value |= (px << AMCNTENSET0_Pn_SHIFT) &
AMCNTENSET0_Pn_MASK;
write_amcntenset0(value);
}
static inline __unused void write_amcntenset1_px(uint32_t px)
{
uint32_t value = read_amcntenset1();
value &= ~AMCNTENSET1_Pn_MASK;
value |= (px << AMCNTENSET1_Pn_SHIFT) &
AMCNTENSET1_Pn_MASK;
write_amcntenset1(value);
}
static inline __unused void write_amcntenclr0_px(uint32_t px)
{
uint32_t value = read_amcntenclr0();
value &= ~AMCNTENCLR0_Pn_MASK;
value |= (px << AMCNTENCLR0_Pn_SHIFT) & AMCNTENCLR0_Pn_MASK;
write_amcntenclr0(value);
}
static inline __unused void write_amcntenclr1_px(uint32_t px)
{
uint32_t value = read_amcntenclr1();
value &= ~AMCNTENCLR1_Pn_MASK;
value |= (px << AMCNTENCLR1_Pn_SHIFT) & AMCNTENCLR1_Pn_MASK;
write_amcntenclr1(value);
}
static bool amu_supported(void)
{
return read_id_pfr0_amu() >= ID_PFR0_AMU_V1;
}
static bool amu_v1p1_supported(void)
{
return read_id_pfr0_amu() >= ID_PFR0_AMU_V1P1;
}
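These `>=` comparisons are valid because the ID field encodings are monotonic — 0 for not implemented, 1 for FEAT_AMUv1, 2 for FEAT_AMUv1p1 (per the Arm ARM); a standalone sketch:

#include <assert.h>

#define AMU_NOT_SUPPORTED	0U
#define AMU_V1			1U
#define AMU_V1P1		2U

/* ">=" expresses "at least this version" over monotonic encodings. */
static int v1p1_supported(unsigned int amu_field)
{
	return amu_field >= AMU_V1P1;
}

int main(void)
{
	assert(!v1p1_supported(AMU_NOT_SUPPORTED));
	assert(!v1p1_supported(AMU_V1));
	assert(v1p1_supported(AMU_V1P1));

	return 0;
}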
#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
uint32_t features = read_amcfgr() >> AMCFGR_NCG_SHIFT;
return (features & AMCFGR_NCG_MASK) == 1U;
return read_amcfgr_ncg() > 0U;
}
#endif
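The relaxed check works because AMCFGR.NCG holds the number of implemented counter groups minus one (my reading of the Arm ARM), so any non-zero value implies group 1 exists; a sketch comparing the old and new predicates:

#include <assert.h>
#include <stdint.h>

/* Assumed: NCG encodes the number of counter groups minus one. */
static uint32_t group_count(uint32_t ncg)
{
	return ncg + 1U;
}

int main(void)
{
	uint32_t ncg;

	for (ncg = 0U; ncg <= 15U; ncg++) {
		int old_check = (ncg == 1U);	/* exactly two groups */
		int new_check = (ncg > 0U);	/* at least two groups */
		int group1_present = (group_count(ncg) >= 2U);

		assert(new_check == group1_present);
		assert(!old_check || new_check);	/* old is strictly narrower */
	}

	return 0;
}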
@@ -47,7 +126,7 @@ static bool amu_group1_supported(void)
*/
void amu_enable(bool el2_unused)
{
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return;
}
@@ -59,8 +138,7 @@ void amu_enable(bool el2_unused)
}
/* Check number of group 1 counters */
uint32_t cnt_num = (read_amcgcr() >> AMCGCR_CG1NC_SHIFT) &
AMCGCR_CG1NC_MASK;
uint32_t cnt_num = read_amcgcr_cg1nc();
VERBOSE("%s%u. %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
@@ -74,26 +152,23 @@ void amu_enable(bool el2_unused)
#endif
if (el2_unused) {
uint64_t v;
/*
* Non-secure accesses from EL0 or EL1 to the Activity Monitor
* registers do not trap to EL2.
*/
v = read_hcptr();
v &= ~TAM_BIT;
write_hcptr(v);
write_hcptr_tam(0U);
}
/* Enable group 0 counters */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
/* Initialize FEAT_AMUv1p1 features if present. */
if (amu_get_version() < ID_PFR0_AMU_V1P1) {
if (!amu_v1p1_supported()) {
return;
}
@@ -106,16 +181,16 @@ void amu_enable(bool el2_unused)
* mapped view are unaffected.
*/
VERBOSE("AMU group 1 counter access restricted.\n");
write_amcr(read_amcr() | AMCR_CG1RZ_BIT);
write_amcr_cg1rz(1U);
#else
write_amcr(read_amcr() & ~AMCR_CG1RZ_BIT);
write_amcr_cg1rz(0U);
#endif
}
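The CG1RZ bit toggled at the end of amu_enable() makes group 1 counters read as zero from all but the highest implemented EL; a toy model of that gating (lower_el_read and the sample values are hypothetical):

#include <assert.h>
#include <stdint.h>

#define CG1RZ	(UINT64_C(1) << 17)

static uint64_t amcr;
static const uint64_t group1_counter = 42U;

/* With CG1RZ set, lower-EL reads of a group 1 counter return zero. */
static uint64_t lower_el_read(void)
{
	return ((amcr & CG1RZ) != 0U) ? 0U : group1_counter;
}

int main(void)
{
	amcr |= CG1RZ;			/* AMU_RESTRICT_COUNTERS=1 path */
	assert(lower_el_read() == 0U);

	amcr &= ~CG1RZ;			/* AMU_RESTRICT_COUNTERS=0 path */
	assert(lower_el_read() == 42U);

	return 0;
}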
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -124,7 +199,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
@@ -135,7 +210,7 @@ static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -145,7 +220,7 @@ static uint64_t amu_group1_cnt_read(unsigned int idx)
/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() != ID_PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -159,7 +234,7 @@ static void *amu_context_save(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return (void *)-1;
}
@@ -169,19 +244,19 @@ static void *amu_context_save(const void *arg)
}
#endif
/* Assert that group 0/1 counter configuration is what we expect */
assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
assert(read_amcntenset0_px() == AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
assert(read_amcntenset1_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
/*
* Disable group 0/1 counters to avoid other observers, such as the SCP,
* sampling counter values from the future via the memory-mapped view.
*/
write_amcntenclr0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenclr0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1(AMU_GROUP1_COUNTERS_MASK);
write_amcntenclr1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
isb();
@@ -206,20 +281,20 @@ static void *amu_context_restore(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (amu_get_version() == ID_PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return (void *)-1;
}
#if AMU_GROUP1_NR_COUNTERS
if (!amu_group1_supported()) {
return (void *)-1;
}
#endif
/* Counters were disabled in `amu_context_save()` */
assert(read_amcntenset0_el0() == 0U);
assert(read_amcntenset0_px() == 0U);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == 0U);
assert(read_amcntenset1_px() == 0U);
#endif
/* Restore all group 0 counters */
@@ -228,7 +303,7 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 0 counter configuration */
write_amcntenset0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenset0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
@@ -239,7 +314,7 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 1 counter configuration */
write_amcntenset1(AMU_GROUP1_COUNTERS_MASK);
write_amcntenset1_px(AMU_GROUP1_COUNTERS_MASK);
#endif
return (void *)0;

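The save/restore paths above rely on the set/clear register pairing: AMCNTENSET enables whichever counters are written as 1, AMCNTENCLR disables them. A standalone sketch of that protocol, with a plain variable standing in for the hardware enable state:

#include <assert.h>
#include <stdint.h>

#define GROUP0_MASK	UINT32_C(0xf)

static uint32_t enables;	/* stands in for the hardware enable state */

static void fake_amcntenset0(uint32_t px)	{ enables |= px; }
static void fake_amcntenclr0(uint32_t px)	{ enables &= ~px; }

int main(void)
{
	/* amu_enable(): turn the group 0 counters on. */
	fake_amcntenset0(GROUP0_MASK);
	assert(enables == GROUP0_MASK);

	/* amu_context_save(): stop the counters before reading them. */
	fake_amcntenclr0(GROUP0_MASK);
	assert(enables == 0U);

	/* amu_context_restore(): re-enable after writing values back. */
	fake_amcntenset0(GROUP0_MASK);
	assert(enables == GROUP0_MASK);

	return 0;
}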
View file

@@ -5,6 +5,7 @@
*/
#include <assert.h>
#include <cdefs.h>
#include <stdbool.h>
#include <arch.h>
@@ -19,26 +20,130 @@
static struct amu_ctx amu_ctxs[PLATFORM_CORE_COUNT];
/*
* Get AMU version value from aa64pfr0.
* Return values
* ID_AA64PFR0_AMU_V1: FEAT_AMUv1 supported (introduced in ARM v8.4)
* ID_AA64PFR0_AMU_V1P1: FEAT_AMUv1p1 supported (introduced in ARM v8.6)
* ID_AA64PFR0_AMU_NOT_SUPPORTED: not supported
*/
static unsigned int amu_get_version(void)
static inline __unused uint64_t read_id_aa64pfr0_el1_amu(void)
{
return (unsigned int)(read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
return (read_id_aa64pfr0_el1() >> ID_AA64PFR0_AMU_SHIFT) &
ID_AA64PFR0_AMU_MASK;
}
#if AMU_GROUP1_NR_COUNTERS
/* Check if group 1 counters are implemented */
static inline __unused uint64_t read_hcr_el2_amvoffen(void)
{
return (read_hcr_el2() & HCR_AMVOFFEN_BIT) >>
HCR_AMVOFFEN_SHIFT;
}
static inline __unused void write_cptr_el2_tam(uint64_t value)
{
write_cptr_el2((read_cptr_el2() & ~CPTR_EL2_TAM_BIT) |
((value << CPTR_EL2_TAM_SHIFT) & CPTR_EL2_TAM_BIT));
}
static inline __unused void write_cptr_el3_tam(cpu_context_t *ctx, uint64_t tam)
{
uint64_t value = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
value &= ~TAM_BIT;
value |= (tam << TAM_SHIFT) & TAM_BIT;
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, value);
}
static inline __unused void write_hcr_el2_amvoffen(uint64_t value)
{
write_hcr_el2((read_hcr_el2() & ~HCR_AMVOFFEN_BIT) |
((value << HCR_AMVOFFEN_SHIFT) & HCR_AMVOFFEN_BIT));
}
static inline __unused void write_amcr_el0_cg1rz(uint64_t value)
{
write_amcr_el0((read_amcr_el0() & ~AMCR_CG1RZ_BIT) |
((value << AMCR_CG1RZ_SHIFT) & AMCR_CG1RZ_BIT));
}
static inline __unused uint64_t read_amcfgr_el0_ncg(void)
{
return (read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT) &
AMCFGR_EL0_NCG_MASK;
}
static inline __unused uint64_t read_amcg1idr_el0_voff(void)
{
return (read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
AMCG1IDR_VOFF_MASK;
}
static inline __unused uint64_t read_amcgcr_el0_cg1nc(void)
{
return (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
AMCGCR_EL0_CG1NC_MASK;
}
static inline __unused uint64_t read_amcntenset0_el0_px(void)
{
return (read_amcntenset0_el0() >> AMCNTENSET0_EL0_Pn_SHIFT) &
AMCNTENSET0_EL0_Pn_MASK;
}
static inline __unused uint64_t read_amcntenset1_el0_px(void)
{
return (read_amcntenset1_el0() >> AMCNTENSET1_EL0_Pn_SHIFT) &
AMCNTENSET1_EL0_Pn_MASK;
}
static inline __unused void write_amcntenset0_el0_px(uint64_t px)
{
uint64_t value = read_amcntenset0_el0();
value &= ~AMCNTENSET0_EL0_Pn_MASK;
value |= (px << AMCNTENSET0_EL0_Pn_SHIFT) & AMCNTENSET0_EL0_Pn_MASK;
write_amcntenset0_el0(value);
}
static inline __unused void write_amcntenset1_el0_px(uint64_t px)
{
uint64_t value = read_amcntenset1_el0();
value &= ~AMCNTENSET1_EL0_Pn_MASK;
value |= (px << AMCNTENSET1_EL0_Pn_SHIFT) & AMCNTENSET1_EL0_Pn_MASK;
write_amcntenset1_el0(value);
}
static inline __unused void write_amcntenclr0_el0_px(uint64_t px)
{
uint64_t value = read_amcntenclr0_el0();
value &= ~AMCNTENCLR0_EL0_Pn_MASK;
value |= (px << AMCNTENCLR0_EL0_Pn_SHIFT) & AMCNTENCLR0_EL0_Pn_MASK;
write_amcntenclr0_el0(value);
}
static inline __unused void write_amcntenclr1_el0_px(uint64_t px)
{
uint64_t value = read_amcntenclr1_el0();
value &= ~AMCNTENCLR1_EL0_Pn_MASK;
value |= (px << AMCNTENCLR1_EL0_Pn_SHIFT) & AMCNTENCLR1_EL0_Pn_MASK;
write_amcntenclr1_el0(value);
}
static bool amu_supported(void)
{
return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1;
}
static bool amu_v1p1_supported(void)
{
return read_id_aa64pfr0_el1_amu() >= ID_AA64PFR0_AMU_V1P1;
}
#if AMU_GROUP1_NR_COUNTERS
static bool amu_group1_supported(void)
{
uint64_t features = read_amcfgr_el0() >> AMCFGR_EL0_NCG_SHIFT;
return (features & AMCFGR_EL0_NCG_MASK) == 1U;
return read_amcfgr_el0_ncg() > 0U;
}
#endif
@@ -48,10 +153,7 @@ static bool amu_group1_supported(void)
*/
void amu_enable(bool el2_unused, cpu_context_t *ctx)
{
uint64_t v;
unsigned int amu_version = amu_get_version();
if (amu_version == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return;
}
@@ -63,8 +165,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
}
/* Check number of group 1 counters */
uint64_t cnt_num = (read_amcgcr_el0() >> AMCGCR_EL0_CG1NC_SHIFT) &
AMCGCR_EL0_CG1NC_MASK;
uint64_t cnt_num = read_amcgcr_el0_cg1nc();
VERBOSE("%s%llu. %s%u\n",
"Number of AMU Group 1 Counters ", cnt_num,
"Requested number ", AMU_GROUP1_NR_COUNTERS);
@@ -82,9 +183,7 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
* CPTR_EL2.TAM: Set to zero so any accesses to
* the Activity Monitor registers do not trap to EL2.
*/
v = read_cptr_el2();
v &= ~CPTR_EL2_TAM_BIT;
write_cptr_el2(v);
write_cptr_el2_tam(0U);
}
/*
@@ -92,26 +191,24 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
* in 'ctx'. Set CPTR_EL3.TAM to zero so that any accesses to
* the Activity Monitor registers do not trap to EL3.
*/
v = read_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3);
v &= ~TAM_BIT;
write_ctx_reg(get_el3state_ctx(ctx), CTX_CPTR_EL3, v);
write_cptr_el3_tam(ctx, 0U);
/* Enable group 0 counters */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Enable group 1 counters */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
/* Initialize FEAT_AMUv1p1 features if present. */
if (amu_version < ID_AA64PFR0_AMU_V1P1) {
if (!amu_v1p1_supported()) {
return;
}
if (el2_unused) {
/* Make sure virtual offsets are disabled if EL2 not used. */
write_hcr_el2(read_hcr_el2() & ~HCR_AMVOFFEN_BIT);
write_hcr_el2_amvoffen(0U);
}
#if AMU_RESTRICT_COUNTERS
@@ -123,16 +220,16 @@ void amu_enable(bool el2_unused, cpu_context_t *ctx)
* mapped view are unaffected.
*/
VERBOSE("AMU group 1 counter access restricted.\n");
write_amcr_el0(read_amcr_el0() | AMCR_CG1RZ_BIT);
write_amcr_el0_cg1rz(1U);
#else
write_amcr_el0(read_amcr_el0() & ~AMCR_CG1RZ_BIT);
write_amcr_el0_cg1rz(0U);
#endif
}
/* Read the group 0 counter identified by the given `idx`. */
static uint64_t amu_group0_cnt_read(unsigned int idx)
{
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
return amu_group0_cnt_read_internal(idx);
@@ -141,7 +238,7 @@ static uint64_t amu_group0_cnt_read(unsigned int idx)
/* Write the group 0 counter identified by the given `idx` with `val` */
static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
amu_group0_cnt_write_internal(idx, val);
@@ -156,7 +253,7 @@ static void amu_group0_cnt_write(unsigned int idx, uint64_t val)
*/
static uint64_t amu_group0_voffset_read(unsigned int idx)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_v1p1_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
assert(idx != 1U);
@@ -171,7 +268,7 @@ static uint64_t amu_group0_voffset_read(unsigned int idx)
*/
static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_v1p1_supported());
assert(idx < AMU_GROUP0_NR_COUNTERS);
assert(idx != 1U);
@@ -183,7 +280,7 @@ static void amu_group0_voffset_write(unsigned int idx, uint64_t val)
/* Read the group 1 counter identified by the given `idx` */
static uint64_t amu_group1_cnt_read(unsigned int idx)
{
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -193,7 +290,7 @@ static uint64_t amu_group1_cnt_read(unsigned int idx)
/* Write the group 1 counter identified by the given `idx` with `val` */
static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() != ID_AA64PFR0_AMU_NOT_SUPPORTED);
assert(amu_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
@@ -208,11 +305,10 @@ static void amu_group1_cnt_write(unsigned int idx, uint64_t val)
*/
static uint64_t amu_group1_voffset_read(unsigned int idx)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_v1p1_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
(1ULL << idx)) != 0ULL);
assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
return amu_group1_voffset_read_internal(idx);
}
@@ -224,11 +320,10 @@ static uint64_t amu_group1_voffset_read(unsigned int idx)
*/
static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
{
assert(amu_get_version() >= ID_AA64PFR0_AMU_V1P1);
assert(amu_v1p1_supported());
assert(amu_group1_supported());
assert(idx < AMU_GROUP1_NR_COUNTERS);
assert(((read_amcg1idr_el0() >> AMCG1IDR_VOFF_SHIFT) &
(1ULL << idx)) != 0ULL);
assert((read_amcg1idr_el0_voff() & (UINT64_C(1) << idx)) != 0U);
amu_group1_voffset_write_internal(idx, val);
isb();
@@ -240,7 +335,7 @@ static void amu_group1_voffset_write(unsigned int idx, uint64_t val)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return (void *)-1;
}
@@ -250,19 +345,19 @@ static void *amu_context_save(const void *arg)
}
#endif
/* Assert that group 0/1 counter configuration is what we expect */
assert(read_amcntenset0_el0() == AMU_GROUP0_COUNTERS_MASK);
assert(read_amcntenset0_el0_px() == AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == AMU_GROUP1_COUNTERS_MASK);
assert(read_amcntenset1_el0_px() == AMU_GROUP1_COUNTERS_MASK);
#endif
/*
* Disable group 0/1 counters to avoid other observers, such as the SCP,
* sampling counter values from the future via the memory-mapped view.
*/
write_amcntenclr0_el0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenclr0_el0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
write_amcntenclr1_el0(AMU_GROUP1_COUNTERS_MASK);
write_amcntenclr1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
isb();
@@ -272,8 +367,7 @@ static void *amu_context_save(const void *arg)
}
/* Save group 0 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
/* Not using a loop because the count is fixed and index 1 does not exist. */
ctx->group0_voffsets[0U] = amu_group0_voffset_read(0U);
ctx->group0_voffsets[1U] = amu_group0_voffset_read(2U);
@@ -289,11 +383,9 @@ static void *amu_context_save(const void *arg)
}
/* Save group 1 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
u_register_t amcg1idr = read_amcg1idr_el0() >>
AMCG1IDR_VOFF_SHIFT;
amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
uint64_t amcg1idr = read_amcg1idr_el0_voff() &
AMU_GROUP1_COUNTERS_MASK;
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if (((amcg1idr >> i) & 1ULL) != 0ULL) {
@@ -311,7 +403,7 @@ static void *amu_context_save(const void *arg)
struct amu_ctx *ctx = &amu_ctxs[plat_my_core_pos()];
unsigned int i;
if (amu_get_version() == ID_AA64PFR0_AMU_NOT_SUPPORTED) {
if (!amu_supported()) {
return (void *)-1;
}
@@ -321,10 +413,10 @@ static void *amu_context_restore(const void *arg)
}
#endif
/* Counters were disabled in `amu_context_save()` */
assert(read_amcntenset0_el0() == 0U);
assert(read_amcntenset0_el0_px() == 0U);
#if AMU_GROUP1_NR_COUNTERS
assert(read_amcntenset1_el0() == 0U);
assert(read_amcntenset1_el0_px() == 0U);
#endif
/* Restore all group 0 counters */
@@ -333,8 +425,7 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 0 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
/* Not using a loop because the count is fixed and index 1 does not exist. */
amu_group0_voffset_write(0U, ctx->group0_voffsets[0U]);
amu_group0_voffset_write(2U, ctx->group0_voffsets[1U]);
@@ -342,7 +433,7 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 0 counter configuration */
write_amcntenset0_el0(AMU_GROUP0_COUNTERS_MASK);
write_amcntenset0_el0_px(AMU_GROUP0_COUNTERS_MASK);
#if AMU_GROUP1_NR_COUNTERS
/* Restore group 1 counters */
@@ -353,11 +444,9 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 1 virtual offsets if supported and enabled. */
if ((amu_get_version() >= ID_AA64PFR0_AMU_V1P1) &&
((read_hcr_el2() & HCR_AMVOFFEN_BIT) != 0ULL)) {
u_register_t amcg1idr = read_amcg1idr_el0() >>
AMCG1IDR_VOFF_SHIFT;
amcg1idr = amcg1idr & AMU_GROUP1_COUNTERS_MASK;
if (amu_v1p1_supported() && (read_hcr_el2_amvoffen() != 0U)) {
uint64_t amcg1idr = read_amcg1idr_el0_voff() &
AMU_GROUP1_COUNTERS_MASK;
for (i = 0U; i < AMU_GROUP1_NR_COUNTERS; i++) {
if (((amcg1idr >> i) & 1ULL) != 0ULL) {
@@ -368,7 +457,7 @@ static void *amu_context_restore(const void *arg)
}
/* Restore group 1 counter configuration */
write_amcntenset1_el0(AMU_GROUP1_COUNTERS_MASK);
write_amcntenset1_el0_px(AMU_GROUP1_COUNTERS_MASK);
#endif
return (void *)0;
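
The group 1 virtual-offset loops above iterate a bitmap derived from AMCG1IDR_EL0; a standalone sketch of the same iteration over a plain uint64_t (counter count and bit positions are illustrative):

#include <assert.h>
#include <stdint.h>

#define NR_COUNTERS	16U

int main(void)
{
	/* Pretend counters 0, 2 and 5 implement virtual offsets. */
	uint64_t amcg1idr = (UINT64_C(1) << 0) | (UINT64_C(1) << 2) |
		(UINT64_C(1) << 5);
	unsigned int i, visited = 0U;

	for (i = 0U; i < NR_COUNTERS; i++) {
		if (((amcg1idr >> i) & 1ULL) != 0ULL) {
			visited++;	/* real code saves/restores voffset i */
		}
	}

	assert(visited == 3U);

	return 0;
}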