fix(gpt): unify logging messages

This patch updates GPT library comments and makes
logging messages consistent with PRIx64 usage and
the TF-A format used in other modules.
Minor changes are made to bring the code into
compliance with MISRA C requirements.

Change-Id: Ic40e1b7ac43cd9602819698d00e1ce3a8c7183ce
Signed-off-by: AlexeiFedorov <Alexei.Fedorov@arm.com>
Author: AlexeiFedorov
Date: 2024-03-13 15:18:02 +00:00
Committed by: Alexei Fedorov
Parent: 20e2683daf
Commit: b99926ef7b
2 changed files with 174 additions and 173 deletions
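
As context for the changes below, here is a minimal sketch of the logging convention the patch adopts: a "GPT:" prefix instead of "[GPT]", no trailing period, and PRIx64 for portable 64-bit formatting. The ERROR() macro here is a hypothetical stand-in for TF-A's logging macro, not the real definition.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for TF-A's ERROR() logging macro */
#define ERROR(...)	printf("ERROR:   " __VA_ARGS__)

int main(void)
{
	uint64_t desc = 0x123456789ULL;

	/* Old style: "[GPT]" prefix, trailing period */
	ERROR("[GPT] Bad descriptor: 0x%" PRIx64 ".\n", desc);

	/* New style: "GPT:" prefix, no trailing period */
	ERROR("GPT: Bad descriptor: 0x%"PRIx64"\n", desc);

	return 0;
}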

@ -21,7 +21,7 @@
#include <lib/xlat_tables/xlat_tables_v2.h>
#if !ENABLE_RME
#error "ENABLE_RME must be enabled to use the GPT library."
#error "ENABLE_RME must be enabled to use the GPT library"
#endif
/*
@ -58,7 +58,7 @@ static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
/*
* This structure contains GPT configuration data.
* This structure contains GPT configuration data
*/
typedef struct {
uintptr_t plat_gpt_l0_base;
@ -70,7 +70,7 @@ typedef struct {
static gpt_config_t gpt_config;
/* These variables are used during initialization of the L1 tables. */
/* These variables are used during initialization of the L1 tables */
static unsigned int gpt_next_l1_tbl_idx;
static uintptr_t gpt_l1_tbl;
@ -137,7 +137,7 @@ static bool does_previous_pas_exist_here(unsigned int l0_idx,
pas_region_t *pas_regions,
unsigned int pas_idx)
{
/* Iterate over PAS regions up to pas_idx. */
/* Iterate over PAS regions up to pas_idx */
for (unsigned int i = 0U; i < pas_idx; i++) {
if (check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
GPT_L0GPTSZ_ACTUAL_SIZE,
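
check_pas_overlap() is not shown in this diff; presumably it implements the standard half-open interval test. A sketch under that assumption (not the actual TF-A code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sketch: true if [base_1, base_1 + size_1) and [base_2, base_2 + size_2)
 * share at least one byte. Assumes no arithmetic overflow, which
 * validate_pas_mappings() below rejects before reaching this point.
 */
static bool ranges_overlap(uintptr_t base_1, size_t size_1,
			   uintptr_t base_2, size_t size_2)
{
	return (base_1 < (base_2 + size_2)) && (base_2 < (base_1 + size_1));
}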
@ -176,18 +176,18 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
assert(pas_region_cnt != 0U);
for (idx = 0U; idx < pas_region_cnt; idx++) {
/* Check for arithmetic overflow in region. */
/* Check for arithmetic overflow in region */
if ((ULONG_MAX - pas_regions[idx].base_pa) <
pas_regions[idx].size) {
ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
ERROR("GPT: Address overflow in PAS[%u]!\n", idx);
return -EOVERFLOW;
}
/* Initial checks for PAS validity. */
/* Initial checks for PAS validity */
if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
!is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
ERROR("[GPT] PAS[%u] is invalid!\n", idx);
ERROR("GPT: PAS[%u] is invalid!\n", idx);
return -EFAULT;
}
@ -196,12 +196,12 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
* start from idx + 1 instead of 0 since prior PAS mappings will
* have already checked themselves against this one.
*/
for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
for (unsigned int i = idx + 1U; i < pas_region_cnt; i++) {
if (check_pas_overlap(pas_regions[idx].base_pa,
pas_regions[idx].size,
pas_regions[i].base_pa,
pas_regions[i].size)) {
ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
ERROR("GPT: PAS[%u] overlaps with PAS[%u]\n",
i, idx);
return -EFAULT;
}
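
The 1U/1UL suffixes introduced in this and later hunks address MISRA C:2012's essential type rules (for example Rule 10.4, which requires both operands of an arithmetic operator to share an essential type category). A minimal illustration, not taken from the patch:

static void misra_suffix_example(void)
{
	unsigned int i = 0U;
	unsigned long sz = 4096UL;

	/* i = i + 1;  would mix essentially signed and unsigned operands */
	i = i + 1U;	/* compliant: both operands essentially unsigned */
	sz = sz - 1UL;	/* compliant: unsigned long literal throughout */
	(void)i;
	(void)sz;
}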
@ -214,11 +214,12 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
* initialized.
*/
for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
i <= GPT_L0_IDX(pas_regions[idx].base_pa +
pas_regions[idx].size - 1UL);
i++) {
if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
(GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
/* This descriptor is unused so continue. */
/* This descriptor is unused so continue */
continue;
}
@ -226,18 +227,18 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
* This descriptor has been initialized in a previous
* call to this function so cannot be initialized again.
*/
ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n",
ERROR("GPT: PAS[%u] overlaps with previous L0[%d]!\n",
idx, i);
return -EFAULT;
}
/* Check for block mapping (L0) type. */
/* Check for block mapping (L0) type */
if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
/* Make sure base and size are block-aligned. */
/* Make sure base and size are block-aligned */
if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
!GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
ERROR("[GPT] PAS[%u] is not block-aligned!\n",
ERROR("GPT: PAS[%u] is not block-aligned!\n",
idx);
return -EFAULT;
}
@ -245,21 +246,21 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
continue;
}
/* Check for granule mapping (L1) type. */
/* Check for granule mapping (L1) type */
if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
/* Make sure base and size are granule-aligned. */
/* Make sure base and size are granule-aligned */
if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
ERROR("GPT: PAS[%u] is not granule-aligned!\n",
idx);
return -EFAULT;
}
/* Find how many L1 tables this PAS occupies. */
/* Find how many L1 tables this PAS occupies */
pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
pas_regions[idx].size - 1) -
GPT_L0_IDX(pas_regions[idx].base_pa) + 1);
pas_regions[idx].size - 1UL) -
GPT_L0_IDX(pas_regions[idx].base_pa) + 1U);
/*
* This creates a situation where, if multiple PAS
@ -279,24 +280,24 @@ static int validate_pas_mappings(pas_region_t *pas_regions,
if (pas_l1_cnt > 1) {
if (does_previous_pas_exist_here(
GPT_L0_IDX(pas_regions[idx].base_pa +
pas_regions[idx].size - 1),
pas_regions[idx].size - 1UL),
pas_regions, idx)) {
pas_l1_cnt = pas_l1_cnt - 1;
pas_l1_cnt--;
}
}
if (does_previous_pas_exist_here(
GPT_L0_IDX(pas_regions[idx].base_pa),
pas_regions, idx)) {
pas_l1_cnt = pas_l1_cnt - 1;
pas_l1_cnt--;
}
l1_cnt += pas_l1_cnt;
continue;
}
/* If execution reaches this point, mapping type is invalid. */
ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
/* If execution reaches this point, mapping type is invalid */
ERROR("GPT: PAS[%u] has invalid mapping type 0x%x.\n", idx,
GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
return -EINVAL;
}
@ -324,27 +325,29 @@ static int validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
* to work.
*/
if (pps > GPT_PPS_MAX) {
ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
ERROR("GPT: Invalid PPS: 0x%x\n", pps);
return -EINVAL;
}
gpt_config.pps = pps;
gpt_config.t = gpt_t_lookup[pps];
/* Alignment must be the greater of 4k or l0 table size. */
/* Alignment must be the greater of 4KB or l0 table size */
l0_alignment = PAGE_SIZE_4KB;
if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
}
/* Check base address. */
if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
/* Check base address */
if ((l0_mem_base == 0UL) ||
((l0_mem_base & (l0_alignment - 1UL)) != 0UL)) {
ERROR("GPT: Invalid L0 base address: 0x%lx\n", l0_mem_base);
return -EFAULT;
}
/* Check size. */
/* Check size */
if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n",
ERROR("%sL0%s\n", "GPT: Inadequate ", " memory\n");
ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
GPT_L0_TABLE_SIZE(gpt_config.t),
l0_mem_size);
return -ENOMEM;
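
The base address check above uses the standard power-of-two alignment idiom: when align is a power of two, x & (align - 1) equals x % align. Both 4KB and the L0 table size are powers of two, so the idiom applies directly. A sketch (names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/* True if addr is aligned to align; align must be a power of two */
static bool is_pow2_aligned(uintptr_t addr, uintptr_t align)
{
	return (addr & (align - 1UL)) == 0UL;
}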
@ -376,31 +379,31 @@ static int validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
return -EPERM;
}
/* Make sure L1 tables are aligned to their size. */
if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
/* Make sure L1 tables are aligned to their size */
if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1UL)) != 0UL) {
ERROR("GPT: Unaligned L1 GPT base address: 0x%"PRIxPTR"\n",
l1_mem_base);
return -EFAULT;
}
/* Get total memory needed for L1 tables. */
/* Get total memory needed for L1 tables */
l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
/* Check for overflow. */
/* Check for overflow */
if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
ERROR("[GPT] Overflow calculating L1 memory size.\n");
ERROR("GPT: Overflow calculating L1 memory size\n");
return -ENOMEM;
}
/* Make sure enough space was supplied. */
/* Make sure enough space was supplied */
if (l1_mem_size < l1_gpt_mem_sz) {
ERROR("[GPT] Inadequate memory for L1 GPTs. ");
ERROR(" Expected 0x%lx bytes. Got 0x%lx bytes\n",
ERROR("%sL1 GPTs%s", "GPT: Inadequate ", " memory\n");
ERROR(" Expected 0x%lx bytes, got 0x%lx bytes\n",
l1_gpt_mem_sz, l1_mem_size);
return -ENOMEM;
}
VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
VERBOSE("GPT: Requested 0x%lx bytes for L1 GPTs\n", l1_gpt_mem_sz);
return 0;
}
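
The size calculation above guards against multiplication overflow with the divide-back idiom: unsigned multiplication wraps, so for nonzero b, (a * b) / b != a exactly when a * b overflowed. A sketch:

#include <stdbool.h>
#include <stddef.h>

/* True if a * b would overflow size_t; b must be nonzero */
static bool mul_would_overflow(size_t a, size_t b)
{
	size_t prod = a * b;	/* unsigned wrap-around is well defined */

	return (prod / b) != a;
}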
@ -442,10 +445,10 @@ static void generate_l0_blk_desc(pas_region_t *pas)
*/
end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
/* Generate the needed block descriptors. */
/* Generate the needed block descriptors */
for (; idx < end_idx; idx++) {
l0_gpt_arr[idx] = gpt_desc;
VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
VERBOSE("GPT: L0 entry (BLOCK) index %u [%p]: GPI = 0x%"PRIx64" (0x%"PRIx64")\n",
idx, &l0_gpt_arr[idx],
(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
@ -496,7 +499,7 @@ static void fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
uintptr_t last)
{
uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;
uint64_t gpi_mask = ULONG_MAX;
assert(first <= last);
assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
@ -504,25 +507,25 @@ static void fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
assert(l1 != NULL);
/* Shift the mask if we're starting in the middle of an L1 entry. */
/* Shift the mask if we're starting in the middle of an L1 entry */
gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);
/* Fill out each L1 entry for this region. */
/* Fill out each L1 entry for this region */
for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
i <= GPT_L1_IDX(gpt_config.p, last); i++) {
/* Account for stopping in the middle of an L1 entry. */
/* Account for stopping in the middle of an L1 entry */
if (i == GPT_L1_IDX(gpt_config.p, last)) {
gpi_mask &= (gpi_mask >> ((15 -
gpi_mask &= (gpi_mask >> ((15U -
GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
}
/* Write GPI values. */
/* Write GPI values */
assert((l1[i] & gpi_mask) ==
(GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);
/* Reset mask. */
gpi_mask = 0xFFFFFFFFFFFFFFFF;
/* Reset mask */
gpi_mask = ULONG_MAX;
}
}
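
Each 64-bit L1 entry packs sixteen 4-bit GPI fields, which is why the code above multiplies GPI indices by four (<< 2) and measures the end-of-range adjustment from index 15. Note also that ULONG_MAX equals 0xFFFFFFFFFFFFFFFF here because TF-A's EL3 firmware targets AArch64, where unsigned long is 64 bits. A worked sketch with illustrative start and stop indices:

#include <stdint.h>

int main(void)
{
	uint64_t gpi_mask = UINT64_MAX;
	unsigned int first_gpi_idx = 3U;	/* start mid-entry */
	unsigned int last_gpi_idx = 10U;	/* stop mid-entry */

	/* Zero the mask below GPI slot 3 (bits [11:0]) */
	gpi_mask <<= (first_gpi_idx << 2);

	/* Zero the mask above GPI slot 10 (bits [63:44]) */
	gpi_mask &= (gpi_mask >> ((15U - last_gpi_idx) << 2));

	/* gpi_mask now covers GPI slots 3..10 only */
	return (gpi_mask == 0x00000FFFFFFFF000ULL) ? 0 : 1;
}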
@ -539,12 +542,12 @@ static void fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
*/
static uint64_t *get_new_l1_tbl(void)
{
/* Retrieve the next L1 table. */
/* Retrieve the next L1 table */
uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
(GPT_L1_TABLE_SIZE(gpt_config.p) *
gpt_next_l1_tbl_idx));
/* Increment L1 counter. */
/* Increment L1 counter */
gpt_next_l1_tbl_idx++;
/* Initialize all GPIs to GPT_GPI_ANY */
@ -586,7 +589,7 @@ static void generate_l0_tbl_desc(pas_region_t *pas)
/* We start working from the granule at base PA */
cur_pa = pas->base_pa;
/* Iterate over each L0 region in this memory range. */
/* Iterate over each L0 region in this memory range */
for (l0_idx = GPT_L0_IDX(pas->base_pa);
l0_idx <= GPT_L0_IDX(end_pa - 1U);
l0_idx++) {
@ -596,20 +599,18 @@ static void generate_l0_tbl_desc(pas_region_t *pas)
* need to create one.
*/
if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
/* Get the L1 array from the L0 entry. */
/* Get the L1 array from the L0 entry */
l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
} else {
/* Get a new L1 table from the L1 memory space. */
/* Get a new L1 table from the L1 memory space */
l1_gpt_arr = get_new_l1_tbl();
/* Fill out the L0 descriptor and flush it. */
/* Fill out the L0 descriptor and flush it */
l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
}
VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
l0_idx, &l0_gpt_base[l0_idx],
(unsigned long long)(l1_gpt_arr),
l0_gpt_base[l0_idx]);
VERBOSE("GPT: L0 entry (TABLE) index %u [%p] ==> L1 Addr %p (0x%"PRIx64")\n",
l0_idx, &l0_gpt_base[l0_idx], l1_gpt_arr, l0_gpt_base[l0_idx]);
/*
* Determine the PA of the last granule in this L0 descriptor.
@ -625,7 +626,7 @@ static void generate_l0_tbl_desc(pas_region_t *pas)
fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
cur_pa, last_gran_pa);
/* Advance cur_pa to first granule in next L0 region. */
/* Advance cur_pa to first granule in next L0 region */
cur_pa = get_l1_end_pa(cur_pa, end_pa);
}
}
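
get_l1_end_pa() is not shown in this diff; from its use above it presumably returns the lesser of the PAS end and the end of the current L0 region. A sketch under that assumption:

#include <stdint.h>

#define L0_REGION_SIZE	(1UL << 30)	/* assume L0GPTSZ = 1GB for illustration */

/* Sketch: first PA past the span fill_l1_tbl() should cover, i.e. the
 * start of the next L0 region or end_pa, whichever comes first */
static uintptr_t l1_end_pa_sketch(uintptr_t cur_pa, uintptr_t end_pa)
{
	uintptr_t region_end = (cur_pa + L0_REGION_SIZE) &
			       ~(L0_REGION_SIZE - 1UL);

	return (region_end < end_pa) ? region_end : end_pa;
}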
@ -649,19 +650,19 @@ static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
assert(pas != NULL);
assert(pas_count > 0);
assert(pas_count != 0U);
/* Initial start and end values. */
/* Initial start and end values */
start_idx = GPT_L0_IDX(pas[0].base_pa);
end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);
end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1UL);
/* Find lowest and highest L0 indices used in this PAS array. */
for (idx = 1; idx < pas_count; idx++) {
/* Find lowest and highest L0 indices used in this PAS array */
for (idx = 1U; idx < pas_count; idx++) {
if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
start_idx = GPT_L0_IDX(pas[idx].base_pa);
}
if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL) > end_idx) {
end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1UL);
}
}
@ -670,7 +671,7 @@ static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
* the end index value.
*/
flush_dcache_range((uintptr_t)&l0[start_idx],
((end_idx + 1) - start_idx) * sizeof(uint64_t));
((end_idx + 1U) - start_idx) * sizeof(uint64_t));
}
/*
@ -689,8 +690,8 @@ int gpt_enable(void)
* Granule tables must be initialised before enabling
* granule protection.
*/
if (gpt_config.plat_gpt_l0_base == 0U) {
ERROR("[GPT] Tables have not been initialized!\n");
if (gpt_config.plat_gpt_l0_base == 0UL) {
ERROR("GPT: Tables have not been initialized!\n");
return -EPERM;
}
@ -711,7 +712,7 @@ int gpt_enable(void)
*/
gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
/* Outer and Inner cacheability set to Normal memory, WB, RA, WA */
gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
@ -727,7 +728,7 @@ int gpt_enable(void)
/* Enable GPT */
gpccr_el3 |= GPCCR_GPC_BIT;
/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
/* TODO: Configure GPCCR_EL3_GPCP for Fault control */
write_gpccr_el3(gpccr_el3);
isb();
tlbipaallos();
@ -769,16 +770,16 @@ int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
int ret;
uint64_t gpt_desc;
/* Ensure that MMU and Data caches are enabled. */
/* Ensure that MMU and Data caches are enabled */
assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
/* Validate other parameters. */
/* Validate other parameters */
ret = validate_l0_params(pps, l0_mem_base, l0_mem_size);
if (ret != 0) {
return ret;
}
/* Create the descriptor to initialize L0 entries with. */
/* Create the descriptor to initialize L0 entries with */
gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
/* Iterate through all L0 entries */
@ -786,11 +787,11 @@ int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
((uint64_t *)l0_mem_base)[i] = gpt_desc;
}
/* Flush updated L0 tables to memory. */
/* Flush updated L0 tables to memory */
flush_dcache_range((uintptr_t)l0_mem_base,
(size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
/* Stash the L0 base address once initial setup is complete. */
/* Stash the L0 base address once initial setup is complete */
gpt_config.plat_gpt_l0_base = l0_mem_base;
return 0;
@ -824,57 +825,57 @@ int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
int ret;
int l1_gpt_cnt;
/* Ensure that MMU and Data caches are enabled. */
/* Ensure that MMU and Data caches are enabled */
assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
/* PGS is needed for validate_pas_mappings so check it now. */
/* PGS is needed for validate_pas_mappings so check it now */
if (pgs > GPT_PGS_MAX) {
ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
ERROR("GPT: Invalid PGS: 0x%x\n", pgs);
return -EINVAL;
}
gpt_config.pgs = pgs;
gpt_config.p = gpt_p_lookup[pgs];
/* Make sure L0 tables have been initialized. */
/* Make sure L0 tables have been initialized */
if (gpt_config.plat_gpt_l0_base == 0U) {
ERROR("[GPT] L0 tables must be initialized first!\n");
ERROR("GPT: L0 tables must be initialized first!\n");
return -EPERM;
}
/* Check if L1 GPTs are required and how many. */
/* Check if L1 GPTs are required and how many */
l1_gpt_cnt = validate_pas_mappings(pas_regions, pas_count);
if (l1_gpt_cnt < 0) {
return l1_gpt_cnt;
}
VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt);
VERBOSE("GPT: %i L1 GPTs requested\n", l1_gpt_cnt);
/* If L1 tables are needed then validate the L1 parameters. */
/* If L1 tables are needed then validate the L1 parameters */
if (l1_gpt_cnt > 0) {
ret = validate_l1_params(l1_mem_base, l1_mem_size,
l1_gpt_cnt);
(unsigned int)l1_gpt_cnt);
if (ret != 0) {
return ret;
}
/* Set up parameters for L1 table generation. */
/* Set up parameters for L1 table generation */
gpt_l1_tbl = l1_mem_base;
gpt_next_l1_tbl_idx = 0U;
}
INFO("[GPT] Boot Configuration\n");
INFO("GPT: Boot Configuration\n");
INFO(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
INFO(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
INFO(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
INFO(" PAS count: 0x%x\n", pas_count);
INFO(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base);
INFO(" PAS count: %u\n", pas_count);
INFO(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
/* Generate the tables in memory. */
/* Generate the tables in memory */
for (unsigned int idx = 0U; idx < pas_count; idx++) {
INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
idx, pas_regions[idx].base_pa, pas_regions[idx].size,
GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
VERBOSE("GPT: PAS[%u]: base 0x%"PRIxPTR"\tsize 0x%lx\tGPI 0x%x\ttype 0x%x\n",
idx, pas_regions[idx].base_pa, pas_regions[idx].size,
GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
/* Check if a block or table descriptor is required */
if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
@ -886,17 +887,17 @@ int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
}
}
/* Flush modified L0 tables. */
/* Flush modified L0 tables */
flush_l0_for_pas_array(pas_regions, pas_count);
/* Flush L1 tables if needed. */
/* Flush L1 tables if needed */
if (l1_gpt_cnt > 0) {
flush_dcache_range(l1_mem_base,
GPT_L1_TABLE_SIZE(gpt_config.p) *
l1_gpt_cnt);
}
/* Make sure that all the entries are written to the memory. */
/* Make sure that all the entries are written to the memory */
dsbishst();
tlbipaallos();
dsb();
@ -920,12 +921,12 @@ int gpt_runtime_init(void)
{
u_register_t reg;
/* Ensure that MMU and Data caches are enabled. */
/* Ensure that MMU and Data caches are enabled */
assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
/* Ensure GPC are already enabled. */
/* Ensure GPC are already enabled */
if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
ERROR("[GPT] Granule protection checks are not enabled!\n");
ERROR("GPT: Granule protection checks are not enabled!\n");
return -EPERM;
}
@ -938,18 +939,18 @@ int gpt_runtime_init(void)
GPTBR_BADDR_MASK) <<
GPTBR_BADDR_VAL_SHIFT;
/* Read GPCCR to get PGS and PPS values. */
/* Read GPCCR to get PGS and PPS values */
reg = read_gpccr_el3();
gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
gpt_config.t = gpt_t_lookup[gpt_config.pps];
gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
gpt_config.p = gpt_p_lookup[gpt_config.pgs];
VERBOSE("[GPT] Runtime Configuration\n");
VERBOSE("GPT: Runtime Configuration\n");
VERBOSE(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t);
VERBOSE(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p);
VERBOSE(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
VERBOSE(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base);
VERBOSE(" L0 base: 0x%"PRIxPTR"\n", gpt_config.plat_gpt_l0_base);
return 0;
}
@ -963,7 +964,7 @@ static spinlock_t gpt_lock;
/*
* A helper to write the value (target_pas << gpi_shift) to the index of
* the gpt_l1_addr
* the gpt_l1_addr.
*/
static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
unsigned int gpi_shift, unsigned int idx,
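
The body of write_gpt() is outside this diff; per the comment above, it presumably performs a read-modify-write of one 4-bit GPI field in the indexed L1 entry. A sketch under that assumption (parameter roles inferred, not the actual implementation):

#include <stdint.h>

static inline void write_gpt_sketch(uint64_t *gpt_l1_desc,
				    uint64_t *gpt_l1_addr,
				    unsigned int gpi_shift,
				    unsigned int idx,
				    unsigned int target_pas)
{
	/* Clear the old 4-bit GPI, write the new one, update memory */
	*gpt_l1_desc &= ~(0xFUL << gpi_shift);
	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
	gpt_l1_addr[idx] = *gpt_l1_desc;
}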
@ -976,7 +977,7 @@ static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
/*
* Helper to retrieve the gpt_l1_* information from the base address
* returned in gpi_info
* returned in gpi_info.
*/
static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
{
@ -985,12 +986,12 @@ static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
VERBOSE("GPT: Granule is not covered by a table descriptor!\n");
VERBOSE(" Base=0x%"PRIx64"\n", base);
return -EINVAL;
}
/* Get the table index and GPI shift from PA. */
/* Get the table index and GPI shift from PA */
gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
@ -1025,36 +1026,36 @@ int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
int res;
unsigned int target_pas;
/* Ensure that the tables have been set up before taking requests. */
/* Ensure that the tables have been set up before taking requests */
assert(gpt_config.plat_gpt_l0_base != 0UL);
/* Ensure that caches are enabled. */
/* Ensure that caches are enabled */
assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
/* Delegate request can only come from REALM or SECURE */
assert(src_sec_state == SMC_FROM_REALM ||
src_sec_state == SMC_FROM_SECURE);
/* See if this is a single or a range of granule transition. */
/* See if this is a single or a range of granule transition */
if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
return -EINVAL;
}
/* Check that base and size are valid */
if ((ULONG_MAX - base) < size) {
VERBOSE("[GPT] Transition request address overflow!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
VERBOSE("GPT: Transition request address overflow!\n");
VERBOSE(" Base=0x%"PRIx64"\n", base);
VERBOSE(" Size=0x%lx\n", size);
return -EINVAL;
}
/* Make sure base and size are valid. */
if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
/* Make sure base and size are valid */
if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
(size == 0UL) ||
((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
VERBOSE("[GPT] Invalid granule transition address range!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
VERBOSE("GPT: Invalid granule transition address range!\n");
VERBOSE(" Base=0x%"PRIx64"\n", base);
VERBOSE(" Size=0x%lx\n", size);
return -EINVAL;
}
@ -1078,7 +1079,7 @@ int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
/* Check that the current address is in NS state */
if (gpi_info.gpi != GPT_GPI_NS) {
VERBOSE("[GPT] Only Granule in NS state can be delegated.\n");
VERBOSE("GPT: Only Granule in NS state can be delegated.\n");
VERBOSE(" Caller: %u, Current GPI: %u\n", src_sec_state,
gpi_info.gpi);
spin_unlock(&gpt_lock);
@ -1094,7 +1095,7 @@ int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
/*
* In order to maintain mutual distrust between Realm and Secure
* states, remove any data speculatively fetched into the target
* physical address space. Issue DC CIPAPA over address range
* physical address space. Issue DC CIPAPA over address range.
*/
if (is_feat_mte2_supported()) {
flush_dcache_to_popa_range_mte2(nse | base,
@ -1121,14 +1122,14 @@ int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
GPT_PGS_ACTUAL_SIZE(gpt_config.p));
}
/* Unlock access to the L1 tables. */
/* Unlock access to the L1 tables */
spin_unlock(&gpt_lock);
/*
* The isb() will be done as part of context
* synchronization when returning to lower EL
* synchronization when returning to lower EL.
*/
VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
base, gpi_info.gpi, target_pas);
return 0;
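
For callers, the contract visible in the checks above is: size must equal the configured granule size, base must be granule-aligned and within the protected physical space, and the granule must currently be in NS state. A hypothetical usage sketch (address and granule size are illustrative):

/* Sketch: delegate one granule to the Realm PAS, assuming PGS = 4KB */
static int delegate_one_granule(void)
{
	return gpt_delegate_pas(0x880000000UL, 0x1000UL, SMC_FROM_REALM);
}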
@ -1157,36 +1158,36 @@ int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
uint64_t nse;
int res;
/* Ensure that the tables have been set up before taking requests. */
/* Ensure that the tables have been set up before taking requests */
assert(gpt_config.plat_gpt_l0_base != 0UL);
/* Ensure that MMU and caches are enabled. */
/* Ensure that MMU and caches are enabled */
assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
/* Delegate request can only come from REALM or SECURE */
assert(src_sec_state == SMC_FROM_REALM ||
src_sec_state == SMC_FROM_SECURE);
/* See if this is a single or a range of granule transition. */
/* See if this is a single or a range of granule transition */
if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
return -EINVAL;
}
/* Check that base and size are valid */
if ((ULONG_MAX - base) < size) {
VERBOSE("[GPT] Transition request address overflow!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
VERBOSE("GPT: Transition request address overflow!\n");
VERBOSE(" Base=0x%"PRIx64"\n", base);
VERBOSE(" Size=0x%lx\n", size);
return -EINVAL;
}
/* Make sure base and size are valid. */
if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
/* Make sure base and size are valid */
if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1UL)) != 0UL) ||
(size == 0UL) ||
((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
VERBOSE("[GPT] Invalid granule transition address range!\n");
VERBOSE(" Base=0x%" PRIx64 "\n", base);
VERBOSE("GPT: Invalid granule transition address range!\n");
VERBOSE(" Base=0x%"PRIx64"\n", base);
VERBOSE(" Size=0x%lx\n", size);
return -EINVAL;
}
@ -1209,8 +1210,8 @@ int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
gpi_info.gpi != GPT_GPI_REALM) ||
(src_sec_state == SMC_FROM_SECURE &&
gpi_info.gpi != GPT_GPI_SECURE)) {
VERBOSE("[GPT] Only Granule in REALM or SECURE state can be undelegated.\n");
VERBOSE(" Caller: %u, Current GPI: %u\n", src_sec_state,
VERBOSE("GPT: Only Granule in REALM or SECURE state can be undelegated.\n");
VERBOSE(" Caller: %u Current GPI: %u\n", src_sec_state,
gpi_info.gpi);
spin_unlock(&gpt_lock);
return -EPERM;
@ -1272,9 +1273,9 @@ int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
/*
* The isb() will be done as part of context
* synchronization when returning to lower EL
* synchronization when returning to lower EL.
*/
VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
VERBOSE("GPT: Granule 0x%"PRIx64" GPI 0x%x->0x%x\n",
base, gpi_info.gpi, GPT_GPI_NS);
return 0;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2022, Arm Limited. All rights reserved.
* Copyright (c) 2022-2024, Arm Limited. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
@ -15,13 +15,13 @@
/* GPT descriptor definitions */
/******************************************************************************/
/* GPT level 0 descriptor bit definitions. */
/* GPT level 0 descriptor bit definitions */
#define GPT_L0_TYPE_MASK UL(0xF)
#define GPT_L0_TYPE_SHIFT U(0)
/* For now, we don't support contiguous descriptors, only table and block. */
#define GPT_L0_TYPE_TBL_DESC UL(0x3)
#define GPT_L0_TYPE_BLK_DESC UL(0x1)
/* For now, we don't support contiguous descriptors, only table and block */
#define GPT_L0_TYPE_TBL_DESC UL(3)
#define GPT_L0_TYPE_BLK_DESC UL(1)
#define GPT_L0_TBL_DESC_L1ADDR_MASK UL(0xFFFFFFFFFF)
#define GPT_L0_TBL_DESC_L1ADDR_SHIFT U(12)
@ -57,7 +57,7 @@
/* GPT platform configuration */
/******************************************************************************/
/* This value comes from GPCCR_EL3 so no externally supplied definition. */
/* This value comes from GPCCR_EL3 so no externally supplied definition */
#define GPT_L0GPTSZ ((unsigned int)((read_gpccr_el3() >> \
GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK))
@ -117,10 +117,10 @@ typedef struct gpi_info {
unsigned int gpi;
} gpi_info_t;
/* Max valid value for PGS. */
/* Max valid value for PGS */
#define GPT_PGS_MAX (2U)
/* Max valid value for PPS. */
/* Max valid value for PPS */
#define GPT_PPS_MAX (6U)
/******************************************************************************/
@ -139,7 +139,7 @@ typedef struct gpi_info {
#define GPT_L0_IDX_WIDTH(_t) (((_t) > GPT_S_VAL) ? \
((_t) - GPT_S_VAL) : (0U))
/* Bit shift for the L0 index field in a PA. */
/* Bit shift for the L0 index field in a PA */
#define GPT_L0_IDX_SHIFT (GPT_S_VAL)
/*
@ -153,13 +153,13 @@ typedef struct gpi_info {
#define GPT_L0_IDX_MASK(_t) (0x3FFFFFUL >> (22U - \
(GPT_L0_IDX_WIDTH(_t))))
/* Total number of L0 regions. */
/* Total number of L0 regions */
#define GPT_L0_REGION_COUNT(_t) ((GPT_L0_IDX_MASK(_t)) + 1U)
/* Total size of each GPT L0 region in bytes. */
/* Total size of each GPT L0 region in bytes */
#define GPT_L0_REGION_SIZE (1UL << (GPT_L0_IDX_SHIFT))
/* Total size in bytes of the whole L0 table. */
/* Total size in bytes of the whole L0 table */
#define GPT_L0_TABLE_SIZE(_t) ((GPT_L0_REGION_COUNT(_t)) << 3U)
/******************************************************************************/
@ -175,7 +175,7 @@ typedef struct gpi_info {
*/
#define GPT_L1_IDX_WIDTH(_p) ((GPT_S_VAL - 1U) - ((_p) + 3U))
/* Bit shift for the L1 index field. */
/* Bit shift for the L1 index field */
#define GPT_L1_IDX_SHIFT(_p) ((_p) + 4U)
/*
@ -183,38 +183,38 @@ typedef struct gpi_info {
*
* The value 0x7FFFFF is 23 bits wide and is the maximum possible width of the
* L1 index within a physical address. It is calculated by
* ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512gb, the largest
* ((s_max - 1) - (p_min + 4) + 1) where s_max is 39 for 512GB, the largest
* L0GPTSZ, and p_min is 12 for 4KB granules, the smallest PGS.
*/
#define GPT_L1_IDX_MASK(_p) (0x7FFFFFUL >> (23U - \
(GPT_L1_IDX_WIDTH(_p))))
/* Bit shift for the index of the L1 GPI in a PA. */
/* Bit shift for the index of the L1 GPI in a PA */
#define GPT_L1_GPI_IDX_SHIFT(_p) (_p)
/* Mask for the index of the L1 GPI in a PA. */
/* Mask for the index of the L1 GPI in a PA */
#define GPT_L1_GPI_IDX_MASK (0xF)
/* Total number of entries in each L1 table. */
/* Total number of entries in each L1 table */
#define GPT_L1_ENTRY_COUNT(_p) ((GPT_L1_IDX_MASK(_p)) + 1U)
/* Total size in bytes of each L1 table. */
/* Total size in bytes of each L1 table */
#define GPT_L1_TABLE_SIZE(_p) ((GPT_L1_ENTRY_COUNT(_p)) << 3U)
/******************************************************************************/
/* General helper macros */
/******************************************************************************/
/* Protected space actual size in bytes. */
/* Protected space actual size in bytes */
#define GPT_PPS_ACTUAL_SIZE(_t) (1UL << (_t))
/* Granule actual size in bytes. */
/* Granule actual size in bytes */
#define GPT_PGS_ACTUAL_SIZE(_p) (1UL << (_p))
/* L0 GPT region size in bytes. */
/* L0 GPT region size in bytes */
#define GPT_L0GPTSZ_ACTUAL_SIZE (1UL << GPT_S_VAL)
/* Get the index of the L0 entry from a physical address. */
/* Get the index of the L0 entry from a physical address */
#define GPT_L0_IDX(_pa) ((_pa) >> GPT_L0_IDX_SHIFT)
/*
@ -223,38 +223,38 @@ typedef struct gpi_info {
*/
#define GPT_IS_L0_ALIGNED(_pa) (((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0))
/* Get the type field from an L0 descriptor. */
/* Get the type field from an L0 descriptor */
#define GPT_L0_TYPE(_desc) (((_desc) >> GPT_L0_TYPE_SHIFT) & \
GPT_L0_TYPE_MASK)
/* Create an L0 block descriptor. */
/* Create an L0 block descriptor */
#define GPT_L0_BLK_DESC(_gpi) (GPT_L0_TYPE_BLK_DESC | \
(((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \
GPT_L0_BLK_DESC_GPI_SHIFT))
/* Create an L0 table descriptor with an L1 table address. */
/* Create an L0 table descriptor with an L1 table address */
#define GPT_L0_TBL_DESC(_pa) (GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \
(GPT_L0_TBL_DESC_L1ADDR_MASK << \
GPT_L0_TBL_DESC_L1ADDR_SHIFT)))
/* Get the GPI from an L0 block descriptor. */
/* Get the GPI from an L0 block descriptor */
#define GPT_L0_BLKD_GPI(_desc) (((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \
GPT_L0_BLK_DESC_GPI_MASK)
/* Get the L1 address from an L0 table descriptor. */
/* Get the L1 address from an L0 table descriptor */
#define GPT_L0_TBLD_ADDR(_desc) ((uint64_t *)(((_desc) & \
(GPT_L0_TBL_DESC_L1ADDR_MASK << \
GPT_L0_TBL_DESC_L1ADDR_SHIFT))))
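
Putting the L0 descriptor macros together, a standalone sketch of building and decoding a block descriptor. The GPI field position and the 0x9 non-secure GPI encoding are assumptions for illustration; only the type value above is taken from the header:

#include <stdint.h>

#define L0_TYPE_BLK_DESC	1UL	/* from GPT_L0_TYPE_BLK_DESC */
#define L0_BLK_GPI_SHIFT	4U	/* assumed field position */
#define L0_BLK_GPI_MASK		0xFUL

int main(void)
{
	uint64_t gpi = 0x9UL;	/* assumed NS encoding, for illustration */

	/* Build, mirroring GPT_L0_BLK_DESC() */
	uint64_t desc = L0_TYPE_BLK_DESC |
			((gpi & L0_BLK_GPI_MASK) << L0_BLK_GPI_SHIFT);

	/* Decode, mirroring GPT_L0_BLKD_GPI() */
	uint64_t decoded = (desc >> L0_BLK_GPI_SHIFT) & L0_BLK_GPI_MASK;

	return (decoded == gpi) ? 0 : 1;
}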
/* Get the index into the L1 table from a physical address. */
/* Get the index into the L1 table from a physical address */
#define GPT_L1_IDX(_p, _pa) (((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \
GPT_L1_IDX_MASK(_p))
/* Get the index of the GPI within an L1 table entry from a physical address. */
/* Get the index of the GPI within an L1 table entry from a physical address */
#define GPT_L1_GPI_IDX(_p, _pa) (((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \
GPT_L1_GPI_IDX_MASK)
/* Determine if an address is granule-aligned. */
/* Determine if an address is granule-aligned */
#define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \
== U(0))
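
To tie the index macros together, a sketch of how a physical address decomposes, assuming 4KB granules (p = 12) and 1GB L0 regions (s = 30); the address is illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pa = 0x40123456UL;
	unsigned int p = 12U;	/* PGS = 4KB */
	unsigned int s = 30U;	/* L0GPTSZ = 1GB */

	/* GPT_L0_IDX: which 1GB L0 region the PA falls in */
	uint64_t l0_idx = pa >> s;

	/* GPT_L1_IDX: index width is (s - 1) - (p + 3) = 14 bits here */
	uint64_t l1_idx = (pa >> (p + 4U)) & ((1UL << 14) - 1UL);

	/* GPT_L1_GPI_IDX: which 4-bit GPI slot within the L1 entry */
	uint64_t gpi_idx = (pa >> p) & 0xFUL;

	printf("L0=%" PRIu64 " L1=%" PRIu64 " GPI slot=%" PRIu64 "\n",
	       l0_idx, l1_idx, gpi_idx);
	return 0;
}

With these values one L1 table spans 2^14 entries x 16 GPIs x 4KB = 1GB, matching the L0 region size.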