From 81c272b3b71af38bc5cfb10bbe5722e328a1578e Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Thu, 8 Jul 2021 16:51:14 -0500 Subject: [PATCH 01/21] feat(rme): add register definitions and helper functions for FEAT_RME This patch adds new register and bit definitions for the Armv9-A Realm Management Extension (RME) as described in the Arm document DDI0615 (https://developer.arm.com/documentation/ddi0615/latest). The patch also adds TLB maintenance functions and a function to detect the presence of RME feature. Signed-off-by: Zelalem Aweke Change-Id: I03d2af7ea41a20a9e8a362a36b8099e3b4d18a11 --- include/arch/aarch64/arch.h | 92 ++++++++++++++++++++++++++++ include/arch/aarch64/arch_features.h | 11 ++++ include/arch/aarch64/arch_helpers.h | 29 ++++++++- lib/aarch64/misc_helpers.S | 18 ++++++ 4 files changed, 148 insertions(+), 2 deletions(-) diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index 1b3ae0221..1053006a9 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -182,6 +182,11 @@ #define ID_AA64PFR0_CSV2_SHIFT U(56) #define ID_AA64PFR0_CSV2_MASK ULL(0xf) #define ID_AA64PFR0_CSV2_LENGTH U(4) +#define ID_AA64PFR0_FEAT_RME_SHIFT U(52) +#define ID_AA64PFR0_FEAT_RME_MASK ULL(0xf) +#define ID_AA64PFR0_FEAT_RME_LENGTH U(4) +#define ID_AA64PFR0_FEAT_RME_NOT_SUPPORTED U(0) +#define ID_AA64PFR0_FEAT_RME_V1 U(1) /* Exception level handling */ #define EL_IMPL_NONE ULL(0) @@ -432,6 +437,9 @@ /* SCR definitions */ #define SCR_RES1_BITS ((U(1) << 4) | (U(1) << 5)) +#define SCR_NSE_SHIFT U(62) +#define SCR_NSE_BIT (ULL(1) << SCR_NSE_SHIFT) +#define SCR_GPF_BIT (UL(1) << 48) #define SCR_TWEDEL_SHIFT U(30) #define SCR_TWEDEL_MASK ULL(0xf) #define SCR_HXEn_BIT (UL(1) << 38) @@ -1092,6 +1100,90 @@ #define AMEVCNTVOFF1E_EL2 S3_4_C13_C11_6 #define AMEVCNTVOFF1F_EL2 S3_4_C13_C11_7 +/******************************************************************************* + * Realm management extension register definitions + 
******************************************************************************/ + +/* GPCCR_EL3 definitions */ +#define GPCCR_EL3 S3_6_C2_C1_6 + +/* Least significant address bits protected by each entry in level 0 GPT */ +#define GPCCR_L0GPTSZ_SHIFT U(20) +#define GPCCR_L0GPTSZ_MASK U(0xF) +#define GPCCR_L0GPTSZ_30BITS U(0x0) +#define GPCCR_L0GPTSZ_34BITS U(0x4) +#define GPCCR_L0GPTSZ_36BITS U(0x6) +#define GPCCR_L0GPTSZ_39BITS U(0x9) +#define SET_GPCCR_L0GPTSZ(x) \ + ((x & GPCCR_L0GPTSZ_MASK) << GPCCR_L0GPTSZ_SHIFT) + +/* Granule protection check priority bit definitions */ +#define GPCCR_GPCP_SHIFT U(17) +#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_GPCP_SHIFT) + +/* Granule protection check bit definitions */ +#define GPCCR_GPC_SHIFT U(16) +#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT) + +/* Physical granule size bit definitions */ +#define GPCCR_PGS_SHIFT U(14) +#define GPCCR_PGS_MASK U(0x3) +#define GPCCR_PGS_4K U(0x0) +#define GPCCR_PGS_16K U(0x2) +#define GPCCR_PGS_64K U(0x1) +#define SET_GPCCR_PGS(x) \ + ((x & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT) + +/* GPT fetch shareability attribute bit definitions */ +#define GPCCR_SH_SHIFT U(12) +#define GPCCR_SH_MASK U(0x3) +#define GPCCR_SH_NS U(0x0) +#define GPCCR_SH_OS U(0x2) +#define GPCCR_SH_IS U(0x3) +#define SET_GPCCR_SH(x) \ + ((x & GPCCR_SH_MASK) << GPCCR_SH_SHIFT) + +/* GPT fetch outer cacheability attribute bit definitions */ +#define GPCCR_ORGN_SHIFT U(10) +#define GPCCR_ORGN_MASK U(0x3) +#define GPCCR_ORGN_NC U(0x0) +#define GPCCR_ORGN_WB_RA_WA U(0x1) +#define GPCCR_ORGN_WT_RA_NWA U(0x2) +#define GPCCR_ORGN_WB_RA_NWA U(0x3) +#define SET_GPCCR_ORGN(x) \ + ((x & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT) + +/* GPT fetch inner cacheability attribute bit definitions */ +#define GPCCR_IRGN_SHIFT U(8) +#define GPCCR_IRGN_MASK U(0x3) +#define GPCCR_IRGN_NC U(0x0) +#define GPCCR_IRGN_WB_RA_WA U(0x1) +#define GPCCR_IRGN_WT_RA_NWA U(0x2) +#define GPCCR_IRGN_WB_RA_NWA U(0x3) +#define SET_GPCCR_IRGN(x) \ + ((x & 
GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT) + +/* Protected physical address size bit definitions */ +#define GPCCR_PPS_SHIFT U(0) +#define GPCCR_PPS_MASK U(0x7) +#define GPCCR_PPS_4GB U(0x0) +#define GPCCR_PPS_64GB U(0x1) +#define GPCCR_PPS_1TB U(0x2) +#define GPCCR_PPS_4TB U(0x3) +#define GPCCR_PPS_16TB U(0x4) +#define GPCCR_PPS_256TB U(0x5) +#define GPCCR_PPS_4PB U(0x6) +#define SET_GPCCR_PPS(x) \ + ((x & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT) + +/* GPTBR_EL3 definitions */ +#define GPTBR_EL3 S3_6_C2_C1_4 + +/* Base Address for the GPT bit definitions */ +#define GPTBR_BADDR_SHIFT U(0) +#define GPTBR_BADDR_VAL_SHIFT U(12) +#define GPTBR_BADDR_MASK ULL(0xffffffffff) + /******************************************************************************* * RAS system registers ******************************************************************************/ diff --git a/include/arch/aarch64/arch_features.h b/include/arch/aarch64/arch_features.h index 3ff67e571..46cd1c982 100644 --- a/include/arch/aarch64/arch_features.h +++ b/include/arch/aarch64/arch_features.h @@ -123,4 +123,15 @@ static inline bool is_feat_hcx_present(void) ID_AA64MMFR1_EL1_HCX_MASK) == ID_AA64MMFR1_EL1_HCX_SUPPORTED); } +static inline unsigned int get_armv9_2_feat_rme_support(void) +{ + /* + * Return the RME version, zero if not supported. This function can be + * used as both an integer value for the RME version or compared to zero + * to detect RME presence. + */ + return (unsigned int)(read_id_aa64pfr0_el1() >> + ID_AA64PFR0_FEAT_RME_SHIFT) & ID_AA64PFR0_FEAT_RME_MASK; +} + #endif /* ARCH_FEATURES_H */ diff --git a/include/arch/aarch64/arch_helpers.h b/include/arch/aarch64/arch_helpers.h index 72b87c8b9..1aadf0b8d 100644 --- a/include/arch/aarch64/arch_helpers.h +++ b/include/arch/aarch64/arch_helpers.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -540,6 +540,10 @@ DEFINE_RENAME_SYSREG_RW_FUNCS(hcrx_el2, HCRX_EL2) /* DynamIQ Shared Unit power management */ DEFINE_RENAME_SYSREG_RW_FUNCS(clusterpwrdn_el1, CLUSTERPWRDN_EL1) +/* Armv9.2 RME Registers */ +DEFINE_RENAME_SYSREG_RW_FUNCS(gptbr_el3, GPTBR_EL3) +DEFINE_RENAME_SYSREG_RW_FUNCS(gpccr_el3, GPCCR_EL3) + #define IS_IN_EL(x) \ (GET_EL(read_CurrentEl()) == MODE_EL##x) @@ -583,7 +587,28 @@ static inline uint64_t el_implemented(unsigned int el) } } -/* Previously defined accesor functions with incomplete register names */ +/* + * TLBIPAALLOS instruction + * (TLB Invalidate GPT Information by PA, + * All Entries, Outer Shareable) + */ +static inline void tlbipaallos(void) +{ + __asm__("SYS #6,c8,c1,#4"); +} + +/* + * Invalidate cached copies of GPT entries + * from TLBs by physical address + * + * @pa: the starting address for the range + * of invalidation + * @size: size of the range of invalidation + */ +void gpt_tlbi_by_pa(uint64_t pa, size_t size); + + +/* Previously defined accessor functions with incomplete register names */ #define read_current_el() read_CurrentEl() diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S index b6f6c9d88..cc5c5759d 100644 --- a/lib/aarch64/misc_helpers.S +++ b/lib/aarch64/misc_helpers.S @@ -15,6 +15,7 @@ .globl zero_normalmem .globl zeromem .globl memcpy16 + .globl gpt_tlbi_by_pa .globl disable_mmu_el1 .globl disable_mmu_el3 @@ -592,3 +593,20 @@ func fixup_gdt_reloc b.lo 1b ret endfunc fixup_gdt_reloc + +/* + * TODO: Currently only supports size of 4KB, + * support other sizes as well. 
+ */ +func gpt_tlbi_by_pa +#if ENABLE_ASSERTIONS + cmp x1, #PAGE_SIZE_4KB + ASM_ASSERT(eq) + tst x0, #(PAGE_SIZE_MASK) + ASM_ASSERT(eq) +#endif + lsr x0, x0, #FOUR_KB_SHIFT /* 4KB size encoding is zero */ + sys #6, c8, c4, #3, x0 /* TLBI RPAOS, */ + dsb sy + ret +endfunc gpt_tlbi_by_pa From 4693ff7225faadc5ad1bcd1c2fb3fbbb8fe1aed0 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Thu, 8 Jul 2021 17:13:09 -0500 Subject: [PATCH 02/21] feat(rme): add Realm security state definition FEAT_RME introduces two additional security states, Root and Realm security states. This patch adds Realm security state awareness to SMCCC helpers and entry point info structure. Signed-off-by: Zelalem Aweke Change-Id: I9cdefcc1aa71259b2de46e5fb62b28d658fa59bd --- bl31/aarch64/runtime_exceptions.S | 15 +++++++++++++++ include/common/ep_info.h | 11 +++++++++-- include/export/common/ep_info_exp.h | 18 +++++++++++++++--- include/lib/smccc.h | 27 ++++++++++++++++++++++++++- 4 files changed, 65 insertions(+), 6 deletions(-) diff --git a/bl31/aarch64/runtime_exceptions.S b/bl31/aarch64/runtime_exceptions.S index 51eb2bd47..0d0a12d3e 100644 --- a/bl31/aarch64/runtime_exceptions.S +++ b/bl31/aarch64/runtime_exceptions.S @@ -500,6 +500,21 @@ smc_handler64: stp x16, x17, [x6, #CTX_EL3STATE_OFFSET + CTX_SPSR_EL3] str x18, [x6, #CTX_EL3STATE_OFFSET + CTX_SCR_EL3] + /* Clear flag register */ + mov x7, xzr + +#if ENABLE_RME + /* Copy SCR_EL3.NSE bit to the flag to indicate caller's security */ + ubfx x7, x18, #SCR_NSE_SHIFT, 1 + + /* + * Shift copied SCR_EL3.NSE bit by 5 to create space for + * SCR_EL3.NS bit. Bit 5 of the flag corresponds to + * the SCR_EL3.NSE bit. 
+ */ + lsl x7, x7, #5 +#endif /* ENABLE_RME */ + /* Copy SCR_EL3.NS bit to the flag to indicate caller's security */ bfi x7, x18, #0, #1 diff --git a/include/common/ep_info.h b/include/common/ep_info.h index 4bfa1fa6a..771572ce9 100644 --- a/include/common/ep_info.h +++ b/include/common/ep_info.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -18,14 +18,21 @@ #define SECURE EP_SECURE #define NON_SECURE EP_NON_SECURE +#define REALM EP_REALM +#if ENABLE_RME +#define sec_state_is_valid(s) (((s) == SECURE) || \ + ((s) == NON_SECURE) || \ + ((s) == REALM)) +#else #define sec_state_is_valid(s) (((s) == SECURE) || ((s) == NON_SECURE)) +#endif #define PARAM_EP_SECURITY_MASK EP_SECURITY_MASK #define NON_EXECUTABLE EP_NON_EXECUTABLE #define EXECUTABLE EP_EXECUTABLE -/* Secure or Non-secure image */ +/* Get/set security state of an image */ #define GET_SECURITY_STATE(x) ((x) & EP_SECURITY_MASK) #define SET_SECURITY_STATE(x, security) \ ((x) = ((x) & ~EP_SECURITY_MASK) | (security)) diff --git a/include/export/common/ep_info_exp.h b/include/export/common/ep_info_exp.h index 9d2969f3f..a5bd10ac8 100644 --- a/include/export/common/ep_info_exp.h +++ b/include/export/common/ep_info_exp.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -24,11 +24,23 @@ #define ENTRY_POINT_INFO_ARGS_OFFSET U(0x14) #endif -/* Security state of the image. */ -#define EP_SECURITY_MASK UL(0x1) +/* + * Security state of the image. 
Bit 0 and + * bit 5 are used to determine the security + * state of the image as follows: + * + * --------------------------------- + * Bit 5 | Bit 0 | Security state + * --------------------------------- + * 0 0 EP_SECURE + * 0 1 EP_NON_SECURE + * 1 1 EP_REALM + */ +#define EP_SECURITY_MASK UL(0x21) #define EP_SECURITY_SHIFT UL(0) #define EP_SECURE UL(0x0) #define EP_NON_SECURE UL(0x1) +#define EP_REALM UL(0x21) /* Endianness of the image. */ #define EP_EE_MASK U(0x2) diff --git a/include/lib/smccc.h b/include/lib/smccc.h index deaeb1d1d..1a39f24c7 100644 --- a/include/lib/smccc.h +++ b/include/lib/smccc.h @@ -108,9 +108,24 @@ #define SMC_ARCH_CALL_NOT_REQUIRED -2 #define SMC_ARCH_CALL_INVAL_PARAM -3 -/* Various flags passed to SMC handlers */ +/* + * Various flags passed to SMC handlers + * + * Bit 5 and bit 0 of the flag are used to + * determine the source security state as + * follows: + * --------------------------------- + * Bit 5 | Bit 0 | Security state + * --------------------------------- + * 0 0 SMC_FROM_SECURE + * 0 1 SMC_FROM_NON_SECURE + * 1 1 SMC_FROM_REALM + */ + #define SMC_FROM_SECURE (U(0) << 0) #define SMC_FROM_NON_SECURE (U(1) << 0) +#define SMC_FROM_REALM U(0x21) +#define SMC_FROM_MASK U(0x21) #ifndef __ASSEMBLER__ @@ -118,8 +133,18 @@ #include +#if ENABLE_RME +#define is_caller_non_secure(_f) (((_f) & SMC_FROM_MASK) \ + == SMC_FROM_NON_SECURE) +#define is_caller_secure(_f) (((_f) & SMC_FROM_MASK) \ + == SMC_FROM_SECURE) +#define is_caller_realm(_f) (((_f) & SMC_FROM_MASK) \ + == SMC_FROM_REALM) +#define caller_sec_state(_f) ((_f) & SMC_FROM_MASK) +#else /* ENABLE_RME */ #define is_caller_non_secure(_f) (((_f) & SMC_FROM_NON_SECURE) != U(0)) #define is_caller_secure(_f) (!is_caller_non_secure(_f)) +#endif /* ENABLE_RME */ /* The macro below is used to identify a Standard Service SMC call */ #define is_std_svc_call(_fid) (GET_SMC_OEN(_fid) == OEN_STD_START) From 362182386bafbda9e6671be921fa30cc20610d30 Mon Sep 17 00:00:00 2001 From: Zelalem 
Aweke Date: Thu, 8 Jul 2021 17:23:04 -0500 Subject: [PATCH 03/21] feat(rme): add xlat table library changes for FEAT_RME FEAT_RME adds a new bit (NSE) in the translation table descriptor to determine the Physical Address Space (PAS) of an EL3 stage 1 translation according to the following mapping: TTD.NSE TTD.NS | PAS ================================= 0 0 | Secure 0 1 | Non-secure 1 0 | Root 1 1 | Realm This patch adds modifications to version 2 of the translation table library accordingly. Bits 4 and 5 in mmap attribute are used to determine the PAS. Signed-off-by: Zelalem Aweke Change-Id: I82790f6900b7a1ab9494c732eac7b9808a388103 --- include/lib/xlat_tables/xlat_tables.h | 9 +++++- include/lib/xlat_tables/xlat_tables_defs.h | 3 +- include/lib/xlat_tables/xlat_tables_v2.h | 30 ++++++++++++++----- lib/xlat_tables_v2/aarch32/xlat_tables_arch.c | 17 +++++++++++ lib/xlat_tables_v2/aarch64/xlat_tables_arch.c | 29 +++++++++++++++++- lib/xlat_tables_v2/xlat_tables_core.c | 11 ++++--- lib/xlat_tables_v2/xlat_tables_private.h | 5 +++- lib/xlat_tables_v2/xlat_tables_utils.c | 18 ++++++++++- 8 files changed, 105 insertions(+), 17 deletions(-) diff --git a/include/lib/xlat_tables/xlat_tables.h b/include/lib/xlat_tables/xlat_tables.h index 082bb5e45..a15696976 100644 --- a/include/lib/xlat_tables/xlat_tables.h +++ b/include/lib/xlat_tables/xlat_tables.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -72,6 +72,13 @@ #define MT_CODE (MT_MEMORY | MT_RO | MT_EXECUTE) #define MT_RO_DATA (MT_MEMORY | MT_RO | MT_EXECUTE_NEVER) +/* Memory type for EL3 regions */ +#if ENABLE_RME +#error FEAT_RME requires version 2 of the Translation Tables Library +#else +#define EL3_PAS MT_SECURE +#endif + /* * Structure for specifying a single region of memory. 
*/ diff --git a/include/lib/xlat_tables/xlat_tables_defs.h b/include/lib/xlat_tables/xlat_tables_defs.h index 579d8d89c..2d0949b51 100644 --- a/include/lib/xlat_tables/xlat_tables_defs.h +++ b/include/lib/xlat_tables/xlat_tables_defs.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -142,6 +142,7 @@ #define AP_NO_ACCESS_UNPRIVILEGED (AP1_NO_ACCESS_UNPRIVILEGED << 4) #define AP_ONE_VA_RANGE_RES1 (AP1_RES1 << 4) #define NS (U(0x1) << 3) +#define EL3_S1_NSE (U(0x1) << 9) #define ATTR_NON_CACHEABLE_INDEX ULL(0x2) #define ATTR_DEVICE_INDEX ULL(0x1) #define ATTR_IWBWA_OWBWA_NTR_INDEX ULL(0x0) diff --git a/include/lib/xlat_tables/xlat_tables_v2.h b/include/lib/xlat_tables/xlat_tables_v2.h index 359b9839a..69ad02764 100644 --- a/include/lib/xlat_tables/xlat_tables_v2.h +++ b/include/lib/xlat_tables/xlat_tables_v2.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -60,17 +60,22 @@ #define MT_TYPE(_attr) ((_attr) & MT_TYPE_MASK) /* Access permissions (RO/RW) */ #define MT_PERM_SHIFT U(3) -/* Security state (SECURE/NS) */ -#define MT_SEC_SHIFT U(4) + +/* Physical address space (SECURE/NS/Root/Realm) */ +#define MT_PAS_SHIFT U(4) +#define MT_PAS_MASK (U(3) << MT_PAS_SHIFT) +#define MT_PAS(_attr) ((_attr) & MT_PAS_MASK) + /* Access permissions for instruction execution (EXECUTE/EXECUTE_NEVER) */ -#define MT_EXECUTE_SHIFT U(5) +#define MT_EXECUTE_SHIFT U(6) /* In the EL1&0 translation regime, User (EL0) or Privileged (EL1). 
*/ -#define MT_USER_SHIFT U(6) +#define MT_USER_SHIFT U(7) /* Shareability attribute for the memory region */ -#define MT_SHAREABILITY_SHIFT U(7) +#define MT_SHAREABILITY_SHIFT U(8) #define MT_SHAREABILITY_MASK (U(3) << MT_SHAREABILITY_SHIFT) #define MT_SHAREABILITY(_attr) ((_attr) & MT_SHAREABILITY_MASK) + /* All other bits are reserved */ /* @@ -91,8 +96,10 @@ #define MT_RO (U(0) << MT_PERM_SHIFT) #define MT_RW (U(1) << MT_PERM_SHIFT) -#define MT_SECURE (U(0) << MT_SEC_SHIFT) -#define MT_NS (U(1) << MT_SEC_SHIFT) +#define MT_SECURE (U(0) << MT_PAS_SHIFT) +#define MT_NS (U(1) << MT_PAS_SHIFT) +#define MT_ROOT (U(2) << MT_PAS_SHIFT) +#define MT_REALM (U(3) << MT_PAS_SHIFT) /* * Access permissions for instruction execution are only relevant for normal @@ -149,6 +156,13 @@ typedef struct mmap_region { #define EL3_REGIME 3 #define EL_REGIME_INVALID -1 +/* Memory type for EL3 regions. With RME, EL3 is in ROOT PAS */ +#if ENABLE_RME +#define EL3_PAS MT_ROOT +#else +#define EL3_PAS MT_SECURE +#endif /* ENABLE_RME */ + /* * Declare the translation context type. * Its definition is private. diff --git a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c index b69c6702b..ed6383751 100644 --- a/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch32/xlat_tables_arch.c @@ -39,6 +39,23 @@ size_t xlat_arch_get_max_supported_granule_size(void) return PAGE_SIZE_4KB; } +/* + * Determine the physical address space encoded in the 'attr' parameter. + * + * The physical address will fall into one of two spaces; secure or + * nonsecure. 
+ */ +uint32_t xlat_arch_get_pas(uint32_t attr) +{ + uint32_t pas = MT_PAS(attr); + + if (pas == MT_NS) { + return LOWER_ATTRS(NS); + } else { /* MT_SECURE */ + return 0U; + } +} + #if ENABLE_ASSERTIONS unsigned long long xlat_arch_get_max_supported_pa(void) { diff --git a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c index 3832b0703..719110a0e 100644 --- a/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c +++ b/lib/xlat_tables_v2/aarch64/xlat_tables_arch.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -53,6 +53,33 @@ size_t xlat_arch_get_max_supported_granule_size(void) } } +/* + * Determine the physical address space encoded in the 'attr' parameter. + * + * The physical address will fall into one of four spaces; secure, + * nonsecure, root, or realm if RME is enabled, or one of two spaces; + * secure and nonsecure otherwise. + */ +uint32_t xlat_arch_get_pas(uint32_t attr) +{ + uint32_t pas = MT_PAS(attr); + + switch (pas) { +#if ENABLE_RME + /* TTD.NSE = 1 and TTD.NS = 1 for Realm PAS */ + case MT_REALM: + return LOWER_ATTRS(EL3_S1_NSE | NS); + /* TTD.NSE = 1 and TTD.NS = 0 for Root PAS */ + case MT_ROOT: + return LOWER_ATTRS(EL3_S1_NSE); +#endif + case MT_NS: + return LOWER_ATTRS(NS); + default: /* MT_SECURE */ + return 0U; + } +} + unsigned long long tcr_physical_addr_size_bits(unsigned long long max_addr) { /* Physical address can't exceed 48 bits */ diff --git a/lib/xlat_tables_v2/xlat_tables_core.c b/lib/xlat_tables_v2/xlat_tables_core.c index bb6d18459..de5718454 100644 --- a/lib/xlat_tables_v2/xlat_tables_core.c +++ b/lib/xlat_tables_v2/xlat_tables_core.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -125,11 +125,14 @@ uint64_t xlat_desc(const xlat_ctx_t *ctx, uint32_t attr, * faults aren't managed. */ desc |= LOWER_ATTRS(ACCESS_FLAG); + + /* Determine the physical address space this region belongs to. */ + desc |= xlat_arch_get_pas(attr); + /* - * Deduce other fields of the descriptor based on the MT_NS and MT_RW - * memory region attributes. + * Deduce other fields of the descriptor based on the MT_RW memory + * region attributes. */ - desc |= ((attr & MT_NS) != 0U) ? LOWER_ATTRS(NS) : 0U; desc |= ((attr & MT_RW) != 0U) ? LOWER_ATTRS(AP_RW) : LOWER_ATTRS(AP_RO); /* diff --git a/lib/xlat_tables_v2/xlat_tables_private.h b/lib/xlat_tables_v2/xlat_tables_private.h index 863470cf3..42c9a43ea 100644 --- a/lib/xlat_tables_v2/xlat_tables_private.h +++ b/lib/xlat_tables_v2/xlat_tables_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -40,6 +40,9 @@ extern uint64_t mmu_cfg_params[MMU_CFG_PARAM_MAX]; +/* Determine the physical address space encoded in the 'attr' parameter. */ +uint32_t xlat_arch_get_pas(uint32_t attr); + /* * Return the execute-never mask that will prevent instruction fetch at the * given translation regime. diff --git a/lib/xlat_tables_v2/xlat_tables_utils.c b/lib/xlat_tables_v2/xlat_tables_utils.c index 9fae7e917..df1738642 100644 --- a/lib/xlat_tables_v2/xlat_tables_utils.c +++ b/lib/xlat_tables_v2/xlat_tables_utils.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -95,7 +95,23 @@ static void xlat_desc_print(const xlat_ctx_t *ctx, uint64_t desc) ? 
"-USER" : "-PRIV"); } +#if ENABLE_RME + switch (desc & LOWER_ATTRS(EL3_S1_NSE | NS)) { + case 0ULL: + printf("-S"); + break; + case LOWER_ATTRS(NS): + printf("-NS"); + break; + case LOWER_ATTRS(EL3_S1_NSE): + printf("-RT"); + break; + default: /* LOWER_ATTRS(EL3_S1_NSE | NS) */ + printf("-RL"); + } +#else printf(((LOWER_ATTRS(NS) & desc) != 0ULL) ? "-NS" : "-S"); +#endif #ifdef __aarch64__ /* Check Guarded Page bit */ From 6c09af9f8b36cdfa1dc4d5052f7e4792f63fa88a Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Fri, 9 Jul 2021 11:37:10 -0500 Subject: [PATCH 04/21] feat(rme): run BL2 in root world when FEAT_RME is enabled This patch enables BL2 to run in root world (EL3) which is needed as per the security model of RME-enabled systems. Using the existing BL2_AT_EL3 TF-A build option is not convenient because that option assumes TF-A BL1 doesn't exist, which is not the case for RME-enabled systems. For the purposes of RME, we use a normal BL1 image but we also want to run BL2 in EL3 as normally as possible, therefore rather than use the special bl2_entrypoint function in bl2_el3_entrypoint.S, we use a new bl2_entrypoint function (in bl2_rme_entrypoint.S) which doesn't need reset or mailbox initialization code seen in the el3_entrypoint_common macro. The patch also cleans up bl2_el3_entrypoint.S, moving the bl2_run_next_image function to its own file to avoid duplicating code. 
Signed-off-by: Zelalem Aweke Change-Id: I99821b4cd550cadcb701f4c0c4dc36da81c7ef55 --- bl1/aarch64/bl1_context_mgmt.c | 38 +++++++++++++- bl1/aarch64/bl1_entrypoint.S | 40 +++++++++++++- bl1/bl1_private.h | 5 +- bl2/aarch32/bl2_el3_entrypoint.S | 37 +------------ bl2/aarch32/bl2_run_next_image.S | 46 ++++++++++++++++ bl2/aarch64/bl2_el3_entrypoint.S | 37 +------------ bl2/aarch64/bl2_rme_entrypoint.S | 67 ++++++++++++++++++++++++ bl2/aarch64/bl2_run_next_image.S | 45 ++++++++++++++++ bl2/bl2.ld.S | 6 ++- bl2/bl2.mk | 15 +++++- bl2/bl2_main.c | 53 +++++++++---------- include/arch/aarch64/el3_common_macros.S | 14 ++++- lib/aarch64/misc_helpers.S | 3 +- 13 files changed, 298 insertions(+), 108 deletions(-) create mode 100644 bl2/aarch32/bl2_run_next_image.S create mode 100644 bl2/aarch64/bl2_rme_entrypoint.S create mode 100644 bl2/aarch64/bl2_run_next_image.S diff --git a/bl1/aarch64/bl1_context_mgmt.c b/bl1/aarch64/bl1_context_mgmt.c index 2a8d58efd..b9a7e5ba6 100644 --- a/bl1/aarch64/bl1_context_mgmt.c +++ b/bl1/aarch64/bl1_context_mgmt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -16,6 +16,7 @@ /* Following contains the cpu context pointers. */ static void *bl1_cpu_context_ptr[2]; +entry_point_info_t *bl2_ep_info; void *cm_get_context(uint32_t security_state) @@ -30,6 +31,40 @@ void cm_set_context(void *context, uint32_t security_state) bl1_cpu_context_ptr[security_state] = context; } +#if ENABLE_RME +/******************************************************************************* + * This function prepares the entry point information to run BL2 in Root world, + * i.e. EL3, for the case when FEAT_RME is enabled. 
+ ******************************************************************************/ +void bl1_prepare_next_image(unsigned int image_id) +{ + image_desc_t *bl2_desc; + + assert(image_id == BL2_IMAGE_ID); + + /* Get the image descriptor. */ + bl2_desc = bl1_plat_get_image_desc(BL2_IMAGE_ID); + assert(bl2_desc != NULL); + + /* Get the entry point info. */ + bl2_ep_info = &bl2_desc->ep_info; + + bl2_ep_info->spsr = (uint32_t)SPSR_64(MODE_EL3, MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + + /* + * Flush cache since bl2_ep_info is accessed after MMU is disabled + * before jumping to BL2. + */ + flush_dcache_range((uintptr_t)bl2_ep_info, sizeof(entry_point_info_t)); + + /* Indicate that image is in execution state. */ + bl2_desc->state = IMAGE_STATE_EXECUTED; + + /* Print debug info and flush the console before running BL2. */ + print_entry_point_info(bl2_ep_info); +} +#else /******************************************************************************* * This function prepares the context for Secure/Normal world images. * Normal world images are transitioned to EL2(if supported) else EL1. @@ -93,3 +128,4 @@ void bl1_prepare_next_image(unsigned int image_id) print_entry_point_info(next_bl_ep); } +#endif /* ENABLE_RME */ diff --git a/bl1/aarch64/bl1_entrypoint.S b/bl1/aarch64/bl1_entrypoint.S index 00f27184d..f61c06023 100644 --- a/bl1/aarch64/bl1_entrypoint.S +++ b/bl1/aarch64/bl1_entrypoint.S @@ -1,13 +1,15 @@ /* - * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include +#include #include .globl bl1_entrypoint + .globl bl1_run_bl2_in_root /* ----------------------------------------------------- @@ -66,5 +68,41 @@ func bl1_entrypoint * Do the transition to next boot image. 
* -------------------------------------------------- */ +#if ENABLE_RME + b bl1_run_bl2_in_root +#else b el3_exit +#endif endfunc bl1_entrypoint + + /* ----------------------------------------------------- + * void bl1_run_bl2_in_root(); + * This function runs BL2 in root/EL3 when RME is enabled. + * ----------------------------------------------------- + */ + +func bl1_run_bl2_in_root + /* read bl2_ep_info */ + adrp x20, bl2_ep_info + add x20, x20, :lo12:bl2_ep_info + ldr x20, [x20] + + /* --------------------------------------------- + * MMU needs to be disabled because BL2 executes + * in EL3. It will initialize the address space + * according to its own requirements. + * --------------------------------------------- + */ + bl disable_mmu_icache_el3 + tlbi alle3 + + ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET] + msr elr_el3, x0 + msr spsr_el3, x1 + + ldp x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)] + ldp x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)] + ldp x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)] + ldp x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)] + exception_return +endfunc bl1_run_bl2_in_root diff --git a/bl1/bl1_private.h b/bl1/bl1_private.h index 2cfeeea28..e119ba727 100644 --- a/bl1/bl1_private.h +++ b/bl1/bl1_private.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,6 +11,8 @@ #include +extern entry_point_info_t *bl2_ep_info; + /****************************************** * Function prototypes *****************************************/ @@ -18,6 +20,7 @@ void bl1_arch_setup(void); void bl1_arch_next_el_setup(void); void bl1_prepare_next_image(unsigned int image_id); +void bl1_run_bl2_in_root(void); u_register_t bl1_fwu_smc_handler(unsigned int smc_fid, u_register_t x1, diff --git a/bl2/aarch32/bl2_el3_entrypoint.S b/bl2/aarch32/bl2_el3_entrypoint.S index 7e855516d..40154aad6 100644 --- a/bl2/aarch32/bl2_el3_entrypoint.S +++ b/bl2/aarch32/bl2_el3_entrypoint.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2021, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -10,7 +10,6 @@ #include .globl bl2_entrypoint - .globl bl2_run_next_image func bl2_entrypoint @@ -56,37 +55,3 @@ func bl2_entrypoint no_ret plat_panic_handler endfunc bl2_entrypoint - -func bl2_run_next_image - mov r8,r0 - - /* - * MMU needs to be disabled because both BL2 and BL32 execute - * in PL1, and therefore share the same address space. - * BL32 will initialize the address space according to its - * own requirement. - */ - bl disable_mmu_icache_secure - stcopr r0, TLBIALL - dsb sy - isb - mov r0, r8 - bl bl2_el3_plat_prepare_exit - - /* - * Extract PC and SPSR based on struct `entry_point_info_t` - * and load it in LR and SPSR registers respectively. 
- */ - ldr lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET] - ldr r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)] - msr spsr_xc, r1 - - /* Some BL32 stages expect lr_svc to provide the BL33 entry address */ - cps #MODE32_svc - ldr lr, [r8, #ENTRY_POINT_INFO_LR_SVC_OFFSET] - cps #MODE32_mon - - add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET - ldm r8, {r0, r1, r2, r3} - exception_return -endfunc bl2_run_next_image diff --git a/bl2/aarch32/bl2_run_next_image.S b/bl2/aarch32/bl2_run_next_image.S new file mode 100644 index 000000000..0b3554edc --- /dev/null +++ b/bl2/aarch32/bl2_run_next_image.S @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + + .globl bl2_run_next_image + + +func bl2_run_next_image + mov r8,r0 + + /* + * MMU needs to be disabled because both BL2 and BL32 execute + * in PL1, and therefore share the same address space. + * BL32 will initialize the address space according to its + * own requirement. + */ + bl disable_mmu_icache_secure + stcopr r0, TLBIALL + dsb sy + isb + mov r0, r8 + bl bl2_el3_plat_prepare_exit + + /* + * Extract PC and SPSR based on struct `entry_point_info_t` + * and load it in LR and SPSR registers respectively. + */ + ldr lr, [r8, #ENTRY_POINT_INFO_PC_OFFSET] + ldr r1, [r8, #(ENTRY_POINT_INFO_PC_OFFSET + 4)] + msr spsr_xc, r1 + + /* Some BL32 stages expect lr_svc to provide the BL33 entry address */ + cps #MODE32_svc + ldr lr, [r8, #ENTRY_POINT_INFO_LR_SVC_OFFSET] + cps #MODE32_mon + + add r8, r8, #ENTRY_POINT_INFO_ARGS_OFFSET + ldm r8, {r0, r1, r2, r3} + exception_return +endfunc bl2_run_next_image diff --git a/bl2/aarch64/bl2_el3_entrypoint.S b/bl2/aarch64/bl2_el3_entrypoint.S index 4eab39cd3..45bac7da1 100644 --- a/bl2/aarch64/bl2_el3_entrypoint.S +++ b/bl2/aarch64/bl2_el3_entrypoint.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017-2020, ARM Limited and Contributors. All rights reserved. 
+ * Copyright (c) 2017-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -12,8 +12,6 @@ #include .globl bl2_entrypoint - .globl bl2_el3_run_image - .globl bl2_run_next_image #if BL2_IN_XIP_MEM #define FIXUP_SIZE 0 @@ -72,36 +70,3 @@ func bl2_entrypoint */ no_ret plat_panic_handler endfunc bl2_entrypoint - -func bl2_run_next_image - mov x20,x0 - /* --------------------------------------------- - * MMU needs to be disabled because both BL2 and BL31 execute - * in EL3, and therefore share the same address space. - * BL31 will initialize the address space according to its - * own requirement. - * --------------------------------------------- - */ - bl disable_mmu_icache_el3 - tlbi alle3 - bl bl2_el3_plat_prepare_exit - -#if ENABLE_PAUTH - /* --------------------------------------------- - * Disable pointer authentication before jumping - * to next boot image. - * --------------------------------------------- - */ - bl pauth_disable_el3 -#endif /* ENABLE_PAUTH */ - - ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET] - msr elr_el3, x0 - msr spsr_el3, x1 - - ldp x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)] - ldp x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)] - ldp x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)] - ldp x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)] - exception_return -endfunc bl2_run_next_image diff --git a/bl2/aarch64/bl2_rme_entrypoint.S b/bl2/aarch64/bl2_rme_entrypoint.S new file mode 100644 index 000000000..076e3267d --- /dev/null +++ b/bl2/aarch64/bl2_rme_entrypoint.S @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include + +#include +#include +#include +#include + + .globl bl2_entrypoint + + +func bl2_entrypoint + /* Save arguments x0-x3 from previous Boot loader */ + mov x20, x0 + mov x21, x1 + mov x22, x2 + mov x23, x3 + + el3_entrypoint_common \ + _init_sctlr=0 \ + _warm_boot_mailbox=0 \ + _secondary_cold_boot=0 \ + _init_memory=0 \ + _init_c_runtime=1 \ + _exception_vectors=bl2_el3_exceptions \ + _pie_fixup_size=0 + + /* --------------------------------------------- + * Restore parameters of boot rom + * --------------------------------------------- + */ + mov x0, x20 + mov x1, x21 + mov x2, x22 + mov x3, x23 + + /* --------------------------------------------- + * Perform BL2 setup + * --------------------------------------------- + */ + bl bl2_setup + +#if ENABLE_PAUTH + /* --------------------------------------------- + * Program APIAKey_EL1 and enable pointer authentication. + * --------------------------------------------- + */ + bl pauth_init_enable_el3 +#endif /* ENABLE_PAUTH */ + + /* --------------------------------------------- + * Jump to main function. + * --------------------------------------------- + */ + bl bl2_main + + /* --------------------------------------------- + * Should never reach this point. + * --------------------------------------------- + */ + no_ret plat_panic_handler +endfunc bl2_entrypoint diff --git a/bl2/aarch64/bl2_run_next_image.S b/bl2/aarch64/bl2_run_next_image.S new file mode 100644 index 000000000..f0a8be87a --- /dev/null +++ b/bl2/aarch64/bl2_run_next_image.S @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + + .globl bl2_run_next_image + + +func bl2_run_next_image + mov x20,x0 + /* --------------------------------------------- + * MMU needs to be disabled because both BL2 and BL31 execute + * in EL3, and therefore share the same address space. 
+ * BL31 will initialize the address space according to its + * own requirement. + * --------------------------------------------- + */ + bl disable_mmu_icache_el3 + tlbi alle3 + bl bl2_el3_plat_prepare_exit + +#if ENABLE_PAUTH + /* --------------------------------------------- + * Disable pointer authentication before jumping + * to next boot image. + * --------------------------------------------- + */ + bl pauth_disable_el3 +#endif /* ENABLE_PAUTH */ + + ldp x0, x1, [x20, #ENTRY_POINT_INFO_PC_OFFSET] + msr elr_el3, x0 + msr spsr_el3, x1 + + ldp x6, x7, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x30)] + ldp x4, x5, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x20)] + ldp x2, x3, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x10)] + ldp x0, x1, [x20, #(ENTRY_POINT_INFO_ARGS_OFFSET + 0x0)] + exception_return +endfunc bl2_run_next_image diff --git a/bl2/bl2.ld.S b/bl2/bl2.ld.S index 37849c312..d332ec069 100644 --- a/bl2/bl2.ld.S +++ b/bl2/bl2.ld.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -25,7 +25,11 @@ SECTIONS #if SEPARATE_CODE_AND_RODATA .text . : { __TEXT_START__ = .; +#if ENABLE_RME + *bl2_rme_entrypoint.o(.text*) +#else /* ENABLE_RME */ *bl2_entrypoint.o(.text*) +#endif /* ENABLE_RME */ *(SORT_BY_ALIGNMENT(.text*)) *(.vectors) . = ALIGN(PAGE_SIZE); diff --git a/bl2/bl2.mk b/bl2/bl2.mk index 735e7e04f..54c73f506 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause # @@ -15,13 +15,24 @@ ifeq (${ARCH},aarch64) BL2_SOURCES += common/aarch64/early_exceptions.S endif -ifeq (${BL2_AT_EL3},0) +ifeq (${ENABLE_RME},1) +# Using RME, run BL2 at EL3 +BL2_SOURCES += bl2/${ARCH}/bl2_rme_entrypoint.S \ + bl2/${ARCH}/bl2_el3_exceptions.S \ + bl2/${ARCH}/bl2_run_next_image.S \ + +BL2_LINKERFILE := bl2/bl2.ld.S + +else ifeq (${BL2_AT_EL3},0) +# Normal operation, no RME, no BL2 at EL3 BL2_SOURCES += bl2/${ARCH}/bl2_entrypoint.S BL2_LINKERFILE := bl2/bl2.ld.S else +# BL2 at EL3, no RME BL2_SOURCES += bl2/${ARCH}/bl2_el3_entrypoint.S \ bl2/${ARCH}/bl2_el3_exceptions.S \ + bl2/${ARCH}/bl2_run_next_image.S \ lib/cpus/${ARCH}/cpu_helpers.S \ lib/cpus/errata_report.c diff --git a/bl2/bl2_main.c b/bl2/bl2_main.c index d2de1350d..197c057e1 100644 --- a/bl2/bl2_main.c +++ b/bl2/bl2_main.c @@ -29,31 +29,9 @@ #define NEXT_IMAGE "BL32" #endif -#if !BL2_AT_EL3 +#if BL2_AT_EL3 /******************************************************************************* - * Setup function for BL2. - ******************************************************************************/ -void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2, - u_register_t arg3) -{ - /* Perform early platform-specific setup */ - bl2_early_platform_setup2(arg0, arg1, arg2, arg3); - - /* Perform late platform-specific setup */ - bl2_plat_arch_setup(); - -#if CTX_INCLUDE_PAUTH_REGS - /* - * Assert that the ARMv8.3-PAuth registers are present or an access - * fault will be triggered when they are being saved or restored. - */ - assert(is_armv8_3_pauth_present()); -#endif /* CTX_INCLUDE_PAUTH_REGS */ -} - -#else /* if BL2_AT_EL3 */ -/******************************************************************************* - * Setup function for BL2 when BL2_AT_EL3=1. 
+ * Setup function for BL2 when BL2_AT_EL3=1 ******************************************************************************/ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2, u_register_t arg3) @@ -64,6 +42,27 @@ void bl2_el3_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2, /* Perform late platform-specific setup */ bl2_el3_plat_arch_setup(); +#if CTX_INCLUDE_PAUTH_REGS + /* + * Assert that the ARMv8.3-PAuth registers are present or an access + * fault will be triggered when they are being saved or restored. + */ + assert(is_armv8_3_pauth_present()); +#endif /* CTX_INCLUDE_PAUTH_REGS */ +} +#else /* BL2_AT_EL3 */ +/******************************************************************************* + * Setup function for BL2 when BL2_AT_EL3=0 + ******************************************************************************/ +void bl2_setup(u_register_t arg0, u_register_t arg1, u_register_t arg2, + u_register_t arg3) +{ + /* Perform early platform-specific setup */ + bl2_early_platform_setup2(arg0, arg1, arg2, arg3); + + /* Perform late platform-specific setup */ + bl2_plat_arch_setup(); + #if CTX_INCLUDE_PAUTH_REGS /* * Assert that the ARMv8.3-PAuth registers are present or an access @@ -115,7 +114,7 @@ void bl2_main(void) measured_boot_finish(); #endif /* MEASURED_BOOT */ -#if !BL2_AT_EL3 +#if !BL2_AT_EL3 && !ENABLE_RME #ifndef __aarch64__ /* * For AArch32 state BL1 and BL2 share the MMU setup. @@ -140,7 +139,7 @@ void bl2_main(void) * be passed to next BL image as an argument. 
*/ smc(BL1_SMC_RUN_IMAGE, (unsigned long)next_bl_ep_info, 0, 0, 0, 0, 0, 0); -#else /* if BL2_AT_EL3 */ +#else /* if BL2_AT_EL3 || ENABLE_RME */ NOTICE("BL2: Booting " NEXT_IMAGE "\n"); print_entry_point_info(next_bl_ep_info); console_flush(); @@ -153,5 +152,5 @@ #endif /* ENABLE_PAUTH */ bl2_run_next_image(next_bl_ep_info); -#endif /* BL2_AT_EL3 */ +#endif /* BL2_AT_EL3 && ENABLE_RME */ } diff --git a/include/arch/aarch64/el3_common_macros.S b/include/arch/aarch64/el3_common_macros.S index d4965841f..7d6a9638d 100644 --- a/include/arch/aarch64/el3_common_macros.S +++ b/include/arch/aarch64/el3_common_macros.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -87,6 +87,13 @@ * do so. */ orr x0, x0, #(SCR_API_BIT | SCR_APK_BIT) +#endif +#if ENABLE_RME + /* + * TODO: Setting the EEL2 bit to allow EL3 access to secure only registers + * in context management. This will need to be refactored. + */ + orr x0, x0, #SCR_EEL2_BIT #endif msr scr_el3, x0 @@ -365,6 +372,7 @@ msr vbar_el3, x0 isb +#if !(defined(IMAGE_BL2) && ENABLE_RME) /* --------------------------------------------------------------------- * It is a cold boot. * Perform any processor specific actions upon reset e.g. cache, TLB @@ -372,6 +380,7 @@ * --------------------------------------------------------------------- */ bl reset_handler +#endif el3_arch_init_common @@ -414,7 +423,8 @@ * --------------------------------------------------------------------- */ .if \_init_c_runtime -#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3 && BL2_INV_DCACHE) +#if defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \ + ((BL2_AT_EL3 && BL2_INV_DCACHE) || ENABLE_RME)) /* ------------------------------------------------------------- * Invalidate the RW memory used by the BL31 image.
This * includes the data and NOBITS sections. This is done to diff --git a/lib/aarch64/misc_helpers.S b/lib/aarch64/misc_helpers.S index cc5c5759d..6e4d1fc30 100644 --- a/lib/aarch64/misc_helpers.S +++ b/lib/aarch64/misc_helpers.S @@ -163,7 +163,8 @@ func zeromem_dczva * Check for M bit (MMU enabled) of the current SCTLR_EL(1|3) * register value and panic if the MMU is disabled. */ -#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && BL2_AT_EL3) +#if defined(IMAGE_BL1) || defined(IMAGE_BL31) || (defined(IMAGE_BL2) && \ + (BL2_AT_EL3 || ENABLE_RME)) mrs tmp1, sctlr_el3 #else mrs tmp1, sctlr_el1 From 77c2775323a5ff8b77230f05c0cc57f830e9f153 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Fri, 9 Jul 2021 14:20:03 -0500 Subject: [PATCH 05/21] feat(rme): add RMM dispatcher (RMMD) This patch introduces the RMM dispatcher into BL31. This will be the mechanism that will enable communication to take place between the Realm and non-secure world. Currently gives the capability for granules to be transitioned from non-secure type to realm and vice versa. 
Signed-off-by: Zelalem Aweke Signed-off-by: Subhasish Ghosh Change-Id: I1fdc99a4bdd42bc14911aa0c6954b131de309511 --- bl31/bl31.mk | 4 + include/arch/aarch64/arch.h | 2 + include/services/gtsi_svc.h | 40 +++ include/services/rmi_svc.h | 64 ++++ include/services/rmmd_svc.h | 34 ++ services/arm_arch_svc/arm_arch_svc_setup.c | 26 +- services/std_svc/rmmd/aarch64/rmmd_helpers.S | 73 ++++ services/std_svc/rmmd/rmmd.mk | 13 + services/std_svc/rmmd/rmmd_initial_context.h | 33 ++ services/std_svc/rmmd/rmmd_main.c | 341 +++++++++++++++++++ services/std_svc/rmmd/rmmd_private.h | 64 ++++ services/std_svc/std_svc_setup.c | 12 + 12 files changed, 705 insertions(+), 1 deletion(-) create mode 100644 include/services/gtsi_svc.h create mode 100644 include/services/rmi_svc.h create mode 100644 include/services/rmmd_svc.h create mode 100644 services/std_svc/rmmd/aarch64/rmmd_helpers.S create mode 100644 services/std_svc/rmmd/rmmd.mk create mode 100644 services/std_svc/rmmd/rmmd_initial_context.h create mode 100644 services/std_svc/rmmd/rmmd_main.c create mode 100644 services/std_svc/rmmd/rmmd_private.h diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 7819141ef..ce0f69b7d 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -111,6 +111,10 @@ ifeq ($(SMC_PCI_SUPPORT),1) BL31_SOURCES += services/std_svc/pci_svc.c endif +ifeq (${ENABLE_RME},1) +BL31_SOURCES += ${RMMD_SOURCES} +endif + BL31_LINKERFILE := bl31/bl31.ld.S # Flag used to indicate if Crash reporting via console should be included diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index 1053006a9..5949370e0 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -448,6 +448,7 @@ #define SCR_ECVEN_BIT (UL(1) << 28) #define SCR_FGTEN_BIT (UL(1) << 27) #define SCR_ATA_BIT (UL(1) << 26) +#define SCR_EnSCXT_BIT (UL(1) << 25) #define SCR_FIEN_BIT (UL(1) << 21) #define SCR_EEL2_BIT (UL(1) << 18) #define SCR_API_BIT (UL(1) << 17) @@ -609,6 +610,7 @@ #define SPSR_M_MASK U(0x1) #define SPSR_M_AARCH64 
U(0x0) #define SPSR_M_AARCH32 U(0x1) +#define SPSR_M_EL2H U(0x9) #define SPSR_EL_SHIFT U(2) #define SPSR_EL_WIDTH U(2) diff --git a/include/services/gtsi_svc.h b/include/services/gtsi_svc.h new file mode 100644 index 000000000..cb942ed30 --- /dev/null +++ b/include/services/gtsi_svc.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GTSI_SVC_H +#define GTSI_SVC_H + +/* GTSI error codes. */ +#define GTSI_SUCCESS 0 +#define GTSI_ERROR_NOT_SUPPORTED -1 +#define GTSI_ERROR_INVALID_ADDRESS -2 +#define GTSI_ERROR_INVALID_PAS -3 + +/* The macros below are used to identify GTSI calls from the SMC function ID */ +#define GTSI_FNUM_MIN_VALUE U(0x100) +#define GTSI_FNUM_MAX_VALUE U(0x101) +#define is_gtsi_fid(fid) __extension__ ({ \ + __typeof__(fid) _fid = (fid); \ + ((GET_SMC_NUM(_fid) >= GTSI_FNUM_MIN_VALUE) && \ + (GET_SMC_NUM(_fid) <= GTSI_FNUM_MAX_VALUE)); }) + +/* Get GTSI fastcall std FID from function number */ +#define GTSI_FID(smc_cc, func_num) \ + ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ + ((smc_cc) << FUNCID_CC_SHIFT) | \ + (OEN_STD_START << FUNCID_OEN_SHIFT) | \ + ((func_num) << FUNCID_NUM_SHIFT)) + +#define GRAN_TRANS_TO_REALM_FNUM U(0x100) +#define GRAN_TRANS_TO_NS_FNUM U(0x101) + +#define SMC_ASC_MARK_REALM GTSI_FID(SMC_64, GRAN_TRANS_TO_REALM_FNUM) +#define SMC_ASC_MARK_NONSECURE GTSI_FID(SMC_64, GRAN_TRANS_TO_NS_FNUM) + +#define GRAN_TRANS_RET_BAD_ADDR -2 +#define GRAN_TRANS_RET_BAD_PAS -3 + +#endif /* GTSI_SVC_H */ diff --git a/include/services/rmi_svc.h b/include/services/rmi_svc.h new file mode 100644 index 000000000..9106f088d --- /dev/null +++ b/include/services/rmi_svc.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMI_SVC_H +#define RMI_SVC_H + +#include +#include + +/* RMI error codes. 
*/ +#define RMI_SUCCESS 0 +#define RMI_ERROR_NOT_SUPPORTED -1 +#define RMI_ERROR_INVALID_ADDRESS -2 +#define RMI_ERROR_INVALID_PAS -3 + +/* The macros below are used to identify RMI calls from the SMC function ID */ +#define RMI_FNUM_MIN_VALUE U(0x00) +#define RMI_FNUM_MAX_VALUE U(0x20) +#define is_rmi_fid(fid) __extension__ ({ \ + __typeof__(fid) _fid = (fid); \ + ((GET_SMC_NUM(_fid) >= RMI_FNUM_MIN_VALUE) && \ + (GET_SMC_NUM(_fid) <= RMI_FNUM_MAX_VALUE) && \ + (GET_SMC_TYPE(_fid) == SMC_TYPE_FAST) && \ + (GET_SMC_CC(_fid) == SMC_64) && \ + (GET_SMC_OEN(_fid) == OEN_ARM_START) && \ + ((_fid & 0x00FE0000) == 0U)); }) + +/* Get RMI fastcall std FID from function number */ +#define RMI_FID(smc_cc, func_num) \ + ((SMC_TYPE_FAST << FUNCID_TYPE_SHIFT) | \ + ((smc_cc) << FUNCID_CC_SHIFT) | \ + (OEN_ARM_START << FUNCID_OEN_SHIFT) | \ + ((func_num) << FUNCID_NUM_SHIFT)) + +/* + * SMC_RMM_INIT_COMPLETE is the only function in the RMI that originates from + * the Realm world and is handled by the RMMD. The remaining functions are + * always invoked by the Normal world, forwarded by RMMD and handled by the + * RMM + */ +#define RMI_FNUM_REQ_COMPLETE U(0x10) +#define RMI_FNUM_VERSION_REQ U(0x00) + +#define RMI_FNUM_GRAN_NS_REALM U(0x01) +#define RMI_FNUM_GRAN_REALM_NS U(0x10) + +/* RMI SMC64 FIDs handled by the RMMD */ +#define RMI_RMM_REQ_COMPLETE RMI_FID(SMC_64, RMI_FNUM_REQ_COMPLETE) +#define RMI_RMM_REQ_VERSION RMI_FID(SMC_64, RMI_FNUM_VERSION_REQ) + +#define RMI_RMM_GRANULE_DELEGATE RMI_FID(SMC_64, RMI_FNUM_GRAN_NS_REALM) +#define RMI_RMM_GRANULE_UNDELEGATE RMI_FID(SMC_64, RMI_FNUM_GRAN_REALM_NS) + + +#define RMI_ABI_VERSION_GET_MAJOR(_version) ((_version) >> 16) +#define RMI_ABI_VERSION_GET_MINOR(_version) ((_version) & 0xFFFF) + +/* Reserve a special value for MBZ parameters. 
*/ +#define RMI_PARAM_MBZ U(0x0) + +#endif /* RMI_SVC_H */ diff --git a/include/services/rmmd_svc.h b/include/services/rmmd_svc.h new file mode 100644 index 000000000..132973b77 --- /dev/null +++ b/include/services/rmmd_svc.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_SVC_H +#define RMMD_SVC_H + +#ifndef __ASSEMBLER__ +#include + +int rmmd_setup(void); +uint64_t rmmd_rmi_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +uint64_t rmmd_gtsi_handler(uint32_t smc_fid, + uint64_t x1, + uint64_t x2, + uint64_t x3, + uint64_t x4, + void *cookie, + void *handle, + uint64_t flags); + +#endif /* __ASSEMBLER__ */ + +#endif /* RMMD_SVC_H */ diff --git a/services/arm_arch_svc/arm_arch_svc_setup.c b/services/arm_arch_svc/arm_arch_svc_setup.c index 37bfc62e2..1d4423cb3 100644 --- a/services/arm_arch_svc/arm_arch_svc_setup.c +++ b/services/arm_arch_svc/arm_arch_svc_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2018-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -11,9 +11,19 @@ #include #include #include +#include +#include #include #include +#if ENABLE_RME +/* Setup Arm architecture Services */ +static int32_t arm_arch_svc_setup(void) +{ + return rmmd_setup(); +} +#endif + static int32_t smccc_version(void) { return MAKE_SMCCC_VERSION(SMCCC_MAJOR_VERSION, SMCCC_MINOR_VERSION); @@ -133,6 +143,16 @@ static uintptr_t arm_arch_svc_smc_handler(uint32_t smc_fid, SMC_RET0(handle); #endif default: +#if ENABLE_RME + /* + * RMI functions are allocated from the Arch service range. Call + * the RMM dispatcher to handle RMI calls. 
+ */ + if (is_rmi_fid(smc_fid)) { + return rmmd_rmi_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } +#endif WARN("Unimplemented Arm Architecture Service Call: 0x%x \n", smc_fid); SMC_RET1(handle, SMC_UNK); @@ -145,6 +165,10 @@ DECLARE_RT_SVC( OEN_ARM_START, OEN_ARM_END, SMC_TYPE_FAST, +#if ENABLE_RME + arm_arch_svc_setup, +#else NULL, +#endif arm_arch_svc_smc_handler ); diff --git a/services/std_svc/rmmd/aarch64/rmmd_helpers.S b/services/std_svc/rmmd/aarch64/rmmd_helpers.S new file mode 100644 index 000000000..6229baf4d --- /dev/null +++ b/services/std_svc/rmmd/aarch64/rmmd_helpers.S @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include "../rmmd_private.h" +#include + + .global rmmd_rmm_enter + .global rmmd_rmm_exit + + /* --------------------------------------------------------------------- + * This function is called with SP_EL0 as stack. Here we stash our EL3 + * callee-saved registers on to the stack as a part of saving the C + * runtime and enter the secure payload. + * 'x0' contains a pointer to the memory where the address of the C + * runtime context is to be saved. + * --------------------------------------------------------------------- + */ +func rmmd_rmm_enter + /* Make space for the registers that we're going to save */ + mov x3, sp + str x3, [x0, #0] + sub sp, sp, #RMMD_C_RT_CTX_SIZE + + /* Save callee-saved registers on to the stack */ + stp x19, x20, [sp, #RMMD_C_RT_CTX_X19] + stp x21, x22, [sp, #RMMD_C_RT_CTX_X21] + stp x23, x24, [sp, #RMMD_C_RT_CTX_X23] + stp x25, x26, [sp, #RMMD_C_RT_CTX_X25] + stp x27, x28, [sp, #RMMD_C_RT_CTX_X27] + stp x29, x30, [sp, #RMMD_C_RT_CTX_X29] + + /* --------------------------------------------------------------------- + * Everything is setup now. el3_exit() will use the secure context to + * restore to the general purpose and EL3 system registers to ERET + * into the secure payload. 
+ * --------------------------------------------------------------------- + */ + b el3_exit +endfunc rmmd_rmm_enter + + /* --------------------------------------------------------------------- + * This function is called with 'x0' pointing to a C runtime context. + * It restores the saved registers and jumps to that runtime with 'x0' + * as the new SP register. This destroys the C runtime context that had + * been built on the stack below the saved context by the caller. Later + * the second parameter 'x1' is passed as a return value to the caller. + * --------------------------------------------------------------------- + */ +func rmmd_rmm_exit + /* Restore the previous stack */ + mov sp, x0 + + /* Restore callee-saved registers from the stack */ + ldp x19, x20, [x0, #(RMMD_C_RT_CTX_X19 - RMMD_C_RT_CTX_SIZE)] + ldp x21, x22, [x0, #(RMMD_C_RT_CTX_X21 - RMMD_C_RT_CTX_SIZE)] + ldp x23, x24, [x0, #(RMMD_C_RT_CTX_X23 - RMMD_C_RT_CTX_SIZE)] + ldp x25, x26, [x0, #(RMMD_C_RT_CTX_X25 - RMMD_C_RT_CTX_SIZE)] + ldp x27, x28, [x0, #(RMMD_C_RT_CTX_X27 - RMMD_C_RT_CTX_SIZE)] + ldp x29, x30, [x0, #(RMMD_C_RT_CTX_X29 - RMMD_C_RT_CTX_SIZE)] + + /* --------------------------------------------------------------------- + * This should take us back to the instruction after the call to the + * last rmmd_rmm_enter(). Place the second parameter to x0 + * so that the caller will see it as a return value from the original + * entry call. + * --------------------------------------------------------------------- + */ + mov x0, x1 + ret +endfunc rmmd_rmm_exit diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk new file mode 100644 index 000000000..57031fe17 --- /dev/null +++ b/services/std_svc/rmmd/rmmd.mk @@ -0,0 +1,13 @@ +# +# Copyright (c) 2021, ARM Limited and Contributors. All rights reserved.
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +ifneq (${ARCH},aarch64) + $(error "Error: RMMD is only supported on aarch64.") +endif + +RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \ + ${ARCH}/rmmd_helpers.S \ + rmmd_main.c) diff --git a/services/std_svc/rmmd/rmmd_initial_context.h b/services/std_svc/rmmd/rmmd_initial_context.h new file mode 100644 index 000000000..d7a743d8e --- /dev/null +++ b/services/std_svc/rmmd/rmmd_initial_context.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_INITIAL_CONTEXT_H +#define RMMD_INITIAL_CONTEXT_H + +#include + +/* + * SPSR_EL2 + * M=0x9 (0b1001 EL2h) + * M[4]=0 + * DAIF=0xF Exceptions masked on entry. + * BTYPE=0 BTI not yet supported. + * SSBS=0 Not yet supported. + * IL=0 Not an illegal exception return. + * SS=0 Not single stepping. + * PAN=1 RMM shouldn't access realm memory. + * UAO=0 + * DIT=0 + * TCO=0 + * NZCV=0 + */ +#define REALM_SPSR_EL2 ( \ + SPSR_M_EL2H | \ + (0xF << SPSR_DAIF_SHIFT) | \ + SPSR_PAN_BIT \ + ) + +#endif /* RMMD_INITIAL_CONTEXT_H */ diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c new file mode 100644 index 000000000..26a5b8464 --- /dev/null +++ b/services/std_svc/rmmd/rmmd_main.c @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "rmmd_initial_context.h" +#include "rmmd_private.h" + +/******************************************************************************* + * RMM context information. 
+ ******************************************************************************/ +rmmd_rmm_context_t rmm_context[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * RMM entry point information. Discovered on the primary core and reused + * on secondary cores. + ******************************************************************************/ +static entry_point_info_t *rmm_ep_info; + +/******************************************************************************* + * Static function declaration. + ******************************************************************************/ +static int32_t rmm_init(void); +static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state, + uint32_t dst_sec_state, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle); + +/******************************************************************************* + * This function takes an RMM context pointer and performs a synchronous entry + * into it. + ******************************************************************************/ +uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *rmm_ctx) +{ + uint64_t rc; + + assert(rmm_ctx != NULL); + + cm_set_context(&(rmm_ctx->cpu_ctx), REALM); + + /* Save the current el1/el2 context before loading realm context. */ + cm_el1_sysregs_context_save(NON_SECURE); + cm_el2_sysregs_context_save(NON_SECURE); + + /* Restore the realm context assigned above */ + cm_el1_sysregs_context_restore(REALM); + cm_el2_sysregs_context_restore(REALM); + cm_set_next_eret_context(REALM); + + /* Enter RMM */ + rc = rmmd_rmm_enter(&rmm_ctx->c_rt_ctx); + + /* Save realm context */ + cm_el1_sysregs_context_save(REALM); + cm_el2_sysregs_context_save(REALM); + + /* Restore the el1/el2 context again. 
*/ + cm_el1_sysregs_context_restore(NON_SECURE); + cm_el2_sysregs_context_restore(NON_SECURE); + + return rc; +} + +/******************************************************************************* + * This function returns to the place where rmmd_rmm_sync_entry() was + * called originally. + ******************************************************************************/ +__dead2 void rmmd_rmm_sync_exit(uint64_t rc) +{ + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + + /* Get context of the RMM in use by this CPU. */ + assert(cm_get_context(REALM) == &(ctx->cpu_ctx)); + + /* + * The RMMD must have initiated the original request through a + * synchronous entry into RMM. Jump back to the original C runtime + * context with the value of rc in x0; + */ + rmmd_rmm_exit(ctx->c_rt_ctx, rc); + + panic(); +} + +static void rmm_el2_context_init(el2_sysregs_t *regs) +{ + regs->ctx_regs[CTX_SPSR_EL2 >> 3] = REALM_SPSR_EL2; + regs->ctx_regs[CTX_SCTLR_EL2 >> 3] = SCTLR_EL2_RES1; +} + +/******************************************************************************* + * Jump to the RMM for the first time. + ******************************************************************************/ +static int32_t rmm_init(void) +{ + + uint64_t rc; + + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + + INFO("RMM init start.\n"); + ctx->state = RMM_STATE_RESET; + + /* Initialize RMM EL2 context. */ + rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); + + rc = rmmd_rmm_sync_entry(ctx); + if (rc != 0ULL) { + ERROR("RMM initialisation failed 0x%llx\n", rc); + panic(); + } + + ctx->state = RMM_STATE_IDLE; + INFO("RMM init end.\n"); + + return 1; +} + +/******************************************************************************* + * Load and read RMM manifest, setup RMM. 
+ ******************************************************************************/ +int rmmd_setup(void) +{ + uint32_t ep_attr; + unsigned int linear_id = plat_my_core_pos(); + rmmd_rmm_context_t *rmm_ctx = &rmm_context[linear_id]; + + /* Make sure RME is supported. */ + assert(get_armv9_2_feat_rme_support() != 0U); + + rmm_ep_info = bl31_plat_get_next_image_ep_info(REALM); + if (rmm_ep_info == NULL) { + WARN("No RMM image provided by BL2 boot loader, Booting " + "device without RMM initialization. SMCs destined for " + "RMM will return SMC_UNK\n"); + return -ENOENT; + } + + /* Under no circumstances will this parameter be 0 */ + assert(rmm_ep_info->pc == RMM_BASE); + + /* Initialise an entrypoint to set up the CPU context */ + ep_attr = EP_REALM; + if ((read_sctlr_el3() & SCTLR_EE_BIT) != 0U) { + ep_attr |= EP_EE_BIG; + } + + SET_PARAM_HEAD(rmm_ep_info, PARAM_EP, VERSION_1, ep_attr); + rmm_ep_info->spsr = SPSR_64(MODE_EL2, + MODE_SP_ELX, + DISABLE_ALL_EXCEPTIONS); + + /* Initialise RMM context with this entry point information */ + cm_setup_context(&rmm_ctx->cpu_ctx, rmm_ep_info); + + INFO("RMM setup done.\n"); + + /* Register init function for deferred init. 
*/ + bl31_register_rmm_init(&rmm_init); + + return 0; +} + +/******************************************************************************* + * Forward SMC to the other security state + ******************************************************************************/ +static uint64_t rmmd_smc_forward(uint32_t smc_fid, uint32_t src_sec_state, + uint32_t dst_sec_state, uint64_t x1, + uint64_t x2, uint64_t x3, uint64_t x4, + void *handle) +{ + /* Save incoming security state */ + cm_el1_sysregs_context_save(src_sec_state); + cm_el2_sysregs_context_save(src_sec_state); + + /* Restore outgoing security state */ + cm_el1_sysregs_context_restore(dst_sec_state); + cm_el2_sysregs_context_restore(dst_sec_state); + cm_set_next_eret_context(dst_sec_state); + + SMC_RET8(cm_get_context(dst_sec_state), smc_fid, x1, x2, x3, x4, + SMC_GET_GP(handle, CTX_GPREG_X5), + SMC_GET_GP(handle, CTX_GPREG_X6), + SMC_GET_GP(handle, CTX_GPREG_X7)); +} + +/******************************************************************************* + * This function handles all SMCs in the range reserved for RMI. Each call is + * either forwarded to the other security state or handled by the RMM dispatcher + ******************************************************************************/ +uint64_t rmmd_rmi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, + uint64_t x3, uint64_t x4, void *cookie, + void *handle, uint64_t flags) +{ + rmmd_rmm_context_t *ctx = &rmm_context[plat_my_core_pos()]; + uint32_t src_sec_state; + + /* Determine which security state this SMC originated from */ + src_sec_state = caller_sec_state(flags); + + /* RMI must not be invoked by the Secure world */ + if (src_sec_state == SMC_FROM_SECURE) { + WARN("RMM: RMI invoked by secure world.\n"); + SMC_RET1(handle, SMC_UNK); + } + + /* + * Forward an RMI call from the Normal world to the Realm world as it + * is. 
+ */ + if (src_sec_state == SMC_FROM_NON_SECURE) { + VERBOSE("RMM: RMI call from non-secure world.\n"); + return rmmd_smc_forward(smc_fid, NON_SECURE, REALM, + x1, x2, x3, x4, handle); + } + + assert(src_sec_state == SMC_FROM_REALM); + + switch (smc_fid) { + case RMI_RMM_REQ_COMPLETE: + if (ctx->state == RMM_STATE_RESET) { + VERBOSE("RMM: running rmmd_rmm_sync_exit\n"); + rmmd_rmm_sync_exit(x1); + } + + return rmmd_smc_forward(x1, REALM, NON_SECURE, + x2, x3, x4, 0, handle); + + default: + WARN("RMM: Unsupported RMM call 0x%08x\n", smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} + +/******************************************************************************* + * This cpu has been turned on. Enter RMM to initialise R-EL2. Entry into RMM + * is done after initialising minimal architectural state that guarantees safe + * execution. + ******************************************************************************/ +static void *rmmd_cpu_on_finish_handler(const void *arg) +{ + int32_t rc; + uint32_t linear_id = plat_my_core_pos(); + rmmd_rmm_context_t *ctx = &rmm_context[linear_id]; + + ctx->state = RMM_STATE_RESET; + + /* Initialise RMM context with this entry point information */ + cm_setup_context(&ctx->cpu_ctx, rmm_ep_info); + + /* Initialize RMM EL2 context. 
*/ + rmm_el2_context_init(&ctx->cpu_ctx.el2_sysregs_ctx); + + rc = rmmd_rmm_sync_entry(ctx); + if (rc != 0) { + ERROR("RMM initialisation failed (%d) on CPU%d\n", rc, + linear_id); + panic(); + } + + ctx->state = RMM_STATE_IDLE; + return NULL; +} + +/* Subscribe to PSCI CPU on to initialize RMM on secondary */ +SUBSCRIBE_TO_EVENT(psci_cpu_on_finish, rmmd_cpu_on_finish_handler); + +static int gtsi_transition_granule(uint64_t pa, + unsigned int src_sec_state, + unsigned int target_pas) +{ + int ret; + + ret = gpt_transition_pas(pa, src_sec_state, target_pas); + + /* Convert TF-A error codes into GTSI error codes */ + if (ret == -EINVAL) { + ret = GRAN_TRANS_RET_BAD_ADDR; + } else if (ret == -EPERM) { + ret = GRAN_TRANS_RET_BAD_PAS; + } + + return ret; +} + +/******************************************************************************* + * This function handles all SMCs in the range reserved for GTF. + ******************************************************************************/ +uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, + uint64_t x3, uint64_t x4, void *cookie, + void *handle, uint64_t flags) +{ + uint32_t src_sec_state; + + /* Determine which security state this SMC originated from */ + src_sec_state = caller_sec_state(flags); + + if (src_sec_state != SMC_FROM_REALM) { + WARN("RMM: GTF call originated from secure or normal world\n"); + SMC_RET1(handle, SMC_UNK); + } + + switch (smc_fid) { + case SMC_ASC_MARK_REALM: + SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, + GPI_REALM)); + break; + case SMC_ASC_MARK_NONSECURE: + SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, + GPI_NS)); + break; + default: + WARN("RMM: Unsupported GTF call 0x%08x\n", smc_fid); + SMC_RET1(handle, SMC_UNK); + } +} diff --git a/services/std_svc/rmmd/rmmd_private.h b/services/std_svc/rmmd/rmmd_private.h new file mode 100644 index 000000000..d170bcd22 --- /dev/null +++ b/services/std_svc/rmmd/rmmd_private.h @@ -0,0 +1,64 @@ +/* + * 
Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef RMMD_PRIVATE_H +#define RMMD_PRIVATE_H + +#include + +/******************************************************************************* + * Constants that allow assembler code to preserve callee-saved registers of the + * C runtime context while performing a security state switch. + ******************************************************************************/ +#define RMMD_C_RT_CTX_X19 0x0 +#define RMMD_C_RT_CTX_X20 0x8 +#define RMMD_C_RT_CTX_X21 0x10 +#define RMMD_C_RT_CTX_X22 0x18 +#define RMMD_C_RT_CTX_X23 0x20 +#define RMMD_C_RT_CTX_X24 0x28 +#define RMMD_C_RT_CTX_X25 0x30 +#define RMMD_C_RT_CTX_X26 0x38 +#define RMMD_C_RT_CTX_X27 0x40 +#define RMMD_C_RT_CTX_X28 0x48 +#define RMMD_C_RT_CTX_X29 0x50 +#define RMMD_C_RT_CTX_X30 0x58 + +#define RMMD_C_RT_CTX_SIZE 0x60 +#define RMMD_C_RT_CTX_ENTRIES (RMMD_C_RT_CTX_SIZE >> DWORD_SHIFT) + +#ifndef __ASSEMBLER__ +#include +#include + +typedef enum rmm_state { + RMM_STATE_RESET = 0, + RMM_STATE_IDLE +} rmm_state_t; + +/* + * Data structure used by the RMM dispatcher (RMMD) in EL3 to track context of + * the RMM at R-EL2. 
+ */ +typedef struct rmmd_rmm_context { + uint64_t c_rt_ctx; + cpu_context_t cpu_ctx; + rmm_state_t state; +} rmmd_rmm_context_t; + +/* Functions used to enter/exit the RMM synchronously */ +uint64_t rmmd_rmm_sync_entry(rmmd_rmm_context_t *ctx); +__dead2 void rmmd_rmm_sync_exit(uint64_t rc); + +/* Assembly helpers */ +uint64_t rmmd_rmm_enter(uint64_t *c_rt_ctx); +void __dead2 rmmd_rmm_exit(uint64_t c_rt_ctx, uint64_t ret); + +/* Reference to PM ops for the RMMD */ +extern const spd_pm_ops_t rmmd_pm; + +#endif /* __ASSEMBLER__ */ + +#endif /* RMMD_PRIVATE_H */ diff --git a/services/std_svc/std_svc_setup.c b/services/std_svc/std_svc_setup.c index 1917d0a14..39db42913 100644 --- a/services/std_svc/std_svc_setup.c +++ b/services/std_svc/std_svc_setup.c @@ -13,7 +13,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -158,6 +160,16 @@ static uintptr_t std_svc_smc_handler(uint32_t smc_fid, flags); } #endif +#if ENABLE_RME + /* + * Granule transition service interface functions (GTSI) are allocated + * from the Std service range. Call the RMM dispatcher to handle calls. + */ + if (is_gtsi_fid(smc_fid)) { + return rmmd_gtsi_handler(smc_fid, x1, x2, x3, x4, cookie, + handle, flags); + } +#endif #if SMC_PCI_SUPPORT if (is_pci_fid(smc_fid)) { From 50a3056a3cd33d395e8712e1d1e67a8840bf3db1 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Fri, 9 Jul 2021 15:32:21 -0500 Subject: [PATCH 06/21] feat(rme): add Test Realm Payload (TRP) TRP is a small test payload that implements Realm Monitor Management (RMM) functionalities. RMM runs in the Realm world (R-EL2) and manages the execution of Realm VMs and their interaction with the hypervisor in Normal world. TRP is used to test the interface between RMM and Normal world software, known as Realm Management Interface (RMI). Current functions includes returning RMM version and transitioning granules from Non-secure to Realm world and vice versa. 
More information about RMM can be found at: https://developer.arm.com/documentation/den0125/latest Signed-off-by: Zelalem Aweke Change-Id: Ic7b9a1e1f3142ef6458d40150d0b4ba6bd723ea2 --- include/services/trp/platform_trp.h | 15 +++ plat/arm/board/fvp/include/platform_def.h | 3 + plat/arm/board/fvp/trp/trp-fvp.mk | 12 ++ plat/arm/common/trp/arm_trp.mk | 10 ++ plat/arm/common/trp/arm_trp_setup.c | 40 +++++++ services/std_svc/rmmd/rmmd.mk | 3 + services/std_svc/rmmd/trp/linker.lds | 71 +++++++++++ services/std_svc/rmmd/trp/trp.mk | 20 ++++ services/std_svc/rmmd/trp/trp_entry.S | 74 ++++++++++++ services/std_svc/rmmd/trp/trp_main.c | 136 ++++++++++++++++++++++ services/std_svc/rmmd/trp/trp_private.h | 50 ++++++++ 11 files changed, 434 insertions(+) create mode 100644 include/services/trp/platform_trp.h create mode 100644 plat/arm/board/fvp/trp/trp-fvp.mk create mode 100644 plat/arm/common/trp/arm_trp.mk create mode 100644 plat/arm/common/trp/arm_trp_setup.c create mode 100644 services/std_svc/rmmd/trp/linker.lds create mode 100644 services/std_svc/rmmd/trp/trp.mk create mode 100644 services/std_svc/rmmd/trp/trp_entry.S create mode 100644 services/std_svc/rmmd/trp/trp_main.c create mode 100644 services/std_svc/rmmd/trp/trp_private.h diff --git a/include/services/trp/platform_trp.h b/include/services/trp/platform_trp.h new file mode 100644 index 000000000..b34da8512 --- /dev/null +++ b/include/services/trp/platform_trp.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef PLATFORM_TRP_H +#define PLATFORM_TRP_H + +/******************************************************************************* + * Mandatory TRP functions (only if platform contains a TRP) + ******************************************************************************/ +void trp_early_platform_setup(void); + +#endif /* PLATFORM_TRP_H */ diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index 8b25a5463..a716546a4 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -222,6 +222,9 @@ #define PLAT_ARM_TSP_UART_BASE V2M_IOFPGA_UART2_BASE #define PLAT_ARM_TSP_UART_CLK_IN_HZ V2M_IOFPGA_UART2_CLK_IN_HZ +#define PLAT_ARM_TRP_UART_BASE V2M_IOFPGA_UART3_BASE +#define PLAT_ARM_TRP_UART_CLK_IN_HZ V2M_IOFPGA_UART3_CLK_IN_HZ + #define PLAT_FVP_SMMUV3_BASE UL(0x2b400000) /* CCI related constants */ diff --git a/plat/arm/board/fvp/trp/trp-fvp.mk b/plat/arm/board/fvp/trp/trp-fvp.mk new file mode 100644 index 000000000..a450541d3 --- /dev/null +++ b/plat/arm/board/fvp/trp/trp-fvp.mk @@ -0,0 +1,12 @@ +# +# Copyright (c) 2021, Arm Limited. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +# TRP source files specific to FVP platform + +RMM_SOURCES += plat/arm/board/fvp/aarch64/fvp_helpers.S + +include plat/arm/common/trp/arm_trp.mk + diff --git a/plat/arm/common/trp/arm_trp.mk b/plat/arm/common/trp/arm_trp.mk new file mode 100644 index 000000000..997111f99 --- /dev/null +++ b/plat/arm/common/trp/arm_trp.mk @@ -0,0 +1,10 @@ +# +# Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. 
+# +# SPDX-License-Identifier: BSD-3-Clause +# + +# TRP source files common to ARM standard platforms +RMM_SOURCES += plat/arm/common/trp/arm_trp_setup.c \ + plat/arm/common/arm_topology.c \ + plat/common/aarch64/platform_mp_stack.S diff --git a/plat/arm/common/trp/arm_trp_setup.c b/plat/arm/common/trp/arm_trp_setup.c new file mode 100644 index 000000000..8e4829344 --- /dev/null +++ b/plat/arm/common/trp/arm_trp_setup.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include +#include +#include + +/******************************************************************************* + * Initialize the UART + ******************************************************************************/ +static console_t arm_trp_runtime_console; + +void arm_trp_early_platform_setup(void) +{ + /* + * Initialize a different console than already in use to display + * messages from trp + */ + int rc = console_pl011_register(PLAT_ARM_TRP_UART_BASE, + PLAT_ARM_TRP_UART_CLK_IN_HZ, + ARM_CONSOLE_BAUDRATE, + &arm_trp_runtime_console); + if (rc == 0) { + panic(); + } + + console_set_scope(&arm_trp_runtime_console, + CONSOLE_FLAG_BOOT | CONSOLE_FLAG_RUNTIME); +} + +void trp_early_platform_setup(void) +{ + arm_trp_early_platform_setup(); +} diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk index 57031fe17..b42568183 100644 --- a/services/std_svc/rmmd/rmmd.mk +++ b/services/std_svc/rmmd/rmmd.mk @@ -8,6 +8,9 @@ ifneq (${ARCH},aarch64) $(error "Error: RMMD is only supported on aarch64.") endif +include services/std_svc/rmmd/trp/trp.mk + RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \ ${ARCH}/rmmd_helpers.S \ rmmd_main.c) + diff --git a/services/std_svc/rmmd/trp/linker.lds b/services/std_svc/rmmd/trp/linker.lds new file mode 100644 index 000000000..2b7f38333 --- /dev/null +++ b/services/std_svc/rmmd/trp/linker.lds @@ -0,0 +1,71 @@ +/* + * (C) COPYRIGHT 2021 
Arm Limited or its affiliates. + * ALL RIGHTS RESERVED + */ + +#include +#include + +/* Mapped using 4K pages, requires us to align different sections with + * different property at the same granularity. */ +PAGE_SIZE_4K = 4096; + +OUTPUT_FORMAT("elf64-littleaarch64") +OUTPUT_ARCH(aarch64) +ENTRY(trp_head) + +MEMORY { + RAM (rwx): ORIGIN = RMM_BASE, LENGTH = RMM_LIMIT - RMM_BASE +} + + +SECTIONS +{ + . = RMM_BASE; + + .text : { + *(.head.text) + . = ALIGN(8); + *(.text*) + } >RAM + + . = ALIGN(PAGE_SIZE_4K); + + .rodata : { + *(.rodata*) + } >RAM + + . = ALIGN(PAGE_SIZE_4K); + + __RW_START__ = . ; + + .data : { + *(.data*) + } >RAM + + .bss (NOLOAD) : { + __BSS_START__ = .; + *(.bss*) + __BSS_END__ = .; + } >RAM + __BSS_SIZE__ = SIZEOF(.bss); + + + STACK_SECTION >RAM + + + /* + * Define a linker symbol to mark the end of the RW memory area for this + * image. + */ + __RW_END__ = .; + __RMM_END__ = .; + + + /DISCARD/ : { *(.dynstr*) } + /DISCARD/ : { *(.dynamic*) } + /DISCARD/ : { *(.plt*) } + /DISCARD/ : { *(.interp*) } + /DISCARD/ : { *(.gnu*) } + /DISCARD/ : { *(.note*) } +} diff --git a/services/std_svc/rmmd/trp/trp.mk b/services/std_svc/rmmd/trp/trp.mk new file mode 100644 index 000000000..a4f6e03e0 --- /dev/null +++ b/services/std_svc/rmmd/trp/trp.mk @@ -0,0 +1,20 @@ +# +# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +RMM_SOURCES += services/std_svc/rmmd/trp/trp_entry.S \ + services/std_svc/rmmd/trp/trp_main.c + +RMM_LINKERFILE := services/std_svc/rmmd/trp/linker.lds + +# Include the platform-specific TRP Makefile +# If no platform-specific TRP Makefile exists, it means TRP is not supported +# on this platform. 
+TRP_PLAT_MAKEFILE := $(wildcard ${PLAT_DIR}/trp/trp-${PLAT}.mk) +ifeq (,${TRP_PLAT_MAKEFILE}) + $(error TRP is not supported on platform ${PLAT}) +else + include ${TRP_PLAT_MAKEFILE} +endif diff --git a/services/std_svc/rmmd/trp/trp_entry.S b/services/std_svc/rmmd/trp/trp_entry.S new file mode 100644 index 000000000..23b48fb42 --- /dev/null +++ b/services/std_svc/rmmd/trp/trp_entry.S @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include "trp_private.h" + +.global trp_head +.global trp_smc + +.section ".head.text", "ax" + + /* --------------------------------------------- + * Populate the params in x0-x7 from the pointer + * to the smc args structure in x0. + * --------------------------------------------- + */ + .macro restore_args_call_smc + ldp x6, x7, [x0, #TRP_ARG6] + ldp x4, x5, [x0, #TRP_ARG4] + ldp x2, x3, [x0, #TRP_ARG2] + ldp x0, x1, [x0, #TRP_ARG0] + smc #0 + .endm + + /* --------------------------------------------- + * Entry point for TRP + * --------------------------------------------- + */ +trp_head: + bl plat_set_my_stack + bl plat_is_my_cpu_primary + cbz x0, trp_secondary_cpu_entry + + /* --------------------------------------------- + * Zero out BSS section + * --------------------------------------------- + */ + ldr x0, =__BSS_START__ + ldr x1, =__BSS_SIZE__ + bl zeromem + + bl trp_setup + + bl trp_main +trp_secondary_cpu_entry: + mov_imm x0, RMI_RMM_REQ_COMPLETE + mov x1, xzr + smc #0 + b trp_handler + + /* --------------------------------------------- + * Direct SMC call to BL31 service provided by + * RMM Dispatcher + * --------------------------------------------- + */ +func trp_smc + restore_args_call_smc + ret +endfunc trp_smc + + /* --------------------------------------------- + * RMI call handler + * --------------------------------------------- + */ +func trp_handler + bl trp_rmi_handler + restore_args_call_smc + b 
trp_handler +endfunc trp_handler diff --git a/services/std_svc/rmmd/trp/trp_main.c b/services/std_svc/rmmd/trp/trp_main.c new file mode 100644 index 000000000..2ab9eccfd --- /dev/null +++ b/services/std_svc/rmmd/trp/trp_main.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + + +#include +#include +#include +#include +#include + +#include +#include "trp_private.h" + +/******************************************************************************* + * Per cpu data structure to populate parameters for an SMC in C code and use + * a pointer to this structure in assembler code to populate x0-x7 + ******************************************************************************/ +static trp_args_t trp_smc_args[PLATFORM_CORE_COUNT]; + +/******************************************************************************* + * Set the arguments for SMC call + ******************************************************************************/ +static trp_args_t *set_smc_args(uint64_t arg0, + uint64_t arg1, + uint64_t arg2, + uint64_t arg3, + uint64_t arg4, + uint64_t arg5, + uint64_t arg6, + uint64_t arg7) +{ + uint32_t linear_id; + trp_args_t *pcpu_smc_args; + + /* + * Return to Secure Monitor by raising an SMC. The results of the + * service are passed as an arguments to the SMC + */ + linear_id = plat_my_core_pos(); + pcpu_smc_args = &trp_smc_args[linear_id]; + write_trp_arg(pcpu_smc_args, TRP_ARG0, arg0); + write_trp_arg(pcpu_smc_args, TRP_ARG1, arg1); + write_trp_arg(pcpu_smc_args, TRP_ARG2, arg2); + write_trp_arg(pcpu_smc_args, TRP_ARG3, arg3); + write_trp_arg(pcpu_smc_args, TRP_ARG4, arg4); + write_trp_arg(pcpu_smc_args, TRP_ARG5, arg5); + write_trp_arg(pcpu_smc_args, TRP_ARG6, arg6); + write_trp_arg(pcpu_smc_args, TRP_ARG7, arg7); + + return pcpu_smc_args; +} + +/******************************************************************************* + * Setup function for TRP. 
+ ******************************************************************************/ +void trp_setup(void) +{ + /* Perform early platform-specific setup */ + trp_early_platform_setup(); +} + +/* Main function for TRP */ +void trp_main(void) +{ + NOTICE("TRP: %s\n", version_string); + NOTICE("TRP: %s\n", build_message); + INFO("TRP: Memory base : 0x%lx\n", (unsigned long)RMM_BASE); + INFO("TRP: Total size : 0x%lx bytes\n", (unsigned long)(RMM_END + - RMM_BASE)); +} + +/******************************************************************************* + * Returning RMI version back to Normal World + ******************************************************************************/ +static trp_args_t *trp_ret_rmi_version(void) +{ + VERBOSE("RMM version is %u.%u\n", RMI_ABI_VERSION_MAJOR, + RMI_ABI_VERSION_MINOR); + return set_smc_args(RMI_RMM_REQ_COMPLETE, RMI_ABI_VERSION, + 0, 0, 0, 0, 0, 0); +} + +/******************************************************************************* + * Transitioning granule of NON-SECURE type to REALM type + ******************************************************************************/ +static trp_args_t *trp_asc_mark_realm(unsigned long long x1) +{ + unsigned long long ret; + + VERBOSE("Delegating granule 0x%llx\n", x1); + ret = trp_smc(set_smc_args(SMC_ASC_MARK_REALM, x1, 0, 0, 0, 0, 0, 0)); + + if (ret != 0ULL) { + ERROR("Granule transition from NON-SECURE type to REALM type " + "failed 0x%llx\n", ret); + } + return set_smc_args(RMI_RMM_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0); +} + +/******************************************************************************* + * Transitioning granule of REALM type to NON-SECURE type + ******************************************************************************/ +static trp_args_t *trp_asc_mark_nonsecure(unsigned long long x1) +{ + unsigned long long ret; + + VERBOSE("Undelegating granule 0x%llx\n", x1); + ret = trp_smc(set_smc_args(SMC_ASC_MARK_NONSECURE, x1, 0, 0, 0, 0, 0, 0)); + + if (ret != 0ULL) { + 
ERROR("Granule transition from REALM type to NON-SECURE type " + "failed 0x%llx\n", ret); + } + return set_smc_args(RMI_RMM_REQ_COMPLETE, ret, 0, 0, 0, 0, 0, 0); +} + +/******************************************************************************* + * Main RMI SMC handler function + ******************************************************************************/ +trp_args_t *trp_rmi_handler(unsigned long fid, unsigned long long x1) +{ + switch (fid) { + case RMI_RMM_REQ_VERSION: + return trp_ret_rmi_version(); + case RMI_RMM_GRANULE_DELEGATE: + return trp_asc_mark_realm(x1); + case RMI_RMM_GRANULE_UNDELEGATE: + return trp_asc_mark_nonsecure(x1); + default: + ERROR("Invalid SMC code to %s, FID %lu\n", __func__, fid); + } + return set_smc_args(SMC_UNK, 0, 0, 0, 0, 0, 0, 0); +} diff --git a/services/std_svc/rmmd/trp/trp_private.h b/services/std_svc/rmmd/trp/trp_private.h new file mode 100644 index 000000000..923139007 --- /dev/null +++ b/services/std_svc/rmmd/trp/trp_private.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2021, Arm Limited and Contributors. All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef TRP_PRIVATE_H +#define TRP_PRIVATE_H + +/* Definitions to help the assembler access the SMC/ERET args structure */ +#define TRP_ARGS_SIZE TRP_ARGS_END +#define TRP_ARG0 0x0 +#define TRP_ARG1 0x8 +#define TRP_ARG2 0x10 +#define TRP_ARG3 0x18 +#define TRP_ARG4 0x20 +#define TRP_ARG5 0x28 +#define TRP_ARG6 0x30 +#define TRP_ARG7 0x38 +#define TRP_ARGS_END 0x40 + +#ifndef __ASSEMBLER__ + +#include + +/* Data structure to hold SMC arguments */ +typedef struct trp_args { + uint64_t regs[TRP_ARGS_END >> 3]; +} __aligned(CACHE_WRITEBACK_GRANULE) trp_args_t; + +#define write_trp_arg(args, offset, val) (((args)->regs[offset >> 3]) \ + = val) + +/* Definitions for RMI VERSION */ +#define RMI_ABI_VERSION_MAJOR U(0x0) +#define RMI_ABI_VERSION_MINOR U(0x0) +#define RMI_ABI_VERSION ((RMI_ABI_VERSION_MAJOR << 16) | \ + RMI_ABI_VERSION_MINOR) + +/* Helper to issue SMC calls to BL31 */ +uint64_t trp_smc(trp_args_t *); + +/* The main function to executed only by Primary CPU */ +void trp_main(void); + +/* Setup TRP. Executed only by Primary CPU */ +void trp_setup(void); + +#endif /* __ASSEMBLER__ */ +#endif /* TRP_PRIVATE_H */ From c5ea4f8a6679131010636eb524d2a15b709d0196 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Fri, 9 Jul 2021 17:54:30 -0500 Subject: [PATCH 07/21] feat(rme): add context management changes for FEAT_RME This patch adds a new context for realm world and realm world awareness in context management. 
Signed-off-by: Zelalem Aweke Signed-off-by: Subhasish Ghosh Change-Id: Ic17469393603e789d7adc025880346bc3d6233d7 --- bl31/bl31_context_mgmt.c | 18 +++--- include/lib/el3_runtime/aarch64/context.h | 13 ++-- include/lib/el3_runtime/cpu_data.h | 73 +++++++++++++++++++---- lib/el3_runtime/aarch64/context_mgmt.c | 51 ++++++++++++---- 4 files changed, 116 insertions(+), 39 deletions(-) diff --git a/bl31/bl31_context_mgmt.c b/bl31/bl31_context_mgmt.c index 9175ee35d..34f69ade9 100644 --- a/bl31/bl31_context_mgmt.c +++ b/bl31/bl31_context_mgmt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -19,9 +19,9 @@ ******************************************************************************/ void *cm_get_context(uint32_t security_state) { - assert(security_state <= NON_SECURE); + assert(sec_state_is_valid(security_state)); - return get_cpu_data(cpu_context[security_state]); + return get_cpu_data(cpu_context[get_cpu_context_index(security_state)]); } /******************************************************************************* @@ -30,9 +30,10 @@ void *cm_get_context(uint32_t security_state) ******************************************************************************/ void cm_set_context(void *context, uint32_t security_state) { - assert(security_state <= NON_SECURE); + assert(sec_state_is_valid(security_state)); - set_cpu_data(cpu_context[security_state], context); + set_cpu_data(cpu_context[get_cpu_context_index(security_state)], + context); } /******************************************************************************* @@ -46,7 +47,8 @@ void *cm_get_context_by_index(unsigned int cpu_idx, { assert(sec_state_is_valid(security_state)); - return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]); + return get_cpu_data_by_index(cpu_idx, + cpu_context[get_cpu_context_index(security_state)]); 
} /******************************************************************************* @@ -58,5 +60,7 @@ void cm_set_context_by_index(unsigned int cpu_idx, void *context, { assert(sec_state_is_valid(security_state)); - set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context); + set_cpu_data_by_index(cpu_idx, + cpu_context[get_cpu_context_index(security_state)], + context); } diff --git a/include/lib/el3_runtime/aarch64/context.h b/include/lib/el3_runtime/aarch64/context.h index c3f41179f..698e20876 100644 --- a/include/lib/el3_runtime/aarch64/context.h +++ b/include/lib/el3_runtime/aarch64/context.h @@ -405,13 +405,12 @@ DEFINE_REG_STRUCT(pauth, CTX_PAUTH_REGS_ALL); = (uint64_t) (val)) /* - * Top-level context structure which is used by EL3 firmware to - * preserve the state of a core at EL1 in one of the two security - * states and save enough EL3 meta data to be able to return to that - * EL and security state. The context management library will be used - * to ensure that SP_EL3 always points to an instance of this - * structure at exception entry and exit. Each instance will - * correspond to either the secure or the non-secure state. + * Top-level context structure which is used by EL3 firmware to preserve + * the state of a core at the next lower EL in a given security state and + * save enough EL3 meta data to be able to return to that EL and security + * state. The context management library will be used to ensure that + * SP_EL3 always points to an instance of this structure at exception + * entry and exit. */ typedef struct cpu_context { gp_regs_t gpregs_ctx; diff --git a/include/lib/el3_runtime/cpu_data.h b/include/lib/el3_runtime/cpu_data.h index 3d57a5c59..2c7b61967 100644 --- a/include/lib/el3_runtime/cpu_data.h +++ b/include/lib/el3_runtime/cpu_data.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2021, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2014-2021, Arm Limited and Contributors. All rights reserved. 
* * SPDX-License-Identifier: BSD-3-Clause */ @@ -19,16 +19,25 @@ /* 8-bytes aligned size of psci_cpu_data structure */ #define PSCI_CPU_DATA_SIZE_ALIGNED ((PSCI_CPU_DATA_SIZE + 7) & ~7) +#if ENABLE_RME +/* Size of cpu_context array */ +#define CPU_DATA_CONTEXT_NUM 3 /* Offset of cpu_ops_ptr, size 8 bytes */ +#define CPU_DATA_CPU_OPS_PTR 0x18 +#else /* ENABLE_RME */ +#define CPU_DATA_CONTEXT_NUM 2 #define CPU_DATA_CPU_OPS_PTR 0x10 +#endif /* ENABLE_RME */ #if ENABLE_PAUTH /* 8-bytes aligned offset of apiakey[2], size 16 bytes */ -#define CPU_DATA_APIAKEY_OFFSET (0x18 + PSCI_CPU_DATA_SIZE_ALIGNED) -#define CPU_DATA_CRASH_BUF_OFFSET (CPU_DATA_APIAKEY_OFFSET + 0x10) -#else -#define CPU_DATA_CRASH_BUF_OFFSET (0x18 + PSCI_CPU_DATA_SIZE_ALIGNED) -#endif /* ENABLE_PAUTH */ +#define CPU_DATA_APIAKEY_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \ + + CPU_DATA_CPU_OPS_PTR) +#define CPU_DATA_CRASH_BUF_OFFSET (0x10 + CPU_DATA_APIAKEY_OFFSET) +#else /* ENABLE_PAUTH */ +#define CPU_DATA_CRASH_BUF_OFFSET (0x8 + PSCI_CPU_DATA_SIZE_ALIGNED \ + + CPU_DATA_CPU_OPS_PTR) +#endif /* ENABLE_PAUTH */ /* need enough space in crash buffer to save 8 registers */ #define CPU_DATA_CRASH_BUF_SIZE 64 @@ -65,11 +74,14 @@ #ifndef __ASSEMBLER__ +#include +#include + #include #include #include + #include -#include /* Offsets for the cpu_data structure */ #define CPU_DATA_PSCI_LOCK_OFFSET __builtin_offsetof\ @@ -80,27 +92,34 @@ (cpu_data_t, platform_cpu_data) #endif +typedef enum context_pas { + CPU_CONTEXT_SECURE = 0, + CPU_CONTEXT_NS, +#if ENABLE_RME + CPU_CONTEXT_REALM, +#endif + CPU_CONTEXT_NUM +} context_pas_t; + /******************************************************************************* * Function & variable prototypes ******************************************************************************/ /******************************************************************************* * Cache of frequently used per-cpu data: - * Pointers to non-secure and secure security state contexts + * Pointers to 
non-secure, realm, and secure security state contexts * Address of the crash stack * It is aligned to the cache line boundary to allow efficient concurrent * manipulation of these pointers on different cpus * - * TODO: Add other commonly used variables to this (tf_issues#90) - * * The data structure and the _cpu_data accessors should not be used directly * by components that have per-cpu members. The member access macros should be * used for this. ******************************************************************************/ typedef struct cpu_data { #ifdef __aarch64__ - void *cpu_context[2]; -#endif + void *cpu_context[CPU_DATA_CONTEXT_NUM]; +#endif /* __aarch64__ */ uintptr_t cpu_ops_ptr; struct psci_cpu_data psci_svc_cpu_data; #if ENABLE_PAUTH @@ -122,6 +141,11 @@ typedef struct cpu_data { extern cpu_data_t percpu_data[PLATFORM_CORE_COUNT]; +#ifdef __aarch64__ +CASSERT(CPU_DATA_CONTEXT_NUM == CPU_CONTEXT_NUM, + assert_cpu_data_context_num_mismatch); +#endif + #if ENABLE_PAUTH CASSERT(CPU_DATA_APIAKEY_OFFSET == __builtin_offsetof (cpu_data_t, apiakey), @@ -160,6 +184,31 @@ static inline struct cpu_data *_cpu_data(void) struct cpu_data *_cpu_data(void); #endif +/* + * Returns the index of the cpu_context array for the given security state. + * All accesses to cpu_context should be through this helper to make sure + * an access is not out-of-bounds. The function assumes security_state is + * valid. 
+ */ +static inline context_pas_t get_cpu_context_index(uint32_t security_state) +{ + if (security_state == SECURE) { + return CPU_CONTEXT_SECURE; + } else { +#if ENABLE_RME + if (security_state == NON_SECURE) { + return CPU_CONTEXT_NS; + } else { + assert(security_state == REALM); + return CPU_CONTEXT_REALM; + } +#else + assert(security_state == NON_SECURE); + return CPU_CONTEXT_NS; +#endif + } +} + /************************************************************************** * APIs for initialising and accessing per-cpu data *************************************************************************/ diff --git a/lib/el3_runtime/aarch64/context_mgmt.c b/lib/el3_runtime/aarch64/context_mgmt.c index 08022d4ad..0ec7e7e1c 100644 --- a/lib/el3_runtime/aarch64/context_mgmt.c +++ b/lib/el3_runtime/aarch64/context_mgmt.c @@ -93,24 +93,41 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) scr_el3 = read_scr(); scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT | SCR_ST_BIT | SCR_HCE_BIT); + +#if ENABLE_RME + /* When RME support is enabled, clear the NSE bit as well. */ + scr_el3 &= ~SCR_NSE_BIT; +#endif /* ENABLE_RME */ + /* * SCR_NS: Set the security state of the next EL. */ - if (security_state != SECURE) + if (security_state == NON_SECURE) { scr_el3 |= SCR_NS_BIT; + } + +#if ENABLE_RME + /* Check for realm state if RME support enabled. */ + if (security_state == REALM) { + scr_el3 |= SCR_NS_BIT | SCR_NSE_BIT | SCR_EnSCXT_BIT; + } +#endif /* ENABLE_RME */ + /* * SCR_EL3.RW: Set the execution state, AArch32 or AArch64, for next * Exception level as specified by SPSR. */ - if (GET_RW(ep->spsr) == MODE_RW_64) + if (GET_RW(ep->spsr) == MODE_RW_64) { scr_el3 |= SCR_RW_BIT; + } /* * SCR_EL3.ST: Traps Secure EL1 accesses to the Counter-timer Physical * Secure timer registers to EL3, from AArch64 state only, if specified * by the entrypoint attributes. 
*/ - if (EP_GET_ST(ep->h.attr) != 0U) + if (EP_GET_ST(ep->h.attr) != 0U) { scr_el3 |= SCR_ST_BIT; + } /* * If FEAT_HCX is enabled, enable access to HCRX_EL2 by setting @@ -152,8 +169,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) * If the Secure world wants to use pointer authentication, * CTX_INCLUDE_PAUTH_REGS must be set to 1. */ - if (security_state == NON_SECURE) + if (security_state == NON_SECURE) { scr_el3 |= SCR_API_BIT | SCR_APK_BIT; + } #endif /* !CTX_INCLUDE_PAUTH_REGS */ #if !CTX_INCLUDE_MTE_REGS || ENABLE_ASSERTIONS @@ -188,8 +206,14 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) /* * SCR_EL3.IRQ, SCR_EL3.FIQ: Enable the physical FIQ and IRQ routing as * indicated by the interrupt routing model for BL31. + * + * TODO: The interrupt routing model code is not updated for REALM + * state. Use the default values of IRQ = FIQ = 0 for REALM security + * state for now. */ - scr_el3 |= get_scr_el3_from_routing_model(security_state); + if (security_state != REALM) { + scr_el3 |= get_scr_el3_from_routing_model(security_state); + } #endif /* Save the initialized value of CPTR_EL3 register */ @@ -256,9 +280,9 @@ void cm_setup_context(cpu_context_t *ctx, const entry_point_info_t *ep) * required by PSCI specification) */ sctlr_elx = (EP_GET_EE(ep->h.attr) != 0U) ? SCTLR_EE_BIT : 0U; - if (GET_RW(ep->spsr) == MODE_RW_64) + if (GET_RW(ep->spsr) == MODE_RW_64) { sctlr_elx |= SCTLR_EL1_RES1; - else { + } else { /* * If the target execution state is AArch32 then the following * fields need to be set. @@ -413,7 +437,8 @@ void cm_init_my_context(const entry_point_info_t *ep) } /******************************************************************************* - * Prepare the CPU system registers for first entry into secure or normal world + * Prepare the CPU system registers for first entry into realm, secure, or + * normal world. 
* * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized * If execution is requested to non-secure EL1 or svc mode, and the CPU supports @@ -497,7 +522,7 @@ void cm_prepare_el3_exit(uint32_t security_state) * architecturally UNKNOWN on reset and are set to zero * except for field(s) listed below. * - * CNTHCTL_EL2.EL1PCEN: Set to one to disable traps to + * CNTHCTL_EL2.EL1PTEN: Set to one to disable traps to * Hyp mode of Non-secure EL0 and EL1 accesses to the * physical timer registers. * @@ -645,10 +670,10 @@ void cm_el2_sysregs_context_save(uint32_t security_state) u_register_t scr_el3 = read_scr(); /* - * Always save the non-secure EL2 context, only save the + * Always save the non-secure and realm EL2 context, only save the * S-EL2 context if S-EL2 is enabled. */ - if ((security_state == NON_SECURE) || + if ((security_state != SECURE) || ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) { cpu_context_t *ctx; @@ -667,10 +692,10 @@ void cm_el2_sysregs_context_restore(uint32_t security_state) u_register_t scr_el3 = read_scr(); /* - * Always restore the non-secure EL2 context, only restore the + * Always restore the non-secure and realm EL2 context, only restore the * S-EL2 context if S-EL2 is enabled. */ - if ((security_state == NON_SECURE) || + if ((security_state != SECURE) || ((security_state == SECURE) && ((scr_el3 & SCR_EEL2_BIT) != 0U))) { cpu_context_t *ctx; From 434d0491c5504142e7077ff75471441e62855412 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Sun, 11 Jul 2021 17:25:48 -0500 Subject: [PATCH 08/21] refactor(makefile): remove BL prefixes in build macros The current Makefile assumes all TF-A binaries have BL prefixes (BL1, BL2, etc). Now that we have other binary names with FEAT_RME feature, remove this assumption. With this change, we need to pass the full name of a binary when using build macros. 
Signed-off-by: Zelalem Aweke Change-Id: I44e094b2366aa526f807d92dffa709390d14d145 --- Makefile | 14 ++-- make_helpers/build_macros.mk | 84 ++++++++++---------- plat/marvell/armada/a8k/common/a8k_common.mk | 2 +- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/Makefile b/Makefile index 59d14ba02..d6e99f338 100644 --- a/Makefile +++ b/Makefile @@ -1148,7 +1148,7 @@ $(eval $(call MAKE_LIB,c)) # Expand build macros for the different images ifeq (${NEED_BL1},yes) -$(eval $(call MAKE_BL,1)) +$(eval $(call MAKE_BL,bl1)) endif ifeq (${NEED_BL2},yes) @@ -1157,7 +1157,7 @@ FIP_BL2_ARGS := tb-fw endif $(if ${BL2}, $(eval $(call TOOL_ADD_IMG,bl2,--${FIP_BL2_ARGS})),\ - $(eval $(call MAKE_BL,2,${FIP_BL2_ARGS}))) + $(eval $(call MAKE_BL,bl2,${FIP_BL2_ARGS}))) endif ifeq (${NEED_SCP_BL2},yes) @@ -1170,10 +1170,10 @@ BL31_SOURCES += ${SPD_SOURCES} BL31_SOURCES := $(sort ${BL31_SOURCES}) ifneq (${DECRYPTION_SUPPORT},none) $(if ${BL31}, $(eval $(call TOOL_ADD_IMG,bl31,--soc-fw,,$(ENCRYPT_BL31))),\ - $(eval $(call MAKE_BL,31,soc-fw,,$(ENCRYPT_BL31)))) + $(eval $(call MAKE_BL,bl31,soc-fw,,$(ENCRYPT_BL31)))) else $(if ${BL31}, $(eval $(call TOOL_ADD_IMG,bl31,--soc-fw)),\ - $(eval $(call MAKE_BL,31,soc-fw))) + $(eval $(call MAKE_BL,bl31,soc-fw))) endif endif @@ -1186,10 +1186,10 @@ BL32_SOURCES := $(sort ${BL32_SOURCES}) BUILD_BL32 := $(if $(BL32),,$(if $(BL32_SOURCES),1)) ifneq (${DECRYPTION_SUPPORT},none) -$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,32,tos-fw,,$(ENCRYPT_BL32))),\ +$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,bl32,tos-fw,,$(ENCRYPT_BL32))),\ $(eval $(call TOOL_ADD_IMG,bl32,--tos-fw,,$(ENCRYPT_BL32)))) else -$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,32,tos-fw)),\ +$(if ${BUILD_BL32}, $(eval $(call MAKE_BL,bl32,tos-fw)),\ $(eval $(call TOOL_ADD_IMG,bl32,--tos-fw))) endif endif @@ -1201,7 +1201,7 @@ endif ifeq (${NEED_BL2U},yes) $(if ${BL2U}, $(eval $(call TOOL_ADD_IMG,bl2u,--ap-fwu-cfg,FWU_)),\ - $(eval $(call MAKE_BL,2u,ap-fwu-cfg,FWU_))) + $(eval $(call 
MAKE_BL,bl2u,ap-fwu-cfg,FWU_))) endif # Expand build macros for the different images diff --git a/make_helpers/build_macros.mk b/make_helpers/build_macros.mk index 86550288c..12aaee684 100644 --- a/make_helpers/build_macros.mk +++ b/make_helpers/build_macros.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. +# Copyright (c) 2015-2021, ARM Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # @@ -98,41 +98,41 @@ $(if $(word $(2), $($(1))),\ endef # IMG_LINKERFILE defines the linker script corresponding to a BL stage -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage define IMG_LINKERFILE - ${BUILD_DIR}/bl$(1).ld + ${BUILD_DIR}/$(1).ld endef # IMG_MAPFILE defines the output file describing the memory map corresponding # to a BL stage -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage define IMG_MAPFILE - ${BUILD_DIR}/bl$(1).map + ${BUILD_DIR}/$(1).map endef # IMG_ELF defines the elf file corresponding to a BL stage -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage define IMG_ELF - ${BUILD_DIR}/bl$(1).elf + ${BUILD_DIR}/$(1).elf endef # IMG_DUMP defines the symbols dump file corresponding to a BL stage -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage define IMG_DUMP - ${BUILD_DIR}/bl$(1).dump + ${BUILD_DIR}/$(1).dump endef # IMG_BIN defines the default image file corresponding to a BL stage -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage define IMG_BIN - ${BUILD_PLAT}/bl$(1).bin + ${BUILD_PLAT}/$(1).bin endef # IMG_ENC_BIN defines the default encrypted image file corresponding to a # BL stage -# $(1) = BL stage (2, 30, 31, 32, 33) +# $(1) = BL stage define IMG_ENC_BIN - ${BUILD_PLAT}/bl$(1)_enc.bin + ${BUILD_PLAT}/$(1)_enc.bin endef # ENCRYPT_FW invokes enctool to encrypt firmware binary @@ -294,15 +294,15 @@ endef # MAKE_C builds a C source file and generates the dependency file # $(1) = output directory # $(2) = source file (%.c) -# $(3) = BL 
stage (1, 2, 2u, 31, 32) +# $(3) = BL stage define MAKE_C $(eval OBJ := $(1)/$(patsubst %.c,%.o,$(notdir $(2)))) $(eval DEP := $(patsubst %.o,%.d,$(OBJ))) -$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3))) -$(eval BL_CFLAGS := $(BL$(call uppercase,$(3))_CFLAGS)) +$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3))) +$(eval BL_CFLAGS := $($(call uppercase,$(3))_CFLAGS)) -$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs +$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs $$(ECHO) " CC $$<" $$(Q)$$(CC) $$(LTO_CFLAGS) $$(TF_CFLAGS) $$(CFLAGS) $(BL_CPPFLAGS) $(BL_CFLAGS) $(MAKE_DEP) -c $$< -o $$@ @@ -314,15 +314,15 @@ endef # MAKE_S builds an assembly source file and generates the dependency file # $(1) = output directory # $(2) = assembly file (%.S) -# $(3) = BL stage (1, 2, 2u, 31, 32) +# $(3) = BL stage define MAKE_S $(eval OBJ := $(1)/$(patsubst %.S,%.o,$(notdir $(2)))) $(eval DEP := $(patsubst %.o,%.d,$(OBJ))) -$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3))) -$(eval BL_ASFLAGS := $(BL$(call uppercase,$(3))_ASFLAGS)) +$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3))) +$(eval BL_ASFLAGS := $($(call uppercase,$(3))_ASFLAGS)) -$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | bl$(3)_dirs +$(OBJ): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs $$(ECHO) " AS $$<" $$(Q)$$(AS) $$(ASFLAGS) $(BL_CPPFLAGS) $(BL_ASFLAGS) $(MAKE_DEP) -c $$< -o $$@ @@ -334,13 +334,13 @@ endef # MAKE_LD generate the linker script using the C preprocessor # $(1) = output linker script # $(2) = input template -# $(3) = BL stage (1, 2, 2u, 31, 32) +# $(3) = BL stage define MAKE_LD $(eval DEP := $(1).d) -$(eval BL_CPPFLAGS := $(BL$(call uppercase,$(3))_CPPFLAGS) -DIMAGE_BL$(call uppercase,$(3))) +$(eval BL_CPPFLAGS := $($(call uppercase,$(3))_CPPFLAGS) -DIMAGE_$(call uppercase,$(3))) -$(1): $(2) $(filter-out 
%.d,$(MAKEFILE_LIST)) | bl$(3)_dirs +$(1): $(2) $(filter-out %.d,$(MAKEFILE_LIST)) | $(3)_dirs $$(ECHO) " PP $$<" $$(Q)$$(CPP) $$(CPPFLAGS) $(BL_CPPFLAGS) $(TF_CFLAGS_$(ARCH)) -P -x assembler-with-cpp -D__LINKER__ $(MAKE_DEP) -o $$@ $$< @@ -368,7 +368,7 @@ endef # MAKE_OBJS builds both C and assembly source files # $(1) = output directory # $(2) = list of source files (both C and assembly) -# $(3) = BL stage (1, 2, 2u, 31, 32) +# $(3) = BL stage define MAKE_OBJS $(eval C_OBJS := $(filter %.c,$(2))) $(eval REMAIN := $(filter-out %.c,$(2))) @@ -445,13 +445,13 @@ endef # MAKE_BL macro defines the targets and options to build each BL image. # Arguments: -# $(1) = BL stage (1, 2, 2u, 31, 32) +# $(1) = BL stage # $(2) = FIP command line option (if empty, image will not be included in the FIP) # $(3) = FIP prefix (optional) (if FWU_, target is fwu_fip instead of fip) # $(4) = BL encryption flag (optional) (0, 1) define MAKE_BL - $(eval BUILD_DIR := ${BUILD_PLAT}/bl$(1)) - $(eval BL_SOURCES := $(BL$(call uppercase,$(1))_SOURCES)) + $(eval BUILD_DIR := ${BUILD_PLAT}/$(1)) + $(eval BL_SOURCES := $($(call uppercase,$(1))_SOURCES)) $(eval SOURCES := $(BL_SOURCES) $(BL_COMMON_SOURCES) $(PLAT_BL_COMMON_SOURCES)) $(eval OBJS := $(addprefix $(BUILD_DIR)/,$(call SOURCES_TO_OBJS,$(SOURCES)))) $(eval LINKERFILE := $(call IMG_LINKERFILE,$(1))) @@ -460,8 +460,8 @@ define MAKE_BL $(eval DUMP := $(call IMG_DUMP,$(1))) $(eval BIN := $(call IMG_BIN,$(1))) $(eval ENC_BIN := $(call IMG_ENC_BIN,$(1))) - $(eval BL_LINKERFILE := $(BL$(call uppercase,$(1))_LINKERFILE)) - $(eval BL_LIBS := $(BL$(call uppercase,$(1))_LIBS)) + $(eval BL_LINKERFILE := $($(call uppercase,$(1))_LINKERFILE)) + $(eval BL_LIBS := $($(call uppercase,$(1))_LIBS)) # We use sort only to get a list of unique object directory names. # ordering is not relevant but sort removes duplicates. 
$(eval TEMP_OBJ_DIRS := $(sort $(dir ${OBJS} ${LINKERFILE}))) @@ -475,21 +475,21 @@ $(eval $(call MAKE_PREREQ_DIR,${BUILD_DIR},${BUILD_PLAT})) $(eval $(foreach objd,${OBJ_DIRS},$(call MAKE_PREREQ_DIR,${objd},${BUILD_DIR}))) -.PHONY : bl${1}_dirs +.PHONY : ${1}_dirs # We use order-only prerequisites to ensure that directories are created, # but do not cause re-builds every time a file is written. -bl${1}_dirs: | ${OBJ_DIRS} +${1}_dirs: | ${OBJ_DIRS} $(eval $(call MAKE_OBJS,$(BUILD_DIR),$(SOURCES),$(1))) $(eval $(call MAKE_LD,$(LINKERFILE),$(BL_LINKERFILE),$(1))) -$(eval BL_LDFLAGS := $(BL$(call uppercase,$(1))_LDFLAGS)) +$(eval BL_LDFLAGS := $($(call uppercase,$(1))_LDFLAGS)) ifeq ($(USE_ROMLIB),1) $(ELF): romlib.bin endif -$(ELF): $(OBJS) $(LINKERFILE) | bl$(1)_dirs libraries $(BL_LIBS) +$(ELF): $(OBJS) $(LINKERFILE) | $(1)_dirs libraries $(BL_LIBS) $$(ECHO) " LD $$@" ifdef MAKE_BUILD_STRINGS $(call MAKE_BUILD_STRINGS, $(BUILD_DIR)/build_message.o) @@ -499,10 +499,10 @@ else $$(CC) $$(TF_CFLAGS) $$(CFLAGS) -xc -c - -o $(BUILD_DIR)/build_message.o endif ifneq ($(findstring armlink,$(notdir $(LD))),) - $$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) --entry=bl${1}_entrypoint \ + $$(Q)$$(LD) -o $$@ $$(TF_LDFLAGS) $$(LDFLAGS) $(BL_LDFLAGS) --entry=${1}_entrypoint \ --predefine="-D__LINKER__=$(__LINKER__)" \ --predefine="-DTF_CFLAGS=$(TF_CFLAGS)" \ - --map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/bl${1}.scat \ + --map --list="$(MAPFILE)" --scatter=${PLAT_DIR}/scat/${1}.scat \ $(LDPATHS) $(LIBWRAPPER) $(LDLIBS) $(BL_LIBS) \ $(BUILD_DIR)/build_message.o $(OBJS) else ifneq ($(findstring gcc,$(notdir $(LD))),) @@ -531,21 +531,21 @@ $(BIN): $(ELF) @echo "Built $$@ successfully" @${ECHO_BLANK_LINE} -.PHONY: bl$(1) +.PHONY: $(1) ifeq ($(DISABLE_BIN_GENERATION),1) -bl$(1): $(ELF) $(DUMP) +$(1): $(ELF) $(DUMP) else -bl$(1): $(BIN) $(DUMP) +$(1): $(BIN) $(DUMP) endif -all: bl$(1) +all: $(1) ifeq ($(4),1) $(call ENCRYPT_FW,$(BIN),$(ENC_BIN)) -$(if $(2),$(call 
TOOL_ADD_IMG_PAYLOAD,bl$(1),$(BIN),--$(2),$(ENC_BIN),$(3), \ +$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,$(1),$(BIN),--$(2),$(ENC_BIN),$(3), \ $(ENC_BIN))) else -$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,bl$(1),$(BIN),--$(2),$(BIN),$(3))) +$(if $(2),$(call TOOL_ADD_IMG_PAYLOAD,$(1),$(BIN),--$(2),$(BIN),$(3))) endif endef diff --git a/plat/marvell/armada/a8k/common/a8k_common.mk b/plat/marvell/armada/a8k/common/a8k_common.mk index 30e6280e7..9474d085d 100644 --- a/plat/marvell/armada/a8k/common/a8k_common.mk +++ b/plat/marvell/armada/a8k/common/a8k_common.mk @@ -166,7 +166,7 @@ endif BLE_PATH ?= $(PLAT_COMMON_BASE)/ble include ${BLE_PATH}/ble.mk -$(eval $(call MAKE_BL,e)) +$(eval $(call MAKE_BL,ble)) clean realclean distclean: mrvl_clean From 5b18de09e80f87963df9a2e451c47e2321b8643a Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Sun, 11 Jul 2021 18:33:20 -0500 Subject: [PATCH 09/21] feat(rme): add ENABLE_RME build option and support for RMM image The changes include: - A new build option (ENABLE_RME) to enable FEAT_RME - New image called RMM. RMM is R-EL2 firmware that manages Realms. When building TF-A, a path to RMM image can be specified using the "RMM" build flag. If RMM image is not provided, TRP is built by default and used as RMM image. - Support for RMM image in fiptool Signed-off-by: Zelalem Aweke Change-Id: I017c23ef02e465a5198baafd665a60858ecd1b25 --- Makefile | 44 ++++++++++++++++- bl31/bl31_main.c | 49 +++++++++++++++++-- docs/getting_started/build-options.rst | 4 ++ include/bl31/bl31.h | 3 +- include/common/bl_common.h | 4 +- include/export/common/tbbr/tbbr_img_def_exp.h | 5 +- include/tools_share/firmware_image_package.h | 2 + make_helpers/defaults.mk | 5 +- services/std_svc/rmmd/rmmd.mk | 2 + tools/fiptool/tbbr_config.c | 7 ++- 10 files changed, 114 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index d6e99f338..ab61d1c94 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ # -# Copyright (c) 2013-2021, ARM Limited and Contributors. 
All rights reserved. +# Copyright (c) 2013-2021, Arm Limited and Contributors. All rights reserved. # # SPDX-License-Identifier: BSD-3-Clause # @@ -129,6 +129,23 @@ else $(error Unknown BRANCH_PROTECTION value ${BRANCH_PROTECTION}) endif +# FEAT_RME +ifeq (${ENABLE_RME},1) +# RME doesn't support PIE +ifneq (${ENABLE_PIE},0) + $(error ENABLE_RME does not support PIE) +endif +# RME requires AARCH64 +ifneq (${ARCH},aarch64) + $(error ENABLE_RME requires AArch64) +endif +# RME requires el2 context to be saved for now. +CTX_INCLUDE_EL2_REGS := 1 +CTX_INCLUDE_AARCH32_REGS := 0 +ARM_ARCH_MAJOR := 8 +ARM_ARCH_MINOR := 6 +endif + # USE_SPINLOCK_CAS requires AArch64 build ifeq (${USE_SPINLOCK_CAS},1) ifneq (${ARCH},aarch64) @@ -558,6 +575,18 @@ ifneq (${SPD},none) # over the sources. endif +################################################################################ +# Include rmmd Makefile if RME is enabled +################################################################################ + +ifneq (${ENABLE_RME},0) +ifneq (${ARCH},aarch64) + $(error ENABLE_RME requires AArch64) +endif +include services/std_svc/rmmd/rmmd.mk +$(warning "RME is an experimental feature") +endif + ################################################################################ # Include the platform specific Makefile after the SPD Makefile (the platform # makefile may use all previous definitions in this file) @@ -926,6 +955,7 @@ $(eval $(call assert_booleans,\ ENABLE_PIE \ ENABLE_PMF \ ENABLE_PSCI_STAT \ + ENABLE_RME \ ENABLE_RUNTIME_INSTRUMENTATION \ ENABLE_SPE_FOR_LOWER_ELS \ ENABLE_SVE_FOR_NS \ @@ -1028,6 +1058,7 @@ $(eval $(call add_defines,\ ENABLE_PIE \ ENABLE_PMF \ ENABLE_PSCI_STAT \ + ENABLE_RME \ ENABLE_RUNTIME_INSTRUMENTATION \ ENABLE_SPE_FOR_LOWER_ELS \ ENABLE_SVE_FOR_NS \ @@ -1194,6 +1225,17 @@ $(if ${BUILD_BL32}, $(eval $(call MAKE_BL,bl32,tos-fw)),\ endif endif +# If RMM image is needed but RMM is not defined, Test Realm Payload (TRP) +# needs to be built from RMM_SOURCES. 
+ifeq (${NEED_RMM},yes) +# Sort RMM source files to remove duplicates +RMM_SOURCES := $(sort ${RMM_SOURCES}) +BUILD_RMM := $(if $(RMM),,$(if $(RMM_SOURCES),1)) + +$(if ${BUILD_RMM}, $(eval $(call MAKE_BL,rmm,rmm-fw)),\ + $(eval $(call TOOL_ADD_IMG,rmm,--rmm-fw))) +endif + # Add the BL33 image if required by the platform ifeq (${NEED_BL33},yes) $(eval $(call TOOL_ADD_IMG,bl33,--nt-fw)) diff --git a/bl31/bl31_main.c b/bl31/bl31_main.c index f272af502..9ac10e240 100644 --- a/bl31/bl31_main.c +++ b/bl31/bl31_main.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -35,6 +35,13 @@ PMF_REGISTER_SERVICE_SMC(rt_instr_svc, PMF_RT_INSTR_SVC_ID, ******************************************************************************/ static int32_t (*bl32_init)(void); +/***************************************************************************** + * Function used to initialise RMM if RME is enabled + *****************************************************************************/ +#if ENABLE_RME +static int32_t (*rmm_init)(void); +#endif + /******************************************************************************* * Variable to indicate whether next image to execute after BL31 is BL33 * (non-secure & default) or BL32 (secure). @@ -139,12 +146,15 @@ void bl31_main(void) /* * All the cold boot actions on the primary cpu are done. We now need to - * decide which is the next image (BL32 or BL33) and how to execute it. + * decide which is the next image and how to execute it. * If the SPD runtime service is present, it would want to pass control * to BL32 first in S-EL1. In that case, SPD would have registered a * function to initialize bl32 where it takes responsibility of entering - * S-EL1 and returning control back to bl31_main. Once this is done we - * can prepare entry into BL33 as normal. 
+ * S-EL1 and returning control back to bl31_main. Similarly, if RME is + * enabled and a function is registered to initialize RMM, control is + * transferred to RMM in R-EL2. After RMM initialization, control is + * returned back to bl31_main. Once this is done we can prepare entry + * into BL33 as normal. */ /* @@ -155,9 +165,27 @@ void bl31_main(void) int32_t rc = (*bl32_init)(); - if (rc == 0) + if (rc == 0) { WARN("BL31: BL32 initialization failed\n"); + } } + + /* + * If RME is enabled and init hook is registered, initialize RMM + * in R-EL2. + */ +#if ENABLE_RME + if (rmm_init != NULL) { + INFO("BL31: Initializing RMM\n"); + + int32_t rc = (*rmm_init)(); + + if (rc == 0) { + WARN("BL31: RMM initialization failed\n"); + } + } +#endif + /* * We are ready to enter the next EL. Prepare entry into the image * corresponding to the desired security state after the next ERET. @@ -236,3 +264,14 @@ void bl31_register_bl32_init(int32_t (*func)(void)) { bl32_init = func; } + +#if ENABLE_RME +/******************************************************************************* + * This function initializes the pointer to RMM init function. This is expected + * to be called by the RMMD after it finishes all its initialization + ******************************************************************************/ +void bl31_register_rmm_init(int32_t (*func)(void)) +{ + rmm_init = func; +} +#endif diff --git a/docs/getting_started/build-options.rst b/docs/getting_started/build-options.rst index 7fe6ccd8a..1259881ed 100644 --- a/docs/getting_started/build-options.rst +++ b/docs/getting_started/build-options.rst @@ -271,6 +271,10 @@ Common build options be enabled. If ``ENABLE_PMF`` is set, the residency statistics are tracked in software. +- ``ENABLE_RME``: Boolean option to enable support for the ARMv9 Realm + Management Extension. Default value is 0. This is currently an experimental + feature. 
+ - ``ENABLE_RUNTIME_INSTRUMENTATION``: Boolean option to enable runtime instrumentation which injects timestamp collection points into TF-A to allow runtime performance to be measured. Currently, only PSCI is diff --git a/include/bl31/bl31.h b/include/bl31/bl31.h index 3deb0a51d..1d58ef968 100644 --- a/include/bl31/bl31.h +++ b/include/bl31/bl31.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2019, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -19,6 +19,7 @@ void bl31_set_next_image_type(uint32_t security_state); uint32_t bl31_get_next_image_type(void); void bl31_prepare_next_image_entry(void); void bl31_register_bl32_init(int32_t (*func)(void)); +void bl31_register_rmm_init(int32_t (*func)(void)); void bl31_warm_entrypoint(void); void bl31_main(void); void bl31_lib_init(void); diff --git a/include/common/bl_common.h b/include/common/bl_common.h index e33840c9d..8cb4990f0 100644 --- a/include/common/bl_common.h +++ b/include/common/bl_common.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -126,6 +126,8 @@ IMPORT_SYM(uintptr_t, __BL31_START__, BL31_START); IMPORT_SYM(uintptr_t, __BL31_END__, BL31_END); #elif defined(IMAGE_BL32) IMPORT_SYM(uintptr_t, __BL32_END__, BL32_END); +#elif defined(IMAGE_RMM) +IMPORT_SYM(uintptr_t, __RMM_END__, RMM_END); #endif /* IMAGE_BLX */ /* The following symbols are only exported from the BL2 at EL3 linker script. 
*/ diff --git a/include/export/common/tbbr/tbbr_img_def_exp.h b/include/export/common/tbbr/tbbr_img_def_exp.h index 2623c7599..98544c0ae 100644 --- a/include/export/common/tbbr/tbbr_img_def_exp.h +++ b/include/export/common/tbbr/tbbr_img_def_exp.h @@ -101,7 +101,10 @@ */ #define BKUP_FWU_METADATA_IMAGE_ID U(33) +/* Realm Monitor Manager (RMM) */ +#define RMM_IMAGE_ID U(34) + /* Max Images */ -#define MAX_IMAGE_IDS U(34) +#define MAX_IMAGE_IDS U(35) #endif /* ARM_TRUSTED_FIRMWARE_EXPORT_COMMON_TBBR_TBBR_IMG_DEF_EXP_H */ diff --git a/include/tools_share/firmware_image_package.h b/include/tools_share/firmware_image_package.h index dc65cc626..bd5b14b9d 100644 --- a/include/tools_share/firmware_image_package.h +++ b/include/tools_share/firmware_image_package.h @@ -38,6 +38,8 @@ {{0x8e, 0xa8, 0x7b, 0xb1}, {0xcf, 0xa2}, {0x3f, 0x4d}, 0x85, 0xfd, {0xe7, 0xbb, 0xa5, 0x02, 0x20, 0xd9} } #define UUID_NON_TRUSTED_FIRMWARE_BL33 \ {{0xd6, 0xd0, 0xee, 0xa7}, {0xfc, 0xea}, {0xd5, 0x4b}, 0x97, 0x82, {0x99, 0x34, 0xf2, 0x34, 0xb6, 0xe4} } +#define UUID_REALM_MONITOR_MGMT_FIRMWARE \ + {{0x6c, 0x07, 0x62, 0xa6}, {0x12, 0xf2}, {0x4b, 0x56}, 0x92, 0xcb, {0xba, 0x8f, 0x63, 0x36, 0x06, 0xd9} } /* Key certificates */ #define UUID_ROT_KEY_CERT \ {{0x86, 0x2d, 0x1d, 0x72}, {0xf8, 0x60}, {0xe4, 0x11}, 0x92, 0x0b, {0x8b, 0xe7, 0x62, 0x16, 0x0f, 0x24} } diff --git a/make_helpers/defaults.mk b/make_helpers/defaults.mk index 8b350db7b..819c53686 100644 --- a/make_helpers/defaults.mk +++ b/make_helpers/defaults.mk @@ -1,5 +1,5 @@ # -# Copyright (c) 2016-2021, ARM Limited. All rights reserved. +# Copyright (c) 2016-2021, Arm Limited. All rights reserved. 
# # SPDX-License-Identifier: BSD-3-Clause # @@ -105,6 +105,9 @@ ENABLE_PMF := 0 # Flag to enable PSCI STATs functionality ENABLE_PSCI_STAT := 0 +# Flag to enable Realm Management Extension (FEAT_RME) +ENABLE_RME := 0 + # Flag to enable runtime instrumentation using PMF ENABLE_RUNTIME_INSTRUMENTATION := 0 diff --git a/services/std_svc/rmmd/rmmd.mk b/services/std_svc/rmmd/rmmd.mk index b42568183..bac0a9f28 100644 --- a/services/std_svc/rmmd/rmmd.mk +++ b/services/std_svc/rmmd/rmmd.mk @@ -14,3 +14,5 @@ RMMD_SOURCES += $(addprefix services/std_svc/rmmd/, \ ${ARCH}/rmmd_helpers.S \ rmmd_main.c) +# Let the top-level Makefile know that we intend to include RMM image +NEED_RMM := yes diff --git a/tools/fiptool/tbbr_config.c b/tools/fiptool/tbbr_config.c index c1e5217f0..4998bb2c2 100644 --- a/tools/fiptool/tbbr_config.c +++ b/tools/fiptool/tbbr_config.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -67,6 +67,11 @@ toc_entry_t toc_entries[] = { .uuid = UUID_NON_TRUSTED_FIRMWARE_BL33, .cmdline_name = "nt-fw" }, + { + .name = "Realm Monitor Management Firmware", + .uuid = UUID_REALM_MONITOR_MGMT_FIRMWARE, + .cmdline_name = "rmm-fw" + }, /* Dynamic Configs */ { .name = "FW_CONFIG", From 1839012d5b5d431f7ec307230eae9890a5fe7477 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Tue, 13 Jul 2021 14:05:20 -0500 Subject: [PATCH 10/21] feat(rme): add GPT Library This patch introduces the Granule Protection Table (GPT) library code. This implementation will be updated later to be more flexible, as the current implementation is very rigid. 
Signed-off-by: Zelalem Aweke Change-Id: I3af824a28c6e9a5d36459c0c51d2d9bebfba1505 --- bl2/bl2.mk | 4 +- bl31/aarch64/bl31_entrypoint.S | 10 +- bl31/bl31.mk | 5 +- include/lib/gpt/gpt.h | 86 ++++ include/lib/gpt/gpt_defs.h | 76 ++++ lib/gpt/gpt.mk | 8 + lib/gpt/gpt_core.c | 767 +++++++++++++++++++++++++++++++++ 7 files changed, 953 insertions(+), 3 deletions(-) create mode 100644 include/lib/gpt/gpt.h create mode 100644 include/lib/gpt/gpt_defs.h create mode 100644 lib/gpt/gpt.mk create mode 100644 lib/gpt/gpt_core.c diff --git a/bl2/bl2.mk b/bl2/bl2.mk index 54c73f506..fd8374795 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -17,10 +17,12 @@ endif ifeq (${ENABLE_RME},1) # Using RME, run BL2 at EL3 +include lib/gpt/gpt.mk + BL2_SOURCES += bl2/${ARCH}/bl2_rme_entrypoint.S \ bl2/${ARCH}/bl2_el3_exceptions.S \ bl2/${ARCH}/bl2_run_next_image.S \ - + ${GPT_LIB_SRCS} BL2_LINKERFILE := bl2/bl2.ld.S else ifeq (${BL2_AT_EL3},0) diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 2d672dd12..2e9a39496 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2013-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -172,6 +172,14 @@ func bl31_warm_entrypoint _exception_vectors=runtime_exceptions \ _pie_fixup_size=0 +#if ENABLE_RME + /* + * Initialise and enable Granule Protection + * before enabling any stage of translation. + */ + bl gpt_enable +#endif + /* * We're about to enable MMU and participate in PSCI state coordination. 
 * diff --git a/bl31/bl31.mk b/bl31/bl31.mk index ce0f69b7d..5927fb1c9 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -112,7 +112,10 @@ BL31_SOURCES += services/std_svc/pci_svc.c endif ifeq (${ENABLE_RME},1) -BL31_SOURCES += ${RMMD_SOURCES} +include lib/gpt/gpt.mk + +BL31_SOURCES += ${GPT_LIB_SRCS} \ + ${RMMD_SOURCES} endif BL31_LINKERFILE := bl31/bl31.ld.S diff --git a/include/lib/gpt/gpt.h b/include/lib/gpt/gpt.h new file mode 100644 index 000000000..89d30177d --- /dev/null +++ b/include/lib/gpt/gpt.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_H +#define GPT_H + +#include + +#include + +#include "gpt_defs.h" + +#define GPT_DESC_ATTRS(_type, _gpi) \ + ((((_type) & PAS_REG_DESC_TYPE_MASK) \ + << PAS_REG_DESC_TYPE_SHIFT) | \ + (((_gpi) & PAS_REG_GPI_MASK) \ + << PAS_REG_GPI_SHIFT)) + +/* + * Macro to create a GPT entry for this PAS range either as a L0 block + * descriptor or L1 table descriptor depending upon the size of the range. + */ +#define MAP_GPT_REGION(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_ANY, (_gpi)), \ + } + +/* + * Special macro to create a L1 table descriptor at L0 for a 1GB region as + * opposed to creating a block mapping by default. + */ +#define MAP_GPT_REGION_TBL(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, (_gpi)), \ + } + +/* + * Structure for specifying a Granule range and its properties + */ +typedef struct pas_region { + unsigned long long base_pa; /**< Base address for PAS. */ + size_t size; /**< Size of the PAS. */ + unsigned int attrs; /**< PAS GPI and entry type. */ +} pas_region_t; + +/* + * Structure to initialise the Granule Protection Tables. + */ +typedef struct gpt_init_params { + unsigned int pgs; /**< Address Width of Physical Granule Size. 
*/ + unsigned int pps; /**< Protected Physical Address Size. */ + unsigned int l0gptsz; /**< Granule size on L0 table entry. */ + pas_region_t *pas_regions; /**< PAS regions to protect. */ + unsigned int pas_count; /**< Number of PAS regions to initialise. */ + uintptr_t l0_mem_base; /**< L0 Table base address. */ + size_t l0_mem_size; /**< Size of memory reserved for L0 tables. */ + uintptr_t l1_mem_base; /**< L1 Table base address. */ + size_t l1_mem_size; /**< Size of memory reserved for L1 tables. */ +} gpt_init_params_t; + +/** @brief Initialise the Granule Protection tables. + */ +int gpt_init(gpt_init_params_t *params); + +/** @brief Enable the Granule Protection Checks. + */ +void gpt_enable(void); + +/** @brief Disable the Granule Protection Checks. + */ +void gpt_disable(void); + +/** @brief Transition a granule between security states. + */ +int gpt_transition_pas(uint64_t pa, + unsigned int src_sec_state, + unsigned int target_pas); + +#endif /* GPT_H */ diff --git a/include/lib/gpt/gpt_defs.h b/include/lib/gpt/gpt_defs.h new file mode 100644 index 000000000..6122a126f --- /dev/null +++ b/include/lib/gpt/gpt_defs.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_DEFS_H +#define GPT_DEFS_H + +#include +#include + +#include "gpt.h" + +/* GPI values */ +#define GPI_NO_ACCESS U(0x0) +#define GPI_SECURE U(0x8) +#define GPI_NS U(0x9) +#define GPI_ROOT U(0xa) +#define GPI_REALM U(0xb) +#define GPI_ANY U(0xf) +#define GPI_VAL_MASK ULL(0xf) + +/* GPT descriptor bit definitions */ +#define GPT_L1_INDEX_MASK ULL(0xf) +#define GPT_L1_INDEX_SHIFT ULL(0x0) + +#define GPT_TBL_DESC ULL(0x3) +#define GPT_BLK_DESC ULL(0x1) + +#define GPT_TBL_DESC_ADDR_SHIFT ULL(12) +#define GPT_TBL_DESC_ADDR_MASK (((ULL(1) << \ + (51 - GPT_TBL_DESC_ADDR_SHIFT)) - 1) \ + << GPT_TBL_DESC_ADDR_SHIFT) + +#define GPT_BLOCK_DESC_GPI_VAL_SHIFT ULL(4) + +/* Each descriptor is 8 bytes long. 
*/ +#define GPT_DESC_SIZE ULL(8) + +#define PPS_MAX_VAL PSTCR_EL3_PPS_4PB +#define PPS_NUM_1GB_ENTRIES ULL(1024) +#define PGS_4K_1GB_L1_TABLE_SZ (U(2) << 17) + +/* 2 << LOG2_8K = Bytes in 8K */ +#define LOG2_8K U(13) + +#define GPT_L1_SIZE ULL(0x40000) /* 256K */ +#define SZ_1G (ULL(0x1) << 30) /* 1GB */ + +#define GPT_MIN_PGS_SHIFT U(12) /* 4K */ + +#define L1_GPT_INDEX_MASK U(0x3fffffff) +#define GPT_GRAN_DESC_NUM_GPIS U(4) + +#define PAS_REG_GPI_SHIFT U(0) +#define PAS_REG_GPI_MASK U(0xf) + +/* .attrs field definitions */ +#define PAS_REG_DESC_TYPE_ANY U(0) +#define PAS_REG_DESC_TYPE_BLK U(1) +#define PAS_REG_DESC_TYPE_TBL U(2) +#define PAS_REG_DESC_TYPE_SHIFT U(4) +#define PAS_REG_DESC_TYPE_MASK U(0x3) +#define PAS_REG_DESC_TYPE(_attrs) (((_attrs) \ + >> PAS_REG_DESC_TYPE_SHIFT) \ + & PAS_REG_DESC_TYPE_MASK) + +#define PAS_REG_GPI(_attrs) (((_attrs) \ + >> PAS_REG_GPI_SHIFT) \ + & PAS_REG_GPI_MASK) + +#define SZ_1G_MASK (SZ_1G - U(1)) +#define IS_1GB_ALIGNED(addr) (((addr) & SZ_1G_MASK) == U(0)) + +#endif /* GPT_DEFS */ diff --git a/lib/gpt/gpt.mk b/lib/gpt/gpt.mk new file mode 100644 index 000000000..611e50457 --- /dev/null +++ b/lib/gpt/gpt.mk @@ -0,0 +1,8 @@ +# +# Copyright (c) 2021, Arm Limited. All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause +# + +GPT_LIB_SRCS := $(addprefix lib/gpt/, \ + gpt_core.c) diff --git a/lib/gpt/gpt_core.c b/lib/gpt/gpt_core.c new file mode 100644 index 000000000..8a3afd2fa --- /dev/null +++ b/lib/gpt/gpt_core.c @@ -0,0 +1,767 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#if !ENABLE_RME +#error "ENABLE_RME must be enabled to use the GPT library." 
+#endif + +typedef struct { + uintptr_t plat_gpt_l0_base; + uintptr_t plat_gpt_l1_base; + size_t plat_gpt_l0_size; + size_t plat_gpt_l1_size; + unsigned int plat_gpt_pps; + unsigned int plat_gpt_pgs; + unsigned int plat_gpt_l0gptsz; +} gpt_config_t; + +gpt_config_t gpt_config; + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) +/* Helper function that cleans the data cache only if it is enabled. */ +static inline + void gpt_clean_dcache_range(uintptr_t addr, size_t size) +{ + if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { + clean_dcache_range(addr, size); + } +} + +/* Helper function that invalidates the data cache only if it is enabled. */ +static inline + void gpt_inv_dcache_range(uintptr_t addr, size_t size) +{ + if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { + inv_dcache_range(addr, size); + } +} +#endif + +typedef struct l1_gpt_attr_desc { + size_t t_sz; /** Table size */ + size_t g_sz; /** Granularity size */ + unsigned int p_val; /** Associated P value */ +} l1_gpt_attr_desc_t; + +/* + * Lookup table to find out the size in bytes of the L1 tables as well + * as the index mask, given the Width of Physical Granule Size (PGS). + * L1 tables are indexed by PA[29:p+4], being 'p' the width in bits of the + * aforementioned Physical Granule Size. + */ +static const l1_gpt_attr_desc_t l1_gpt_attr_lookup[] = { + [GPCCR_PGS_4K] = {U(1) << U(17), /* 16384B x 64bit entry = 128KB */ + PAGE_SIZE_4KB, /* 4KB Granularity */ + U(12)}, + [GPCCR_PGS_64K] = {U(1) << U(13), /* Table size = 8KB */ + PAGE_SIZE_64KB, /* 64KB Granularity */ + U(16)}, + [GPCCR_PGS_16K] = {U(1) << U(15), /* Table size = 32KB */ + PAGE_SIZE_16KB, /* 16KB Granularity */ + U(14)} +}; + +typedef struct l0_gpt_attr_desc { + size_t sz; + unsigned int t_val_mask; +} l0_gpt_attr_desc_t; + +/* + * Lookup table to find out the size in bytes of the L0 table as well + * as the index mask, given the Protected Physical Address Size (PPS). 
+ * L0 table is indexed by PA[t-1:30], being 't' the size in bits + * of the aforementioned Protected Physical Address Size. + */ +static const l0_gpt_attr_desc_t l0_gpt_attr_lookup[] = { + + [GPCCR_PPS_4GB] = {U(1) << U(5), /* 4 x 64 bit entry = 32 bytes */ + 0x3}, /* Bits[31:30] */ + + [GPCCR_PPS_64GB] = {U(1) << U(9), /* 512 bytes */ + 0x3f}, /* Bits[35:30] */ + + [GPCCR_PPS_1TB] = {U(1) << U(13), /* 8KB */ + 0x3ff}, /* Bits[39:30] */ + + [GPCCR_PPS_4TB] = {U(1) << U(15), /* 32KB */ + 0xfff}, /* Bits[41:30] */ + + [GPCCR_PPS_16TB] = {U(1) << U(17), /* 128KB */ + 0x3fff}, /* Bits[43:30] */ + + [GPCCR_PPS_256TB] = {U(1) << U(21), /* 2MB */ + 0x3ffff}, /* Bits[47:30] */ + + [GPCCR_PPS_4PB] = {U(1) << U(25), /* 32MB */ + 0x3fffff}, /* Bits[51:30] */ + +}; + +static unsigned int get_l1_gpt_index(unsigned int pgs, uintptr_t pa) +{ + unsigned int l1_gpt_arr_idx; + + /* + * Mask top 2 bits to obtain the 30 bits required to + * generate the L1 GPT index + */ + l1_gpt_arr_idx = (unsigned int)(pa & L1_GPT_INDEX_MASK); + + /* Shift by 'p' value + 4 to obtain the index */ + l1_gpt_arr_idx >>= (l1_gpt_attr_lookup[pgs].p_val + 4); + + return l1_gpt_arr_idx; +} + +unsigned int plat_is_my_cpu_primary(void); + +/* The granule partition tables can only be configured on BL2 */ +#ifdef IMAGE_BL2 + +/* Global to keep track of next available index in array of L1 GPTs */ +static unsigned int l1_gpt_mem_avlbl_index; + +static int validate_l0_gpt_params(gpt_init_params_t *params) +{ + /* Only 1GB of address space per L0 entry is allowed */ + if (params->l0gptsz != GPCCR_L0GPTSZ_30BITS) { + WARN("Invalid L0GPTSZ %u.\n", params->l0gptsz); + } + + /* Only 4K granule is supported for now */ + if (params->pgs != GPCCR_PGS_4K) { + WARN("Invalid GPT PGS %u.\n", params->pgs); + return -EINVAL; + } + + /* Only 4GB of protected physical address space is supported for now */ + if (params->pps != GPCCR_PPS_4GB) { + WARN("Invalid GPT PPS %u.\n", params->pps); + return -EINVAL; + } + + /* Check if GPT 
base address is aligned with the system granule */ + if (!IS_PAGE_ALIGNED(params->l0_mem_base)) { + ERROR("Unaligned L0 GPT base address.\n"); + return -EFAULT; + } + + /* Check if there is enough memory for L0 GPTs */ + if (params->l0_mem_size < l0_gpt_attr_lookup[params->pps].sz) { + ERROR("Inadequate memory for L0 GPTs. "); + ERROR("Expected 0x%lx bytes. Got 0x%lx bytes\n", + l0_gpt_attr_lookup[params->pps].sz, + params->l0_mem_size); + return -ENOMEM; + } + + return 0; +} + +/* + * A L1 GPT is required if any one of the following conditions is true: + * + * - The base address is not 1GB aligned + * - The size of the memory region is not a multiple of 1GB + * - A L1 GPT has been explicitly requested (attrs == PAS_REG_DESC_TYPE_TBL) + * + * This function: + * - iterates over all the PAS regions to determine whether they + * will need a 2 stage look up (and therefore a L1 GPT will be required) or + * if it would be enough with a single level lookup table. + * - Updates the attr field of the PAS regions. + * - Returns the total count of L1 tables needed. + * + * In the future we should validate that the PAS range does not exceed the + * configured PPS. (and maybe rename this function as it is validating PAS + * regions). + */ +static unsigned int update_gpt_type(pas_region_t *pas_regions, + unsigned int pas_region_cnt) +{ + unsigned int idx, cnt = 0U; + + for (idx = 0U; idx < pas_region_cnt; idx++) { + if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == + PAS_REG_DESC_TYPE_TBL) { + cnt++; + continue; + } + if (!(IS_1GB_ALIGNED(pas_regions[idx].base_pa) && + IS_1GB_ALIGNED(pas_regions[idx].size))) { + + /* Current region will need L1 GPTs. 
*/ + assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) + == PAS_REG_DESC_TYPE_ANY); + + pas_regions[idx].attrs = + GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, + PAS_REG_GPI(pas_regions[idx].attrs)); + cnt++; + continue; + } + + /* The PAS can be mapped on a one stage lookup table */ + assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) != + PAS_REG_DESC_TYPE_TBL); + + pas_regions[idx].attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_BLK, + PAS_REG_GPI(pas_regions[idx].attrs)); + } + + return cnt; +} + +static int validate_l1_gpt_params(gpt_init_params_t *params, + unsigned int l1_gpt_cnt) +{ + size_t l1_gpt_sz, l1_gpt_mem_sz; + + /* Check if the granularity is supported */ + assert(xlat_arch_is_granule_size_supported( + l1_gpt_attr_lookup[params->pgs].g_sz)); + + + /* Check if naturally aligned L1 GPTs can be created */ + l1_gpt_sz = l1_gpt_attr_lookup[params->pgs].g_sz; + if (params->l1_mem_base & (l1_gpt_sz - 1)) { + WARN("Unaligned L1 GPT base address.\n"); + return -EFAULT; + } + + /* Check if there is enough memory for L1 GPTs */ + l1_gpt_mem_sz = l1_gpt_cnt * l1_gpt_sz; + if (params->l1_mem_size < l1_gpt_mem_sz) { + WARN("Inadequate memory for L1 GPTs. "); + WARN("Expected 0x%lx bytes. Got 0x%lx bytes\n", + l1_gpt_mem_sz, params->l1_mem_size); + return -ENOMEM; + } + + INFO("Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz); + return 0; +} + +/* + * Helper function to determine if the end physical address lies in the same GB + * as the current physical address. If true, the end physical address is + * returned else, the start address of the next GB is returned. 
+ */ +static uintptr_t get_l1_gpt_end_pa(uintptr_t cur_pa, uintptr_t end_pa) +{ + uintptr_t cur_gb, end_gb; + + cur_gb = cur_pa >> ONE_GB_SHIFT; + end_gb = end_pa >> ONE_GB_SHIFT; + + assert(cur_gb <= end_gb); + + if (cur_gb == end_gb) { + return end_pa; + } + + return (cur_gb + 1) << ONE_GB_SHIFT; +} + +static void generate_l0_blk_desc(gpt_init_params_t *params, + unsigned int idx) +{ + uint64_t gpt_desc; + uintptr_t end_addr; + unsigned int end_idx, start_idx; + pas_region_t *pas = params->pas_regions + idx; + uint64_t *l0_gpt_arr = (uint64_t *)params->l0_mem_base; + + /* Create the GPT Block descriptor for this PAS region */ + gpt_desc = GPT_BLK_DESC; + gpt_desc |= PAS_REG_GPI(pas->attrs) + << GPT_BLOCK_DESC_GPI_VAL_SHIFT; + + /* Start index of this region in L0 GPTs */ + start_idx = pas->base_pa >> ONE_GB_SHIFT; + + /* + * Determine number of L0 GPT descriptors covered by + * this PAS region and use the count to populate these + * descriptors. + */ + end_addr = pas->base_pa + pas->size; + assert(end_addr \ + <= (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30); + end_idx = end_addr >> ONE_GB_SHIFT; + + for (; start_idx < end_idx; start_idx++) { + l0_gpt_arr[start_idx] = gpt_desc; + INFO("L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n", + start_idx, &l0_gpt_arr[start_idx], + (gpt_desc >> GPT_BLOCK_DESC_GPI_VAL_SHIFT) & + GPT_L1_INDEX_MASK, l0_gpt_arr[start_idx]); + } +} + +static void generate_l0_tbl_desc(gpt_init_params_t *params, + unsigned int idx) +{ + uint64_t gpt_desc = 0U, *l1_gpt_arr; + uintptr_t start_pa, end_pa, cur_pa, next_pa; + unsigned int start_idx, l1_gpt_idx; + unsigned int p_val, gran_sz; + pas_region_t *pas = params->pas_regions + idx; + uint64_t *l0_gpt_base = (uint64_t *)params->l0_mem_base; + uint64_t *l1_gpt_base = (uint64_t *)params->l1_mem_base; + + start_pa = pas->base_pa; + end_pa = start_pa + pas->size; + p_val = l1_gpt_attr_lookup[params->pgs].p_val; + gran_sz = 1 << p_val; + + /* + * end_pa cannot be larger than 
the maximum protected physical memory. + */ + assert(((1ULL<<30) << l0_gpt_attr_lookup[params->pps].t_val_mask) + > end_pa); + + for (cur_pa = start_pa; cur_pa < end_pa;) { + /* + * Determine the PA range that will be covered + * in this loop iteration. + */ + next_pa = get_l1_gpt_end_pa(cur_pa, end_pa); + + INFO("PAS[%u]: start: 0x%lx, end: 0x%lx, next_pa: 0x%lx.\n", + idx, cur_pa, end_pa, next_pa); + + /* Index of this PA in L0 GPTs */ + start_idx = cur_pa >> ONE_GB_SHIFT; + + /* + * If cur_pa is on a 1GB boundary then determine + * the base address of next available L1 GPT + * memory region + */ + if (IS_1GB_ALIGNED(cur_pa)) { + l1_gpt_arr = (uint64_t *)((uint64_t)l1_gpt_base + + (l1_gpt_attr_lookup[params->pgs].t_sz * + l1_gpt_mem_avlbl_index)); + + assert(l1_gpt_arr < + (l1_gpt_base + params->l1_mem_size)); + + /* Create the L0 GPT descriptor for this PAS region */ + gpt_desc = GPT_TBL_DESC | + ((uintptr_t)l1_gpt_arr + & GPT_TBL_DESC_ADDR_MASK); + + l0_gpt_base[start_idx] = gpt_desc; + + /* + * Update index to point to next available L1 + * GPT memory region + */ + l1_gpt_mem_avlbl_index++; + } else { + /* Use the existing L1 GPT */ + l1_gpt_arr = (uint64_t *)(l0_gpt_base[start_idx] + & ~((1U<<12) - 1U)); + } + + INFO("L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n", + start_idx, &l0_gpt_base[start_idx], + (unsigned long long)(l1_gpt_arr), + l0_gpt_base[start_idx]); + + /* + * Fill up L1 GPT entries between these two + * addresses. + */ + for (; cur_pa < next_pa; cur_pa += gran_sz) { + unsigned int gpi_idx, gpi_idx_shift; + + /* Obtain index of L1 GPT entry */ + l1_gpt_idx = get_l1_gpt_index(params->pgs, cur_pa); + + /* + * Obtain index of GPI in L1 GPT entry + * (i = PA[p_val+3:p_val]) + */ + gpi_idx = (cur_pa >> p_val) & GPT_L1_INDEX_MASK; + + /* + * Shift by index * 4 to reach correct + * GPI entry in L1 GPT descriptor. 
+ * GPI = gpt_desc[(4*idx)+3:(4*idx)] + */ + gpi_idx_shift = gpi_idx << 2; + + gpt_desc = l1_gpt_arr[l1_gpt_idx]; + + /* Clear existing GPI encoding */ + gpt_desc &= ~(GPT_L1_INDEX_MASK << gpi_idx_shift); + + /* Set the GPI encoding */ + gpt_desc |= ((uint64_t)PAS_REG_GPI(pas->attrs) + << gpi_idx_shift); + + l1_gpt_arr[l1_gpt_idx] = gpt_desc; + + if (gpi_idx == 15U) { + VERBOSE("\tEntry %u [%p] = 0x%llx\n", + l1_gpt_idx, + &l1_gpt_arr[l1_gpt_idx], gpt_desc); + } + } + } +} + +static void create_gpt(gpt_init_params_t *params) +{ + unsigned int idx; + pas_region_t *pas_regions = params->pas_regions; + + INFO("pgs = 0x%x, pps = 0x%x, l0gptsz = 0x%x\n", + params->pgs, params->pps, params->l0gptsz); + INFO("pas_region_cnt = 0x%x L1 base = 0x%lx, L1 sz = 0x%lx\n", + params->pas_count, params->l1_mem_base, params->l1_mem_size); + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_inv_dcache_range(params->l0_mem_base, params->l0_mem_size); + gpt_inv_dcache_range(params->l1_mem_base, params->l1_mem_size); +#endif + + for (idx = 0U; idx < params->pas_count; idx++) { + + INFO("PAS[%u]: base 0x%llx, sz 0x%lx, GPI 0x%x, type 0x%x\n", + idx, pas_regions[idx].base_pa, pas_regions[idx].size, + PAS_REG_GPI(pas_regions[idx].attrs), + PAS_REG_DESC_TYPE(pas_regions[idx].attrs)); + + /* Check if a block or table descriptor is required */ + if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == + PAS_REG_DESC_TYPE_BLK) { + generate_l0_blk_desc(params, idx); + + } else { + generate_l0_tbl_desc(params, idx); + } + } + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_clean_dcache_range(params->l0_mem_base, params->l0_mem_size); + gpt_clean_dcache_range(params->l1_mem_base, params->l1_mem_size); +#endif + + /* Make sure that all the entries are written to the memory. 
*/ + dsbishst(); +} + +#endif /* IMAGE_BL2 */ + +int gpt_init(gpt_init_params_t *params) +{ +#ifdef IMAGE_BL2 + unsigned int l1_gpt_cnt; + int ret; +#endif + /* Validate arguments */ + assert(params != NULL); + assert(params->pgs <= GPCCR_PGS_16K); + assert(params->pps <= GPCCR_PPS_4PB); + assert(params->l0_mem_base != (uintptr_t)0); + assert(params->l0_mem_size > 0U); + assert(params->l1_mem_base != (uintptr_t)0); + assert(params->l1_mem_size > 0U); + +#ifdef IMAGE_BL2 + /* + * The Granule Protection Tables are initialised only in BL2. + * BL31 is not allowed to initialise them again in case + * these are modified by any other image loaded by BL2. + */ + assert(params->pas_regions != NULL); + assert(params->pas_count > 0U); + + ret = validate_l0_gpt_params(params); + if (ret < 0) { + + return ret; + } + + /* Check if L1 GPTs are required and how many. */ + l1_gpt_cnt = update_gpt_type(params->pas_regions, + params->pas_count); + INFO("%u L1 GPTs requested.\n", l1_gpt_cnt); + + if (l1_gpt_cnt > 0U) { + ret = validate_l1_gpt_params(params, l1_gpt_cnt); + if (ret < 0) { + return ret; + } + } + + create_gpt(params); +#else + /* If running in BL31, only primary CPU can initialise GPTs */ + assert(plat_is_my_cpu_primary() == 1U); + + /* + * If the primary CPU is calling this function from BL31 + * we expect that the tables are already initialised from + * BL2 and GPCCR_EL3 is already configured with + * Granule Protection Check Enable bit set. + */ + assert((read_gpccr_el3() & GPCCR_GPC_BIT) != 0U); +#endif /* IMAGE_BL2 */ + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); +#endif + gpt_config.plat_gpt_l0_base = params->l0_mem_base; + gpt_config.plat_gpt_l1_base = params->l1_mem_base; + gpt_config.plat_gpt_l0_size = params->l0_mem_size; + gpt_config.plat_gpt_l1_size = params->l1_mem_size; + + /* Backup the parameters used to configure GPCCR_EL3 on every PE. 
*/ + gpt_config.plat_gpt_pgs = params->pgs; + gpt_config.plat_gpt_pps = params->pps; + gpt_config.plat_gpt_l0gptsz = params->l0gptsz; + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_clean_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); +#endif + + return 0; +} + +void gpt_enable(void) +{ + u_register_t gpccr_el3; + + /* Invalidate any stale TLB entries */ + tlbipaallos(); + +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); +#endif + +#ifdef IMAGE_BL2 + /* + * Granule tables must be initialised before enabling + * granule protection. + */ + assert(gpt_config.plat_gpt_l0_base != (uintptr_t)NULL); +#endif + write_gptbr_el3(gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT); + + /* GPCCR_EL3.L0GPTSZ */ + gpccr_el3 = SET_GPCCR_L0GPTSZ(gpt_config.plat_gpt_l0gptsz); + + /* GPCCR_EL3.PPS */ + gpccr_el3 |= SET_GPCCR_PPS(gpt_config.plat_gpt_pps); + + /* GPCCR_EL3.PGS */ + gpccr_el3 |= SET_GPCCR_PGS(gpt_config.plat_gpt_pgs); + + /* Set shareability attribute to Outer Shareable */ + gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS); + + /* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */ + gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA); + gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA); + + /* Enable GPT */ + gpccr_el3 |= GPCCR_GPC_BIT; + + write_gpccr_el3(gpccr_el3); + dsbsy(); + + VERBOSE("Granule Protection Checks enabled\n"); +} + +void gpt_disable(void) +{ + u_register_t gpccr_el3 = read_gpccr_el3(); + + write_gpccr_el3(gpccr_el3 &= ~GPCCR_GPC_BIT); + dsbsy(); +} + +#ifdef IMAGE_BL31 + +/* + * Each L1 descriptor is protected by 1 spinlock. The number of descriptors is + * equal to the size of the total protected memory area divided by the size of + * protected memory area covered by each descriptor. + * + * The size of memory covered by each descriptor is the 'size of the granule' x + * 'number of granules' in a descriptor. 
The former is PLAT_ARM_GPT_PGS and + * latter is always 16. + */ +static spinlock_t gpt_lock; + +static unsigned int get_l0_gpt_index(unsigned int pps, uint64_t pa) +{ + unsigned int idx; + + /* Get the index into the L0 table */ + idx = pa >> ONE_GB_SHIFT; + + /* Check if the pa lies within the PPS */ + if (idx & ~(l0_gpt_attr_lookup[pps].t_val_mask)) { + WARN("Invalid address 0x%llx.\n", pa); + return -EINVAL; + } + + return idx; +} + +int gpt_transition_pas(uint64_t pa, + unsigned int src_sec_state, + unsigned int target_pas) +{ + int idx; + unsigned int idx_shift; + unsigned int gpi; + uint64_t gpt_l1_desc; + uint64_t *gpt_l1_addr, *gpt_addr; + + /* + * Check if caller is allowed to transition the granule's PAS. + * + * - Secure world caller can only request S <-> NS transitions on a + * granule that is already in either S or NS PAS. + * + * - Realm world caller can only request R <-> NS transitions on a + * granule that is already in either R or NS PAS. + */ + if (src_sec_state == SMC_FROM_REALM) { + if ((target_pas != GPI_REALM) && (target_pas != GPI_NS)) { + WARN("Invalid caller (%s) and PAS (%d) combination.\n", + "realm world", target_pas); + return -EINVAL; + } + } else if (src_sec_state == SMC_FROM_SECURE) { + if ((target_pas != GPI_SECURE) && (target_pas != GPI_NS)) { + WARN("Invalid caller (%s) and PAS (%d) combination.\n", + "secure world", target_pas); + return -EINVAL; + } + } else { + WARN("Invalid caller security state 0x%x\n", src_sec_state); + return -EINVAL; + } + + /* Obtain the L0 GPT address. */ + gpt_addr = (uint64_t *)gpt_config.plat_gpt_l0_base; + + /* Validate physical address and obtain index into L0 GPT table */ + idx = get_l0_gpt_index(gpt_config.plat_gpt_pps, pa); + if (idx < 0U) { + return idx; + } + + VERBOSE("PA 0x%llx, L0 base addr 0x%llx, L0 index %u\n", + pa, (uint64_t)gpt_addr, idx); + + /* Obtain the L0 descriptor */ + gpt_l1_desc = gpt_addr[idx]; + + /* + * Check if it is a table descriptor. 
Granule transition only applies to + * memory ranges for which L1 tables were created at boot time. So there + * is no possibility of splitting and coalescing tables. + */ + if ((gpt_l1_desc & GPT_L1_INDEX_MASK) != GPT_TBL_DESC) { + WARN("Invalid address 0x%llx.\n", pa); + return -EPERM; + } + + /* Obtain the L1 table address from L0 descriptor. */ + gpt_l1_addr = (uint64_t *)(gpt_l1_desc & ~(0xFFF)); + + /* Obtain the index into the L1 table */ + idx = get_l1_gpt_index(gpt_config.plat_gpt_pgs, pa); + + VERBOSE("L1 table base addr 0x%llx, L1 table index %u\n", (uint64_t)gpt_l1_addr, idx); + + /* Lock access to the granule */ + spin_lock(&gpt_lock); + + /* Obtain the L1 descriptor */ + gpt_l1_desc = gpt_l1_addr[idx]; + + /* Obtain the shift for GPI in L1 GPT entry */ + idx_shift = (pa >> 12) & GPT_L1_INDEX_MASK; + idx_shift <<= 2; + + /* Obtain the current GPI encoding for this PA */ + gpi = (gpt_l1_desc >> idx_shift) & GPT_L1_INDEX_MASK; + + if (src_sec_state == SMC_FROM_REALM) { + /* + * Realm world is only allowed to transition a NS or Realm world + * granule. + */ + if ((gpi != GPI_REALM) && (gpi != GPI_NS)) { + WARN("Invalid transition request from %s.\n", + "realm world"); + spin_unlock(&gpt_lock); + return -EPERM; + } + } else if (src_sec_state == SMC_FROM_SECURE) { + /* + * Secure world is only allowed to transition a NS or Secure world + * granule. + */ + if ((gpi != GPI_SECURE) && (gpi != GPI_NS)) { + WARN("Invalid transition request from %s.\n", + "secure world"); + spin_unlock(&gpt_lock); + return -EPERM; + } + } + /* We don't need an else here since we already handle that above. 
*/ + + VERBOSE("L1 table desc 0x%llx before mod \n", gpt_l1_desc); + + /* Clear existing GPI encoding */ + gpt_l1_desc &= ~(GPT_L1_INDEX_MASK << idx_shift); + + /* Transition the granule to the new PAS */ + gpt_l1_desc |= ((uint64_t)target_pas << idx_shift); + + /* Update the L1 GPT entry */ + gpt_l1_addr[idx] = gpt_l1_desc; + + VERBOSE("L1 table desc 0x%llx after mod \n", gpt_l1_desc); + + /* Make sure change is propagated to other CPUs. */ +#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) + gpt_clean_dcache_range((uintptr_t)&gpt_addr[idx], sizeof(uint64_t)); +#endif + + gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB); + + /* Make sure that all the entries are written to the memory. */ + dsbishst(); + + /* Unlock access to the granule */ + spin_unlock(&gpt_lock); + + return 0; +} + +#endif /* IMAGE_BL31 */ From 9d870b79c16ef09b0c4a9db18e071c2fa235d1ad Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Sun, 11 Jul 2021 18:39:39 -0500 Subject: [PATCH 11/21] feat(plat/fvp): add RMM image support for FVP platform This patch adds the necessary changes needed to build and load RMM image for the FVP platform. RMM image is loaded by BL2 after BL32 (if BL32 exists) and before BL33. 
Signed-off-by: Zelalem Aweke Change-Id: I1ac9eade84c2e35c7479a322ca1d090b4e626819 --- plat/arm/board/fvp/fvp_common.c | 9 +++++ plat/arm/board/fvp/include/platform_def.h | 2 + plat/arm/board/fvp/platform.mk | 4 ++ .../common/aarch64/arm_bl2_mem_params_desc.c | 26 ++++++++++++- plat/arm/common/arm_bl31_setup.c | 37 +++++++++++++++---- plat/arm/common/fconf/arm_fconf_io.c | 6 +++ 6 files changed, 75 insertions(+), 9 deletions(-) diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c index 9d3c03133..1f9e4396b 100644 --- a/plat/arm/board/fvp/fvp_common.c +++ b/plat/arm/board/fvp/fvp_common.c @@ -191,6 +191,15 @@ const mmap_region_t plat_arm_mmap[] = { }; #endif +#ifdef IMAGE_RMM +const mmap_region_t plat_arm_mmap[] = { + V2M_MAP_IOFPGA, + MAP_DEVICE0, + MAP_DEVICE1, + {0} +}; +#endif + ARM_CASSERT_MMAP #if FVP_INTERCONNECT_DRIVER != FVP_CCN diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index a716546a4..6b084e491 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -185,6 +185,8 @@ # define PLATFORM_STACK_SIZE UL(0x800) #elif defined(IMAGE_BL32) # define PLATFORM_STACK_SIZE UL(0x440) +#elif defined(IMAGE_RMM) +# define PLATFORM_STACK_SIZE UL(0x440) #endif #define MAX_IO_DEVICES 3 diff --git a/plat/arm/board/fvp/platform.mk b/plat/arm/board/fvp/platform.mk index 73f09e50c..b37514626 100644 --- a/plat/arm/board/fvp/platform.mk +++ b/plat/arm/board/fvp/platform.mk @@ -186,6 +186,10 @@ ifeq (${COT_DESC_IN_DTB},1) BL2_SOURCES += plat/arm/common/fconf/fconf_nv_cntr_getter.c endif +ifeq (${ENABLE_RME},1) +BL2_SOURCES += plat/arm/board/fvp/aarch64/fvp_helpers.S +endif + ifeq (${BL2_AT_EL3},1) BL2_SOURCES += plat/arm/board/fvp/${ARCH}/fvp_helpers.S \ plat/arm/board/fvp/fvp_bl2_el3_setup.c \ diff --git a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c index 6a8943d5d..0666e57fa 100644 --- 
a/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c +++ b/plat/arm/common/aarch64/arm_bl2_mem_params_desc.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2016-2021, ARM Limited and Contributors. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -75,8 +75,10 @@ static bl_mem_params_node_t bl2_mem_params_descs[] = { .image_info.image_base = BL31_BASE, .image_info.image_max_size = BL31_LIMIT - BL31_BASE, -# ifdef BL32_BASE +# if defined(BL32_BASE) .next_handoff_image_id = BL32_IMAGE_ID, +# elif ENABLE_RME + .next_handoff_image_id = RMM_IMAGE_ID, # else .next_handoff_image_id = BL33_IMAGE_ID, # endif @@ -99,6 +101,22 @@ static bl_mem_params_node_t bl2_mem_params_descs[] = { VERSION_2, image_info_t, IMAGE_ATTRIB_SKIP_LOADING), .next_handoff_image_id = INVALID_IMAGE_ID, }, + +# if ENABLE_RME + /* Fill RMM related information */ + { + .image_id = RMM_IMAGE_ID, + SET_STATIC_PARAM_HEAD(ep_info, PARAM_EP, + VERSION_2, entry_point_info_t, EP_REALM | EXECUTABLE), + .ep_info.pc = RMM_BASE, + SET_STATIC_PARAM_HEAD(image_info, PARAM_EP, + VERSION_2, image_info_t, 0), + .image_info.image_base = RMM_BASE, + .image_info.image_max_size = RMM_LIMIT - RMM_BASE, + .next_handoff_image_id = BL33_IMAGE_ID, + }, +# endif + # ifdef BL32_BASE /* Fill BL32 related information */ { @@ -113,7 +131,11 @@ static bl_mem_params_node_t bl2_mem_params_descs[] = { .image_info.image_base = BL32_BASE, .image_info.image_max_size = BL32_LIMIT - BL32_BASE, +# if ENABLE_RME + .next_handoff_image_id = RMM_IMAGE_ID, +# else .next_handoff_image_id = BL33_IMAGE_ID, +# endif }, /* diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index b819888d3..85a795360 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2020, ARM Limited and Contributors. All rights reserved. + * Copyright (c) 2015-2021, ARM Limited and Contributors. 
All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ @@ -25,6 +25,9 @@ */ static entry_point_info_t bl32_image_ep_info; static entry_point_info_t bl33_image_ep_info; +#if ENABLE_RME +static entry_point_info_t rmm_image_ep_info; +#endif #if !RESET_TO_BL31 /* @@ -80,8 +83,18 @@ struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type) entry_point_info_t *next_image_info; assert(sec_state_is_valid(type)); - next_image_info = (type == NON_SECURE) - ? &bl33_image_ep_info : &bl32_image_ep_info; + if (type == NON_SECURE) { + next_image_info = &bl33_image_ep_info; + } +#if ENABLE_RME + else if (type == REALM) { + next_image_info = &rmm_image_ep_info; + } +#endif + else { + next_image_info = &bl32_image_ep_info; + } + /* * None of the images on the ARM development platforms can have 0x0 * as the entrypoint @@ -169,21 +182,31 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi bl_params_node_t *bl_params = params_from_bl2->head; /* - * Copy BL33 and BL32 (if present), entry point information. + * Copy BL33, BL32 and RMM (if present), entry point information. * They are stored in Secure RAM, in BL2's address space. 
*/ while (bl_params != NULL) { - if (bl_params->image_id == BL32_IMAGE_ID) + if (bl_params->image_id == BL32_IMAGE_ID) { bl32_image_ep_info = *bl_params->ep_info; - - if (bl_params->image_id == BL33_IMAGE_ID) + } +#if ENABLE_RME + else if (bl_params->image_id == RMM_IMAGE_ID) { + rmm_image_ep_info = *bl_params->ep_info; + } +#endif + else if (bl_params->image_id == BL33_IMAGE_ID) { bl33_image_ep_info = *bl_params->ep_info; + } bl_params = bl_params->next_params_info; } if (bl33_image_ep_info.pc == 0U) panic(); +#if ENABLE_RME + if (rmm_image_ep_info.pc == 0U) + panic(); +#endif #endif /* RESET_TO_BL31 */ # if ARM_LINUX_KERNEL_AS_BL33 diff --git a/plat/arm/common/fconf/arm_fconf_io.c b/plat/arm/common/fconf/arm_fconf_io.c index 86fd6d565..aea2f38d4 100644 --- a/plat/arm/common/fconf/arm_fconf_io.c +++ b/plat/arm/common/fconf/arm_fconf_io.c @@ -67,6 +67,7 @@ const io_uuid_spec_t arm_uuid_spec[MAX_NUMBER_IDS] = { [SOC_FW_CONFIG_ID] = {UUID_SOC_FW_CONFIG}, [TOS_FW_CONFIG_ID] = {UUID_TOS_FW_CONFIG}, [NT_FW_CONFIG_ID] = {UUID_NT_FW_CONFIG}, + [RMM_IMAGE_ID] = {UUID_REALM_MONITOR_MGMT_FIRMWARE}, #endif /* ARM_IO_IN_DTB */ #if TRUSTED_BOARD_BOOT [TRUSTED_BOOT_FW_CERT_ID] = {UUID_TRUSTED_BOOT_FW_CERT}, @@ -162,6 +163,11 @@ struct plat_io_policy policies[MAX_NUMBER_IDS] = { (uintptr_t)&arm_uuid_spec[BL33_IMAGE_ID], open_fip }, + [RMM_IMAGE_ID] = { + &fip_dev_handle, + (uintptr_t)&arm_uuid_spec[RMM_IMAGE_ID], + open_fip + }, [HW_CONFIG_ID] = { &fip_dev_handle, (uintptr_t)&arm_uuid_spec[HW_CONFIG_ID], From 4bb72c47dd78fb4119c0e41e283f295cc471d33b Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Mon, 12 Jul 2021 22:33:55 -0500 Subject: [PATCH 12/21] refactor(plat/arm): modify memory region attributes to account for FEAT_RME If FEAT_RME is enabled, EL3 runs in the Root world as opposed to Secure world. This patch changes EL3 memory region attributes for Arm platforms accordingly. 
Signed-off-by: Zelalem Aweke Change-Id: Ie176f8b440ff34330e4e44bd3bf8d9703b3892ff --- include/plat/arm/common/arm_def.h | 20 ++++++++++---------- plat/arm/common/arm_bl1_setup.c | 8 ++++---- plat/arm/common/arm_bl2_setup.c | 8 +++++++- plat/arm/common/arm_bl31_setup.c | 6 +++--- 4 files changed, 24 insertions(+), 18 deletions(-) diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h index 7cc215f22..173591f1b 100644 --- a/include/plat/arm/common/arm_def.h +++ b/include/plat/arm/common/arm_def.h @@ -209,7 +209,7 @@ #define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \ ARM_SHARED_RAM_BASE, \ ARM_SHARED_RAM_SIZE, \ - MT_DEVICE | MT_RW | MT_SECURE) + MT_DEVICE | MT_RW | EL3_PAS) #define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \ ARM_NS_DRAM1_BASE, \ @@ -236,7 +236,7 @@ #define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \ ARM_EL3_TZC_DRAM1_BASE, \ ARM_EL3_TZC_DRAM1_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) #if defined(SPD_spmd) #define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \ @@ -255,7 +255,7 @@ #define ARM_MAP_BL1_RW MAP_REGION_FLAT( \ BL1_RW_BASE, \ BL1_RW_LIMIT - BL1_RW_BASE, \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) /* * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section @@ -265,35 +265,35 @@ #define ARM_MAP_BL_RO MAP_REGION_FLAT( \ BL_CODE_BASE, \ BL_CODE_END - BL_CODE_BASE, \ - MT_CODE | MT_SECURE), \ + MT_CODE | EL3_PAS), \ MAP_REGION_FLAT( \ BL_RO_DATA_BASE, \ BL_RO_DATA_END \ - BL_RO_DATA_BASE, \ - MT_RO_DATA | MT_SECURE) + MT_RO_DATA | EL3_PAS) #else #define ARM_MAP_BL_RO MAP_REGION_FLAT( \ BL_CODE_BASE, \ BL_CODE_END - BL_CODE_BASE, \ - MT_CODE | MT_SECURE) + MT_CODE | EL3_PAS) #endif #if USE_COHERENT_MEM #define ARM_MAP_BL_COHERENT_RAM MAP_REGION_FLAT( \ BL_COHERENT_RAM_BASE, \ BL_COHERENT_RAM_END \ - BL_COHERENT_RAM_BASE, \ - MT_DEVICE | MT_RW | MT_SECURE) + MT_DEVICE | MT_RW | EL3_PAS) #endif #if USE_ROMLIB #define ARM_MAP_ROMLIB_CODE MAP_REGION_FLAT( \ ROMLIB_RO_BASE, \ 
ROMLIB_RO_LIMIT - ROMLIB_RO_BASE,\ - MT_CODE | MT_SECURE) + MT_CODE | EL3_PAS) #define ARM_MAP_ROMLIB_DATA MAP_REGION_FLAT( \ ROMLIB_RW_BASE, \ ROMLIB_RW_END - ROMLIB_RW_BASE,\ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) #endif /* @@ -308,7 +308,7 @@ #define ARM_MAP_BL_CONFIG_REGION MAP_REGION_FLAT(ARM_BL_RAM_BASE, \ (ARM_FW_CONFIGS_LIMIT \ - ARM_BL_RAM_BASE), \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) /* * The max number of regions like RO(code), coherent and data required by diff --git a/plat/arm/common/arm_bl1_setup.c b/plat/arm/common/arm_bl1_setup.c index 872de3e3d..320bb8274 100644 --- a/plat/arm/common/arm_bl1_setup.c +++ b/plat/arm/common/arm_bl1_setup.c @@ -32,7 +32,7 @@ #define MAP_BL1_TOTAL MAP_REGION_FLAT( \ bl1_tzram_layout.total_base, \ bl1_tzram_layout.total_size, \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) /* * If SEPARATE_CODE_AND_RODATA=1 we define a region for each section * otherwise one region is defined containing both @@ -41,17 +41,17 @@ #define MAP_BL1_RO MAP_REGION_FLAT( \ BL_CODE_BASE, \ BL1_CODE_END - BL_CODE_BASE, \ - MT_CODE | MT_SECURE), \ + MT_CODE | EL3_PAS), \ MAP_REGION_FLAT( \ BL1_RO_DATA_BASE, \ BL1_RO_DATA_END \ - BL_RO_DATA_BASE, \ - MT_RO_DATA | MT_SECURE) + MT_RO_DATA | EL3_PAS) #else #define MAP_BL1_RO MAP_REGION_FLAT( \ BL_CODE_BASE, \ BL1_CODE_END - BL_CODE_BASE, \ - MT_CODE | MT_SECURE) + MT_CODE | EL3_PAS) #endif /* Data structure which holds the extents of the trusted SRAM for BL1*/ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index 5b26a1d3b..ae62016d0 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -45,11 +45,17 @@ CASSERT(BL2_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl2_base_overflows); #pragma weak bl2_plat_get_hash #endif +#if ENABLE_RME +#define MAP_BL2_TOTAL MAP_REGION_FLAT( \ + bl2_tzram_layout.total_base, \ + bl2_tzram_layout.total_size, \ + MT_MEMORY | MT_RW | MT_ROOT) +#else 
#define MAP_BL2_TOTAL MAP_REGION_FLAT( \ bl2_tzram_layout.total_base, \ bl2_tzram_layout.total_size, \ MT_MEMORY | MT_RW | MT_SECURE) - +#endif /* ENABLE_RME */ #pragma weak arm_bl2_plat_handle_post_image_load diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index 85a795360..3286710de 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -46,7 +46,7 @@ CASSERT(BL31_BASE >= ARM_FW_CONFIG_LIMIT, assert_bl31_base_overflows); #define MAP_BL31_TOTAL MAP_REGION_FLAT( \ BL31_START, \ BL31_END - BL31_START, \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) #if RECLAIM_INIT_CODE IMPORT_SYM(unsigned long, __INIT_CODE_START__, BL_INIT_CODE_BASE); IMPORT_SYM(unsigned long, __INIT_CODE_END__, BL_CODE_END_UNALIGNED); @@ -61,7 +61,7 @@ IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED); BL_INIT_CODE_BASE, \ BL_INIT_CODE_END \ - BL_INIT_CODE_BASE, \ - MT_CODE | MT_SECURE) + MT_CODE | EL3_PAS) #endif #if SEPARATE_NOBITS_REGION @@ -69,7 +69,7 @@ IMPORT_SYM(unsigned long, __STACKS_END__, BL_STACKS_END_UNALIGNED); BL31_NOBITS_BASE, \ BL31_NOBITS_LIMIT \ - BL31_NOBITS_BASE, \ - MT_MEMORY | MT_RW | MT_SECURE) + MT_MEMORY | MT_RW | EL3_PAS) #endif /******************************************************************************* From c8720729726faffc39ec64f3a02440a48c8c305a Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Mon, 12 Jul 2021 23:41:05 -0500 Subject: [PATCH 13/21] feat(plat/fvp): add memory map for FVP platform for FEAT_RME When FEAT_RME is enabled, memory is divided into four Physical Address Spaces (PAS): Root, Realm, Secure and Non-secure. This patch introduces new carveouts for the Trusted SRAM and DRAM for the FVP platform accordingly. The following new regions are introduced with this change: ARM_MAP_L0_GPT_REGION: Trusted SRAM region used to store Level 0 Granule Protection Table (GPT). This region resides in the Root PAS. 
ARM_MAP_GPT_L1_DRAM: DRAM region used to store Level 1 GPT. It resides in the Root PAS. ARM_MAP_RMM_DRAM: DRAM region used to store RMM image. It resides in the Realm PAS. The L0 GPT is stored on Trusted SRAM next to firmware configuration memory. The DRAM carveout when RME is enabled is modified as follows: -------------------- | | | AP TZC (~28MB) | -------------------- | | | REALM (32MB) | -------------------- | | | EL3 TZC (3MB) | -------------------- | L1 GPT + SCP TZC | | (~1MB) | 0xFFFF_FFFF -------------------- During initialization of the TrustZone controller, Root regions are configured as Secure regions. Then they are later reconfigured to Root upon GPT initialization. Signed-off-by: Zelalem Aweke Change-Id: If2e257141d51f51f715b70d4a06f18af53607254 --- include/plat/arm/common/arm_def.h | 125 ++++++++++++++++++---- include/plat/arm/common/arm_pas_def.h | 90 ++++++++++++++++ include/plat/arm/common/plat_arm.h | 15 ++- plat/arm/board/fvp/fvp_common.c | 7 ++ plat/arm/board/fvp/include/platform_def.h | 30 ++++-- plat/arm/common/arm_bl2_setup.c | 3 + plat/arm/common/arm_bl31_setup.c | 3 + 7 files changed, 246 insertions(+), 27 deletions(-) create mode 100644 include/plat/arm/common/arm_pas_def.h diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h index 173591f1b..a8b5d26df 100644 --- a/include/plat/arm/common/arm_def.h +++ b/include/plat/arm/common/arm_def.h @@ -74,38 +74,84 @@ ARM_SHARED_RAM_SIZE) /* - * The top 16MB of DRAM1 is configured as secure access only using the TZC + * The top 16MB (or 64MB if RME is enabled) of DRAM1 is configured as + * follows: * - SCP TZC DRAM: If present, DRAM reserved for SCP use + * - L1 GPT DRAM: Reserved for L1 GPT if RME is enabled + * - REALM DRAM: Reserved for Realm world if RME is enabled * - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use + * + * RME enabled(64MB) RME not enabled(16MB) + * -------------------- ------------------- + * | | | | + * | AP TZC (~28MB) | | AP TZC
(~14MB) | + * -------------------- ------------------- + * | | | | + * | REALM (32MB) | | EL3 TZC (2MB) | + * -------------------- ------------------- + * | | | | + * | EL3 TZC (3MB) | | SCP TZC | + * -------------------- 0xFFFF_FFFF------------------- + * | L1 GPT + SCP TZC | + * | (~1MB) | + * 0xFFFF_FFFF -------------------- */ -#define ARM_TZC_DRAM1_SIZE UL(0x01000000) - -#define ARM_SCP_TZC_DRAM1_BASE (ARM_DRAM1_BASE + \ - ARM_DRAM1_SIZE - \ - ARM_SCP_TZC_DRAM1_SIZE) -#define ARM_SCP_TZC_DRAM1_SIZE PLAT_ARM_SCP_TZC_DRAM1_SIZE -#define ARM_SCP_TZC_DRAM1_END (ARM_SCP_TZC_DRAM1_BASE + \ - ARM_SCP_TZC_DRAM1_SIZE - 1U) - +#if ENABLE_RME +#define ARM_TZC_DRAM1_SIZE UL(0x04000000) /* 64MB */ /* - * Define a 2MB region within the TZC secured DRAM for use by EL3 runtime + * Define a region within the TZC secured DRAM for use by EL3 runtime * firmware. This region is meant to be NOLOAD and will not be zero * initialized. Data sections with the attribute `arm_el3_tzc_dram` will be - * placed here. + * placed here. 3MB region is reserved if RME is enabled, 2MB otherwise. 
*/ -#define ARM_EL3_TZC_DRAM1_BASE (ARM_SCP_TZC_DRAM1_BASE - ARM_EL3_TZC_DRAM1_SIZE) -#define ARM_EL3_TZC_DRAM1_SIZE UL(0x00200000) /* 2 MB */ +#define ARM_EL3_TZC_DRAM1_SIZE UL(0x00300000) /* 3MB */ +#define ARM_L1_GPT_SIZE UL(0x00100000) /* 1MB */ +#define ARM_REALM_SIZE UL(0x02000000) /* 32MB */ +#else +#define ARM_TZC_DRAM1_SIZE UL(0x01000000) /* 16MB */ +#define ARM_EL3_TZC_DRAM1_SIZE UL(0x00200000) /* 2MB */ +#define ARM_L1_GPT_SIZE UL(0) +#define ARM_REALM_SIZE UL(0) +#endif /* ENABLE_RME */ + +#define ARM_SCP_TZC_DRAM1_BASE (ARM_DRAM1_BASE + \ + ARM_DRAM1_SIZE - \ + (ARM_SCP_TZC_DRAM1_SIZE + \ + ARM_L1_GPT_SIZE)) +#define ARM_SCP_TZC_DRAM1_SIZE PLAT_ARM_SCP_TZC_DRAM1_SIZE +#define ARM_SCP_TZC_DRAM1_END (ARM_SCP_TZC_DRAM1_BASE + \ + ARM_SCP_TZC_DRAM1_SIZE - 1U) +#if ENABLE_RME +#define ARM_L1_GPT_ADDR_BASE (ARM_DRAM1_BASE + \ + ARM_DRAM1_SIZE - \ + ARM_L1_GPT_SIZE) +#define ARM_L1_GPT_END (ARM_L1_GPT_ADDR_BASE + \ + ARM_L1_GPT_SIZE - 1U) + +#define ARM_REALM_BASE (ARM_DRAM1_BASE + \ + ARM_DRAM1_SIZE - \ + (ARM_SCP_TZC_DRAM1_SIZE + \ + ARM_EL3_TZC_DRAM1_SIZE + \ + ARM_REALM_SIZE + \ + ARM_L1_GPT_SIZE)) +#define ARM_REALM_END (ARM_REALM_BASE + ARM_REALM_SIZE - 1U) +#endif /* ENABLE_RME */ + +#define ARM_EL3_TZC_DRAM1_BASE (ARM_SCP_TZC_DRAM1_BASE - \ + ARM_EL3_TZC_DRAM1_SIZE) #define ARM_EL3_TZC_DRAM1_END (ARM_EL3_TZC_DRAM1_BASE + \ ARM_EL3_TZC_DRAM1_SIZE - 1U) #define ARM_AP_TZC_DRAM1_BASE (ARM_DRAM1_BASE + \ - ARM_DRAM1_SIZE - \ - ARM_TZC_DRAM1_SIZE) + ARM_DRAM1_SIZE - \ + ARM_TZC_DRAM1_SIZE) #define ARM_AP_TZC_DRAM1_SIZE (ARM_TZC_DRAM1_SIZE - \ - (ARM_SCP_TZC_DRAM1_SIZE + \ - ARM_EL3_TZC_DRAM1_SIZE)) + (ARM_SCP_TZC_DRAM1_SIZE + \ + ARM_EL3_TZC_DRAM1_SIZE + \ + ARM_REALM_SIZE + \ + ARM_L1_GPT_SIZE)) #define ARM_AP_TZC_DRAM1_END (ARM_AP_TZC_DRAM1_BASE + \ - ARM_AP_TZC_DRAM1_SIZE - 1U) + ARM_AP_TZC_DRAM1_SIZE - 1U) /* Define the Access permissions for Secure peripherals to NS_DRAM */ #if ARM_CRYPTOCELL_INTEG @@ -245,6 +291,19 @@ MT_MEMORY | MT_RW | MT_SECURE) 
#endif +#if ENABLE_RME +#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \ + PLAT_ARM_RMM_BASE, \ + PLAT_ARM_RMM_SIZE, \ + MT_MEMORY | MT_RW | MT_REALM) + + +#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \ + ARM_L1_GPT_ADDR_BASE, \ + ARM_L1_GPT_SIZE, \ + MT_MEMORY | MT_RW | EL3_PAS) + +#endif /* ENABLE_RME */ /* * Mapping for the BL1 RW region. This mapping is needed by BL2 in order to @@ -309,6 +368,14 @@ (ARM_FW_CONFIGS_LIMIT \ - ARM_BL_RAM_BASE), \ MT_MEMORY | MT_RW | EL3_PAS) +/* + * Map L0_GPT with read and write permissions + */ +#if ENABLE_RME +#define ARM_MAP_L0_GPT_REGION MAP_REGION_FLAT(ARM_L0_GPT_ADDR_BASE, \ + ARM_L0_GPT_SIZE, \ + MT_MEMORY | MT_RW | MT_ROOT) +#endif /* * The max number of regions like RO(code), coherent and data required by @@ -409,6 +476,18 @@ */ #define ARM_FW_CONFIGS_LIMIT (ARM_BL_RAM_BASE + (PAGE_SIZE * 2)) +#if ENABLE_RME +/* + * Store the L0 GPT on Trusted SRAM next to firmware + * configuration memory, 4KB aligned. + */ +#define ARM_L0_GPT_SIZE (PAGE_SIZE) +#define ARM_L0_GPT_ADDR_BASE (ARM_FW_CONFIGS_LIMIT) +#define ARM_L0_GPT_LIMIT (ARM_L0_GPT_ADDR_BASE + ARM_L0_GPT_SIZE) +#else +#define ARM_L0_GPT_SIZE U(0) +#endif + /******************************************************************************* * BL1 specific defines. 
* BL1 RW data is relocated from ROM to RAM at runtime so we need 2 sets of @@ -501,6 +580,14 @@ #endif #endif +/****************************************************************************** + * RMM specific defines + *****************************************************************************/ +#if ENABLE_RME +#define RMM_BASE (ARM_REALM_BASE) +#define RMM_LIMIT (RMM_BASE + ARM_REALM_SIZE) +#endif + #if !defined(__aarch64__) || JUNO_AARCH32_EL3_RUNTIME /******************************************************************************* * BL32 specific defines for EL3 runtime in AArch32 mode diff --git a/include/plat/arm/common/arm_pas_def.h b/include/plat/arm/common/arm_pas_def.h new file mode 100644 index 000000000..a8ebee3f1 --- /dev/null +++ b/include/plat/arm/common/arm_pas_def.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2021, ARM Limited and Contributors. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ +#ifndef ARM_PAS_DEF_H +#define ARM_PAS_DEF_H + +#include + +/***************************************************************************** + * PAS regions used to initialize the Granule Protection Table (GPT) + ****************************************************************************/ + +/* + * The PA space is initially mapped in the GPT as follows: + * + * ============================================================================ + * Base Addr| Size |L? 
GPT|PAS |Content |Comment + * ============================================================================ + * 0GB | 1GB |L0 GPT|ANY |TBROM (EL3 code) |Fixed mapping + * | | | |TSRAM (EL3 data) | + * | | | |IO (incl.UARTs & GIC) | + * ---------------------------------------------------------------------------- + * 1GB | 1GB |L0 GPT|ANY |IO |Fixed mapping + * ---------------------------------------------------------------------------- + * 2GB | 1GB |L1 GPT|NS |DRAM (NS Kernel) |Use T.Descrip + * ---------------------------------------------------------------------------- + * 3GB |1GB-64MB |L1 GPT|NS |DRAM (NS Kernel) |Use T.Descrip + * ---------------------------------------------------------------------------- + * 4GB-64MB |64MB-32MB | | | | + * | -4MB |L1 GPT|SECURE|DRAM TZC |Use T.Descrip + * ---------------------------------------------------------------------------- + * 4GB-32MB | | | | | + * -3MB-1MB |32MB |L1 GPT|REALM |RMM |Use T.Descrip + * ---------------------------------------------------------------------------- + * 4GB-3MB | | | | | + * -1MB |3MB |L1 GPT|ROOT |EL3 DRAM data |Use T.Descrip + * ---------------------------------------------------------------------------- + * 4GB-1MB |1MB |L1 GPT|ROOT |DRAM (L1 GPTs, SCP TZC) |Fixed mapping + * ============================================================================ + * + * - 4KB of L0 GPT reside in TSRAM, on top of the CONFIG section. + * - ~1MB of L1 GPTs reside at the top of DRAM1 (TZC area). + * - The first 1GB region has GPI_ANY and, therefore, is not protected by + * the GPT. + * - The DRAM TZC area is split into three regions: the L1 GPT region and + * 3MB of region below that are defined as GPI_ROOT, 32MB Realm region + * below that is defined as GPI_REALM and the rest of it is defined as + * GPI_SECURE. 
+ */ + +/* TODO: This might not be the best way to map the PAS */ + +/* Device memory 0 to 2GB */ +#define ARM_PAS_1_BASE (U(0)) +#define ARM_PAS_1_SIZE ((ULL(1)<<31)) /* 2GB */ + +/* NS memory 2GB to (end - 64MB) */ +#define ARM_PAS_2_BASE (ARM_PAS_1_BASE + ARM_PAS_1_SIZE) +#define ARM_PAS_2_SIZE (ARM_NS_DRAM1_SIZE) + +/* Secure TZC region */ +#define ARM_PAS_3_BASE (ARM_AP_TZC_DRAM1_BASE) +#define ARM_PAS_3_SIZE (ARM_AP_TZC_DRAM1_SIZE) + +#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \ + ARM_PAS_1_SIZE, \ + GPI_ANY) +#define ARM_PAS_KERNEL MAP_GPT_REGION_TBL(ARM_PAS_2_BASE, \ + ARM_PAS_2_SIZE, \ + GPI_NS) + +#define ARM_PAS_TZC MAP_GPT_REGION_TBL(ARM_PAS_3_BASE, \ + ARM_PAS_3_SIZE, \ + GPI_SECURE) + +#define ARM_PAS_REALM MAP_GPT_REGION_TBL(ARM_REALM_BASE, \ + ARM_REALM_SIZE, \ + GPI_REALM) + +#define ARM_PAS_EL3_DRAM MAP_GPT_REGION_TBL(ARM_EL3_TZC_DRAM1_BASE, \ + ARM_EL3_TZC_DRAM1_SIZE, \ + GPI_ROOT) + +#define ARM_PAS_GPTS MAP_GPT_REGION_TBL(ARM_L1_GPT_ADDR_BASE, \ + ARM_L1_GPT_SIZE, \ + GPI_ROOT) + +#endif /* ARM_PAS_DEF_H */ diff --git a/include/plat/arm/common/plat_arm.h b/include/plat/arm/common/plat_arm.h index 0a19d8b36..1500ed379 100644 --- a/include/plat/arm/common/plat_arm.h +++ b/include/plat/arm/common/plat_arm.h @@ -41,7 +41,7 @@ typedef struct arm_tzc_regions_info { ******************************************************************************/ #if SPM_MM #define ARM_TZC_REGIONS_DEF \ - {ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END, \ + {ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END + ARM_L1_GPT_SIZE,\ TZC_REGION_S_RDWR, 0}, \ {ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS, \ PLAT_ARM_TZC_NS_DEV_ACCESS}, \ @@ -51,9 +51,20 @@ typedef struct arm_tzc_regions_info { PLAT_SP_IMAGE_NS_BUF_SIZE) - 1, TZC_REGION_S_NONE, \ PLAT_ARM_TZC_NS_DEV_ACCESS} +#elif ENABLE_RME +#define ARM_TZC_REGIONS_DEF \ + {ARM_AP_TZC_DRAM1_BASE, ARM_AP_TZC_DRAM1_END, TZC_REGION_S_RDWR, 0},\ + {ARM_EL3_TZC_DRAM1_BASE, ARM_L1_GPT_END, TZC_REGION_S_RDWR, 0}, 
\ + {ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS, \ + PLAT_ARM_TZC_NS_DEV_ACCESS}, \ + {ARM_REALM_BASE, ARM_REALM_END, ARM_TZC_NS_DRAM_S_ACCESS, \ + PLAT_ARM_TZC_NS_DEV_ACCESS}, \ + {ARM_DRAM2_BASE, ARM_DRAM2_END, ARM_TZC_NS_DRAM_S_ACCESS, \ + PLAT_ARM_TZC_NS_DEV_ACCESS} + #else #define ARM_TZC_REGIONS_DEF \ - {ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END, \ + {ARM_AP_TZC_DRAM1_BASE, ARM_EL3_TZC_DRAM1_END + ARM_L1_GPT_SIZE,\ TZC_REGION_S_RDWR, 0}, \ {ARM_NS_DRAM1_BASE, ARM_NS_DRAM1_END, ARM_TZC_NS_DRAM_S_ACCESS, \ PLAT_ARM_TZC_NS_DEV_ACCESS}, \ diff --git a/plat/arm/board/fvp/fvp_common.c b/plat/arm/board/fvp/fvp_common.c index 1f9e4396b..e7a28ac35 100644 --- a/plat/arm/board/fvp/fvp_common.c +++ b/plat/arm/board/fvp/fvp_common.c @@ -107,6 +107,10 @@ const mmap_region_t plat_arm_mmap[] = { #if defined(SPD_spmd) ARM_MAP_TRUSTED_DRAM, #endif +#if ENABLE_RME + ARM_MAP_RMM_DRAM, + ARM_MAP_GPT_L1_DRAM, +#endif /* ENABLE_RME */ #ifdef SPD_tspd ARM_MAP_TSP_SEC_MEM, #endif @@ -159,6 +163,9 @@ const mmap_region_t plat_arm_mmap[] = { #endif /* Required by fconf APIs to read HW_CONFIG dtb loaded into DRAM */ ARM_DTB_DRAM_NS, +#if ENABLE_RME + ARM_MAP_GPT_L1_DRAM, +#endif {0} }; diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index 6b084e491..6e72b5999 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -43,6 +43,11 @@ #define PLAT_ARM_TRUSTED_DRAM_BASE UL(0x06000000) #define PLAT_ARM_TRUSTED_DRAM_SIZE UL(0x02000000) /* 32 MB */ +#if ENABLE_RME +#define PLAT_ARM_RMM_BASE (RMM_BASE) +#define PLAT_ARM_RMM_SIZE (RMM_LIMIT - RMM_BASE) +#endif + /* * Max size of SPMC is 2MB for fvp. With SPMD enabled this value corresponds to * max size of BL32 image. 
@@ -80,15 +85,27 @@ #if defined(IMAGE_BL31) # if SPM_MM # define PLAT_ARM_MMAP_ENTRIES 10 -# define MAX_XLAT_TABLES 9 +# if ENABLE_RME +# define MAX_XLAT_TABLES 10 +# else +# define MAX_XLAT_TABLES 9 +# endif # define PLAT_SP_IMAGE_MMAP_REGIONS 30 # define PLAT_SP_IMAGE_MAX_XLAT_TABLES 10 # else # define PLAT_ARM_MMAP_ENTRIES 9 # if USE_DEBUGFS -# define MAX_XLAT_TABLES 8 +# if ENABLE_RME +# define MAX_XLAT_TABLES 9 +# else +# define MAX_XLAT_TABLES 8 +# endif # else -# define MAX_XLAT_TABLES 7 +# if ENABLE_RME +# define MAX_XLAT_TABLES 8 +# else +# define MAX_XLAT_TABLES 7 +# endif # endif # endif #elif defined(IMAGE_BL32) @@ -137,16 +154,17 @@ #endif #if RESET_TO_BL31 -/* Size of Trusted SRAM - the first 4KB of shared memory */ +/* Size of Trusted SRAM - the first 4KB of shared memory - GPT L0 Tables */ #define PLAT_ARM_MAX_BL31_SIZE (PLAT_ARM_TRUSTED_SRAM_SIZE - \ - ARM_SHARED_RAM_SIZE) + ARM_SHARED_RAM_SIZE - \ + ARM_L0_GPT_SIZE) #else /* * Since BL31 NOBITS overlays BL2 and BL1-RW, PLAT_ARM_MAX_BL31_SIZE is * calculated using the current BL31 PROGBITS debug size plus the sizes of * BL2 and BL1-RW */ -#define PLAT_ARM_MAX_BL31_SIZE UL(0x3D000) +#define PLAT_ARM_MAX_BL31_SIZE (UL(0x3D000) - ARM_L0_GPT_SIZE) #endif /* RESET_TO_BL31 */ #ifndef __aarch64__ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index ae62016d0..758a061b8 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -149,6 +149,9 @@ void arm_bl2_plat_arch_setup(void) ARM_MAP_BL_COHERENT_RAM, #endif ARM_MAP_BL_CONFIG_REGION, +#if ENABLE_RME + ARM_MAP_L0_GPT_REGION, +#endif {0} }; diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index 3286710de..d2bacd3fa 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -378,6 +378,9 @@ void __init arm_bl31_plat_arch_setup(void) { const mmap_region_t bl_regions[] = { MAP_BL31_TOTAL, +#if ENABLE_RME + ARM_MAP_L0_GPT_REGION, +#endif #if 
RECLAIM_INIT_CODE MAP_BL_INIT_CODE, #endif From deb4b3a63e3a52f2e9823865a1932f6289ccb7ac Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Tue, 13 Jul 2021 17:19:54 -0500 Subject: [PATCH 14/21] feat(plat/arm): add GPT initialization code for Arm platforms When RME is enabled, during configuration of the TrustZone controller, Root regions are initially configured as Secure regions, and Realm regions as Non-secure regions. Then later these regions are configured as Root and Realm regions respectively in the GPT. According to the RME architecture reference manual, Root firmware must ensure that Granule Protection Check is enabled before enabling any stage of translation. Therefore initializations are done as follows when RME is enabled : Initialize/enable the TrustZone controller (plat_arm_security_setup) --> Initialize/enable GPC (arm_bl2_plat_gpt_setup) --> enable MMU (enable_mmu_el3) Signed-off-by: Zelalem Aweke Change-Id: I91094e8259079437bee02de1f65edb9ad51e43cf --- include/plat/arm/common/arm_pas_def.h | 5 +++ plat/arm/common/arm_bl2_setup.c | 60 ++++++++++++++++++++++++++- plat/arm/common/arm_bl31_setup.c | 24 +++++++++++ 3 files changed, 87 insertions(+), 2 deletions(-) diff --git a/include/plat/arm/common/arm_pas_def.h b/include/plat/arm/common/arm_pas_def.h index a8ebee3f1..d268ce613 100644 --- a/include/plat/arm/common/arm_pas_def.h +++ b/include/plat/arm/common/arm_pas_def.h @@ -87,4 +87,9 @@ ARM_L1_GPT_SIZE, \ GPI_ROOT) +/* GPT Configuration options */ +#define PLATFORM_PGS GPCCR_PGS_4K +#define PLATFORM_PPS GPCCR_PPS_4GB +#define PLATFORM_L0GPTSZ GPCCR_L0GPTSZ_30BITS + #endif /* ARM_PAS_DEF_H */ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index 758a061b8..ef372068a 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -9,6 +9,7 @@ #include +#include #include #include #include @@ -17,10 +18,12 @@ #include #include #include +#include #ifdef SPD_opteed #include #endif #include +#include 
#include #include @@ -111,8 +114,10 @@ void bl2_plat_preload_setup(void) */ void arm_bl2_platform_setup(void) { +#if !ENABLE_RME /* Initialize the secure environment */ plat_arm_security_setup(); +#endif #if defined(PLAT_ARM_MEM_PROT_ADDR) arm_nor_psci_do_static_mem_protect(); @@ -124,9 +129,47 @@ void bl2_platform_setup(void) arm_bl2_platform_setup(); } +#if ENABLE_RME +static void arm_bl2_plat_gpt_setup(void) +{ + /* + * The GPT library might modify the gpt regions structure to optimize + * the layout, so the array cannot be constant. + */ + pas_region_t pas_regions[] = { + ARM_PAS_GPI_ANY, + ARM_PAS_KERNEL, + ARM_PAS_TZC, + ARM_PAS_REALM, + ARM_PAS_EL3_DRAM, + ARM_PAS_GPTS + }; + + gpt_init_params_t gpt_params = { + PLATFORM_PGS, + PLATFORM_PPS, + PLATFORM_L0GPTSZ, + pas_regions, + (unsigned int)(sizeof(pas_regions)/sizeof(pas_region_t)), + ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, + ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE + }; + + /* Initialise the global granule tables */ + INFO("Enabling Granule Protection Checks\n"); + if (gpt_init(&gpt_params) < 0) { + panic(); + } + + gpt_enable(); +} +#endif /* ENABLE_RME */ + /******************************************************************************* - * Perform the very early platform specific architectural setup here. At the - * moment this is only initializes the mmu in a quick and dirty way. + * Perform the very early platform specific architectural setup here. + * When RME is enabled the secure environment is initialised before + * initialising and enabling Granule Protection. + * This function initialises the MMU in a quick and dirty way. 
******************************************************************************/ void arm_bl2_plat_arch_setup(void) { @@ -155,10 +198,23 @@ void arm_bl2_plat_arch_setup(void) {0} }; +#if ENABLE_RME + /* Initialise the secure environment */ + plat_arm_security_setup(); + + /* Initialise and enable Granule Protection */ + arm_bl2_plat_gpt_setup(); +#endif setup_page_tables(bl_regions, plat_arm_get_mmap()); #ifdef __aarch64__ +#if ENABLE_RME + /* BL2 runs in EL3 when RME enabled. */ + assert(get_armv9_2_feat_rme_support() != 0U); + enable_mmu_el3(0); +#else enable_mmu_el1(0); +#endif #else enable_mmu_svc_mon(0); #endif diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index d2bacd3fa..d76031282 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -13,8 +13,10 @@ #include #include #include +#include #include #include +#include #include #include #include @@ -229,6 +231,28 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi */ bl33_image_ep_info.args.arg0 = (u_register_t)ARM_DRAM1_BASE; #endif + +#if ENABLE_RME + /* + * Initialise Granule Protection library and enable GPC + * for the primary processor. The tables were initialised + * in BL2, so there is no need to provide any PAS here. + */ + gpt_init_params_t gpt_params = { + PLATFORM_PGS, + PLATFORM_PPS, + PLATFORM_L0GPTSZ, + NULL, + 0U, + ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, + ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE + }; + + /* Initialise the global granule tables. 
*/ + if (gpt_init(&gpt_params) < 0) { + panic(); + } +#endif /* ENABLE_RME */ } void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, From dbbc9a6790e8da63e5b2c19b7e01a2c303b156e3 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Tue, 13 Jul 2021 18:59:19 -0500 Subject: [PATCH 15/21] refactor(plat/fvp): update FVP platform DTS for FEAT_RME This patch makes minor modifications to the FVP DTS including modifying the Non-secure memory range when RME is enabled. Signed-off-by: Zelalem Aweke Change-Id: I6b3650a2abfff10462a8a2d42755e6d764f7b035 --- fdts/fvp-base-gicv3-psci-common.dtsi | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fdts/fvp-base-gicv3-psci-common.dtsi b/fdts/fvp-base-gicv3-psci-common.dtsi index b6753de8e..3cb613f63 100644 --- a/fdts/fvp-base-gicv3-psci-common.dtsi +++ b/fdts/fvp-base-gicv3-psci-common.dtsi @@ -24,7 +24,11 @@ #address-cells = <2>; #size-cells = <2>; - chosen { }; +#if (ENABLE_RME == 1) + chosen { bootargs = "mem=1G console=ttyAMA0 earlycon=pl011,0x1c090000 root=/dev/vda ip=on";}; +#else + chosen {}; +#endif aliases { serial0 = &v2m_serial0; @@ -135,8 +139,13 @@ memory@80000000 { device_type = "memory"; +#if (ENABLE_RME == 1) + reg = <0x00000000 0x80000000 0 0x7C000000>, + <0x00000008 0x80000000 0 0x80000000>; +#else reg = <0x00000000 0x80000000 0 0x7F000000>, <0x00000008 0x80000000 0 0x80000000>; +#endif }; gic: interrupt-controller@2f000000 { From 707f0710490a2e1c8442f16b12aa94edd4ac6cd3 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Mon, 26 Jul 2021 21:28:42 -0500 Subject: [PATCH 16/21] refactor(plat/arm): rename ARM_DTB_DRAM_NS region macros The macros PLAT_HW_CONFIG_DTB_BASE and PLAT_HW_CONFIG_DTB_SIZE describe the range of memory where the HW_CONFIG_DTB can be loaded rather than the actual load address and size of the DTB. This patch changes the names to something more descriptive.
Signed-off-by: Zelalem Aweke Change-Id: I98b81f3ce0c80fd76614f959667c25b07941e190 --- plat/arm/board/fvp/include/platform_def.h | 9 +++++---- plat/arm/board/juno/include/platform_def.h | 9 +++++---- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index 6e72b5999..541842fde 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -66,12 +66,13 @@ #define PLAT_ARM_DRAM2_BASE ULL(0x880000000) #define PLAT_ARM_DRAM2_SIZE UL(0x80000000) -#define PLAT_HW_CONFIG_DTB_BASE ULL(0x82000000) -#define PLAT_HW_CONFIG_DTB_SIZE ULL(0x8000) +/* Range of kernel DTB load address */ +#define FVP_DTB_DRAM_MAP_START ULL(0x82000000) +#define FVP_DTB_DRAM_MAP_SIZE ULL(0x8000) #define ARM_DTB_DRAM_NS MAP_REGION_FLAT( \ - PLAT_HW_CONFIG_DTB_BASE, \ - PLAT_HW_CONFIG_DTB_SIZE, \ + FVP_DTB_DRAM_MAP_START, \ + FVP_DTB_DRAM_MAP_SIZE, \ MT_MEMORY | MT_RO | MT_NS) /* * Load address of BL33 for this platform port diff --git a/plat/arm/board/juno/include/platform_def.h b/plat/arm/board/juno/include/platform_def.h index 5299a7b8a..d61ba5d19 100644 --- a/plat/arm/board/juno/include/platform_def.h +++ b/plat/arm/board/juno/include/platform_def.h @@ -53,12 +53,13 @@ #define PLAT_ARM_DRAM2_BASE ULL(0x880000000) #define PLAT_ARM_DRAM2_SIZE ULL(0x180000000) -#define PLAT_HW_CONFIG_DTB_BASE ULL(0x82000000) -#define PLAT_HW_CONFIG_DTB_SIZE ULL(0x00008000) /* 32KB */ +/* Range of kernel DTB load address */ +#define JUNO_DTB_DRAM_MAP_START ULL(0x82000000) +#define JUNO_DTB_DRAM_MAP_SIZE ULL(0x00008000) /* 32KB */ #define ARM_DTB_DRAM_NS MAP_REGION_FLAT( \ - PLAT_HW_CONFIG_DTB_BASE, \ - PLAT_HW_CONFIG_DTB_SIZE, \ + JUNO_DTB_DRAM_MAP_START, \ + JUNO_DTB_DRAM_MAP_SIZE, \ MT_MEMORY | MT_RO | MT_NS) /* virtual address used by dynamic mem_protect for chunk_base */ From 672d669d6c72f92c6b81464d1d421e392bc1aa3e Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Mon, 26 
Jul 2021 21:39:05 -0500 Subject: [PATCH 17/21] fix(plat/fvp): allow changing the kernel DTB load address We currently use ARM_PRELOADED_DTB_BASE build variable to pass the kernel DTB base address to the kernel when using the ARM_LINUX_KERNEL_AS_BL33 option. However this variable doesn't actually change the DTB load address. The DTB load address is actually specified in the FW_CONFIG DTS (fvp_fw_config.dts) as 'hw_config'. This patch passes the hw_config value instead of ARM_PRELOADED_DTB_BASE allowing us to change the kernel DTB load address through fvp_fw_config.dts. With this change we don't need the ARM_PRELOADED_DTB_BASE build variable if RESET_TO_BL31 is not set. Note that the hw_config value needs to be within the ARM_DTB_DRAM_NS region specified by FVP_DTB_DRAM_MAP_START and FVP_DTB_DRAM_MAP_SIZE. This patch also expands the ARM_DTB_DRAM_NS region to 32MB. Signed-off-by: Zelalem Aweke Change-Id: Idd74cdf5d2c649bb320644392ba5d69e175a53a9 --- plat/arm/board/fvp/include/platform_def.h | 2 +- plat/arm/common/arm_bl31_setup.c | 4 ++++ plat/arm/common/arm_common.mk | 9 ++++++--- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index 541842fde..90bb13568 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -68,7 +68,7 @@ /* Range of kernel DTB load address */ #define FVP_DTB_DRAM_MAP_START ULL(0x82000000) -#define FVP_DTB_DRAM_MAP_SIZE ULL(0x8000) +#define FVP_DTB_DRAM_MAP_SIZE ULL(0x02000000) /* 32 MB */ #define ARM_DTB_DRAM_NS MAP_REGION_FLAT( \ FVP_DTB_DRAM_MAP_START, \ diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index d76031282..d131bb95b 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -218,7 +218,11 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi * tree blob (DTB) in x0, while x1-x3 are reserved for 
future use and * must be 0. */ +#if RESET_TO_BL31 bl33_image_ep_info.args.arg0 = (u_register_t)ARM_PRELOADED_DTB_BASE; +#else + bl33_image_ep_info.args.arg0 = (u_register_t)hw_config; +#endif bl33_image_ep_info.args.arg1 = 0U; bl33_image_ep_info.args.arg2 = 0U; bl33_image_ep_info.args.arg3 = 0U; diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index dc8c6d01e..ae9afb795 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -94,10 +94,13 @@ ifeq (${ARM_LINUX_KERNEL_AS_BL33},1) ifndef PRELOADED_BL33_BASE $(error "PRELOADED_BL33_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.") endif - ifndef ARM_PRELOADED_DTB_BASE - $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is used.") + ifeq (${RESET_TO_BL31},1) + ifndef ARM_PRELOADED_DTB_BASE + $(error "ARM_PRELOADED_DTB_BASE must be set if ARM_LINUX_KERNEL_AS_BL33 is + used with RESET_TO_BL31.") + endif + $(eval $(call add_define,ARM_PRELOADED_DTB_BASE)) endif - $(eval $(call add_define,ARM_PRELOADED_DTB_BASE)) endif # Arm Ethos-N NPU SiP service From d22f1d358731f0f55f2f392fa587f0fa8d315aa5 Mon Sep 17 00:00:00 2001 From: Soby Mathew Date: Fri, 18 Jun 2021 12:25:35 +0100 Subject: [PATCH 18/21] fix(plat/fvp): bump BL2 stack size VERBOSE print logs need a larger stack size and the currently configured BL2 stack size was insufficient for FVP. This patch increases the same. 
Signed-off-by: Soby Mathew Change-Id: I316ba2ea467571161b5f4807e6e5fa0bf89d44c6 --- plat/arm/board/fvp/include/platform_def.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plat/arm/board/fvp/include/platform_def.h b/plat/arm/board/fvp/include/platform_def.h index 90bb13568..96574e526 100644 --- a/plat/arm/board/fvp/include/platform_def.h +++ b/plat/arm/board/fvp/include/platform_def.h @@ -196,7 +196,7 @@ # if TRUSTED_BOARD_BOOT # define PLATFORM_STACK_SIZE UL(0x1000) # else -# define PLATFORM_STACK_SIZE UL(0x440) +# define PLATFORM_STACK_SIZE UL(0x600) # endif #elif defined(IMAGE_BL2U) # define PLATFORM_STACK_SIZE UL(0x400) From 3cfa3497bace3e77fd1020fb125b130756c54355 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Thu, 26 Aug 2021 15:29:47 -0500 Subject: [PATCH 19/21] docs(rme): add build and run instructions for FEAT_RME This patch adds instructions on how to build and run TF-A with FEAT_RME enabled. The patch also adds code owners for FEAT_RME. Signed-off-by: Zelalem Aweke Change-Id: Id16dc52cb76b1ea56ac5c3fc38cb0794a62ac2a1 --- docs/about/maintainers.rst | 10 + docs/components/index.rst | 1 + .../components/realm-management-extension.rst | 194 ++++++++++++++++++ 3 files changed, 205 insertions(+) create mode 100644 docs/components/realm-management-extension.rst diff --git a/docs/about/maintainers.rst b/docs/about/maintainers.rst index 1bd46663a..7db81fcc6 100644 --- a/docs/about/maintainers.rst +++ b/docs/about/maintainers.rst @@ -109,6 +109,16 @@ Exception Handling Framework (EHF) :|G|: `john-powell-arm`_ :|F|: bl31/ehf.c +Realm Management Extension (RME) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +:|M|: Bipin Ravi +:|G|: `bipinravi-arm`_ +:|M|: Mark Dykes +:|G|: `mardyk01`_ +:|M|: John Powell +:|G|: `john-powell-arm`_ +:|M|: Zelalem Aweke +:|G|: `zelalem-aweke`_ Drivers, Libraries and Framework Code ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/components/index.rst b/docs/components/index.rst index 2409f964b..f349d8dcc 100644 --- 
a/docs/components/index.rst +++ b/docs/components/index.rst @@ -22,3 +22,4 @@ Components ffa-manifest-binding xlat-tables-lib-v2-design cot-binding + realm-management-extension diff --git a/docs/components/realm-management-extension.rst b/docs/components/realm-management-extension.rst new file mode 100644 index 000000000..5c580f36d --- /dev/null +++ b/docs/components/realm-management-extension.rst @@ -0,0 +1,194 @@ + +Realm Management Extension (RME) +==================================== + +FEAT_RME (or RME for short) is an Armv9-A extension and is one component of the +`Arm Confidential Compute Architecture (Arm CCA)`_. TF-A supports RME starting +from version 2.6. This document provides instructions on how to build and run +TF-A with RME. + +Building and running TF-A with RME +------------------------------------ + +This section describes how you can build and run TF-A with RME enabled. +We assume you have all the :ref:`Prerequisites` to build TF-A. + +To enable RME, you need to set the ENABLE_RME build flag when building +TF-A. Currently, this feature is only supported for the FVP platform. + +The following instructions show you how to build and run TF-A with RME +for two scenarios: TF-A with TF-A Tests, and four-world execution with +Hafnium and TF-A Tests. The instructions assume you have already obtained +TF-A. You can use the following command to clone TF-A. + +.. code:: shell + + git clone https://git.trustedfirmware.org/TF-A/trusted-firmware-a.git + +To run the tests, you need an FVP model. You can download a model that supports +RME from the `Arm Architecture Models website`_. Please select the +*Base RevC AEM FVP* model. After extracting the downloaded file, you should be able to +find the *FVP_Base_RevC-2xAEMvA* binary. The instructions below have been tested +with model version 11.15 revision 18. + +.. note:: + + ENABLE_RME build option is currently experimental. 
+
+Building TF-A with TF-A Tests
+********************************************
+Use the following instructions to build TF-A with `TF-A Tests`_ as the
+non-secure payload (BL33).
+
+**1. Obtain and build TF-A Tests**
+
+.. code:: shell
+
+    git clone https://git.trustedfirmware.org/TF-A/tf-a-tests.git
+    cd tf-a-tests
+    make CROSS_COMPILE=aarch64-none-elf- PLAT=fvp DEBUG=1
+
+This produces a TF-A Tests binary (*tftf.bin*) in the *build/fvp/debug* directory.
+
+**2. Build TF-A**
+
+.. code:: shell
+
+    cd trusted-firmware-a
+    make CROSS_COMPILE=aarch64-none-elf- \
+    PLAT=fvp \
+    ENABLE_RME=1 \
+    FVP_HW_CONFIG_DTS=fdts/fvp-base-gicv3-psci-1t.dts \
+    DEBUG=1 \
+    BL33=<path-to-tf-a-tests>/build/fvp/debug/tftf.bin \
+    all fip
+
+This produces *bl1.bin* and *fip.bin* binaries in the *build/fvp/debug* directory.
+The above command also builds a Test Realm Payload (TRP), which is a small test
+payload that implements Realm Monitor Management (RMM) functionalities and runs
+in the realm world (R-EL2). The TRP binary is packaged in *fip.bin*.
+
+Four-world execution with Hafnium and TF-A Tests
+****************************************************
+Four-world execution involves software components at each security state: root,
+secure, realm and non-secure. This section describes how to build TF-A
+with four-world support. We use TF-A as the root firmware, `Hafnium`_ as the
+secure component, TRP as the realm-world firmware and TF-A Tests as the
+non-secure payload.
+
+Before building TF-A, you first need to build the other software components.
+You can find instructions on how to get and build TF-A Tests above.
+
+**1. Obtain and build Hafnium**
+
+.. code:: shell
+
+    git clone --recurse-submodules https://git.trustedfirmware.org/hafnium/hafnium.git
+    cd hafnium
+    make PROJECT=reference
+
+The Hafnium binary should be located at
+*out/reference/secure_aem_v8a_fvp_clang/hafnium.bin*
+
+**2. Build TF-A**
+
+Build TF-A with RME as well as SPM enabled.
+
+..
code:: shell
+
+    make CROSS_COMPILE=aarch64-none-elf- \
+    PLAT=fvp \
+    ENABLE_RME=1 \
+    FVP_HW_CONFIG_DTS=fdts/fvp-base-gicv3-psci-1t.dts \
+    SPD=spmd \
+    SPMD_SPM_AT_SEL2=1 \
+    BRANCH_PROTECTION=1 \
+    CTX_INCLUDE_PAUTH_REGS=1 \
+    DEBUG=1 \
+    SP_LAYOUT_FILE=<path-to-tf-a-tests>/build/fvp/debug/sp_layout.json \
+    BL32=<path-to-hafnium>/out/reference/secure_aem_v8a_fvp_clang/hafnium.bin \
+    BL33=<path-to-tf-a-tests>/build/fvp/debug/tftf.bin \
+    all fip
+
+Running the tests
+*********************
+Use the following command to run the tests on FVP. TF-A Tests should boot
+and run the default tests including RME tests.
+
+.. code:: shell
+
+    FVP_Base_RevC-2xAEMvA \
+    -C bp.flashloader0.fname=<path-to-tf-a>/build/fvp/debug/fip.bin \
+    -C bp.secureflashloader.fname=<path-to-tf-a>/build/fvp/debug/bl1.bin \
+    -C bp.refcounter.non_arch_start_at_default=1 \
+    -C bp.refcounter.use_real_time=0 \
+    -C bp.ve_sysregs.exit_on_shutdown=1 \
+    -C cache_state_modelled=1 \
+    -C cluster0.NUM_CORES=4 \
+    -C cluster0.PA_SIZE=48 \
+    -C cluster0.ecv_support_level=2 \
+    -C cluster0.gicv3.cpuintf-mmap-access-level=2 \
+    -C cluster0.gicv3.without-DS-support=1 \
+    -C cluster0.gicv4.mask-virtual-interrupt=1 \
+    -C cluster0.has_arm_v8-6=1 \
+    -C cluster0.has_branch_target_exception=1 \
+    -C cluster0.has_rme=1 \
+    -C cluster0.has_rndr=1 \
+    -C cluster0.has_amu=1 \
+    -C cluster0.has_v8_7_pmu_extension=2 \
+    -C cluster0.max_32bit_el=-1 \
+    -C cluster0.restriction_on_speculative_execution=2 \
+    -C cluster0.restriction_on_speculative_execution_aarch32=2 \
+    -C cluster1.NUM_CORES=4 \
+    -C cluster1.PA_SIZE=48 \
+    -C cluster1.ecv_support_level=2 \
+    -C cluster1.gicv3.cpuintf-mmap-access-level=2 \
+    -C cluster1.gicv3.without-DS-support=1 \
+    -C cluster1.gicv4.mask-virtual-interrupt=1 \
+    -C cluster1.has_arm_v8-6=1 \
+    -C cluster1.has_branch_target_exception=1 \
+    -C cluster1.has_rme=1 \
+    -C cluster1.has_rndr=1 \
+    -C cluster1.has_amu=1 \
+    -C cluster1.has_v8_7_pmu_extension=2 \
+    -C cluster1.max_32bit_el=-1 \
+    -C cluster1.restriction_on_speculative_execution=2 \
+    -C cluster1.restriction_on_speculative_execution_aarch32=2 \
+    -C pci.pci_smmuv3.mmu.SMMU_AIDR=2 \
+    -C pci.pci_smmuv3.mmu.SMMU_IDR0=0x0046123B \
+ -C pci.pci_smmuv3.mmu.SMMU_IDR1=0x00600002 \ + -C pci.pci_smmuv3.mmu.SMMU_IDR3=0x1714 \ + -C pci.pci_smmuv3.mmu.SMMU_IDR5=0xFFFF0475 \ + -C pci.pci_smmuv3.mmu.SMMU_S_IDR1=0xA0000002 \ + -C pci.pci_smmuv3.mmu.SMMU_S_IDR2=0 \ + -C pci.pci_smmuv3.mmu.SMMU_S_IDR3=0 \ + -C bp.pl011_uart0.out_file=uart0.log \ + -C bp.pl011_uart1.out_file=uart1.log \ + -C bp.pl011_uart2.out_file=uart2.log \ + -C pctl.startup=0.0.0.0 \ + -Q 1000 \ + "$@" + +The bottom of the output from *uart0* should look something like the following. + +.. code-block:: shell + + ... + + > Test suite 'FF-A Interrupt' + Passed + > Test suite 'SMMUv3 tests' + Passed + > Test suite 'PMU Leakage' + Passed + > Test suite 'DebugFS' + Passed + > Test suite 'Realm payload tests' + Passed + ... + + +.. _Arm Confidential Compute Architecture (Arm CCA): https://www.arm.com/why-arm/architecture/security-features/arm-confidential-compute-architecture +.. _Arm Architecture Models website: https://developer.arm.com/tools-and-software/simulation-models/fixed-virtual-platforms/arm-ecosystem-models +.. _TF-A Tests: https://trustedfirmware-a-tests.readthedocs.io/en/latest +.. _Hafnium: https://www.trustedfirmware.org/projects/hafnium From 07e96d1d2958b6f121476fd391ac67bf8c2c4735 Mon Sep 17 00:00:00 2001 From: Zelalem Aweke Date: Fri, 1 Oct 2021 12:30:49 -0500 Subject: [PATCH 20/21] feat(rme): disable Watchdog for Arm platforms if FEAT_RME enabled In the typical TF-A boot flow, the Trusted Watchdog is started at the beginning of BL1 and then stopped in BL1 after returning from BL2. However, in the RME boot flow there is no return path from BL2 to BL1. Therefore, disable the Watchdog if ENABLE_RME is set. 
Signed-off-by: Zelalem Aweke Change-Id: Id88fbfab8e8440642414bed48c50e3fcb23f3621 --- plat/arm/common/arm_common.mk | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/plat/arm/common/arm_common.mk b/plat/arm/common/arm_common.mk index ae9afb795..d14d10143 100644 --- a/plat/arm/common/arm_common.mk +++ b/plat/arm/common/arm_common.mk @@ -52,9 +52,10 @@ $(eval $(call assert_boolean,ARM_RECOM_STATE_ID_ENC)) $(eval $(call add_define,ARM_RECOM_STATE_ID_ENC)) # Process ARM_DISABLE_TRUSTED_WDOG flag -# By default, Trusted Watchdog is always enabled unless SPIN_ON_BL1_EXIT is set +# By default, Trusted Watchdog is always enabled unless +# SPIN_ON_BL1_EXIT or ENABLE_RME is set ARM_DISABLE_TRUSTED_WDOG := 0 -ifeq (${SPIN_ON_BL1_EXIT}, 1) +ifneq ($(filter 1,${SPIN_ON_BL1_EXIT} ${ENABLE_RME}),) ARM_DISABLE_TRUSTED_WDOG := 1 endif $(eval $(call assert_boolean,ARM_DISABLE_TRUSTED_WDOG)) From f19dc624a17c9df6aa444e33568b1f70ff4e9341 Mon Sep 17 00:00:00 2001 From: johpow01 Date: Wed, 16 Jun 2021 17:57:28 -0500 Subject: [PATCH 21/21] refactor(gpt): productize and refactor GPT library This patch updates and refactors the GPT library and fixes bugs. - Support all combinations of PGS, PPS, and L0GPTSZ parameters. - PPS and PGS are set at runtime, L0GPTSZ is read from GPCCR_EL3. - Use compiler definitions to simplify code. - Renaming functions to better suit intended uses. - MMU enabled before GPT APIs called. - Add comments to make function usage more clear in GPT library. - Added _rme suffix to file names to differentiate better from the GPT file system code. - Renamed gpt_defs.h to gpt_rme_private.h to better separate private and public code. - Renamed gpt_core.c to gpt_rme.c to better conform to TF-A precedent. 
Signed-off-by: John Powell Change-Id: I4cbb23b0f81e697baa9fb23ba458aa3f7d1ed919 --- bl2/bl2.mk | 2 +- bl31/aarch64/bl31_entrypoint.S | 21 +- bl31/bl31.mk | 2 +- include/arch/aarch64/arch.h | 78 -- include/lib/gpt/gpt.h | 86 -- include/lib/gpt/gpt_defs.h | 76 -- include/lib/gpt_rme/gpt_rme.h | 276 ++++++ include/plat/arm/common/arm_def.h | 98 +-- include/plat/arm/common/arm_pas_def.h | 47 +- lib/gpt/gpt_core.c | 767 ---------------- lib/gpt_rme/gpt_rme.c | 1112 ++++++++++++++++++++++++ lib/{gpt/gpt.mk => gpt_rme/gpt_rme.mk} | 4 +- lib/gpt_rme/gpt_rme_private.h | 228 +++++ plat/arm/common/arm_bl2_setup.c | 51 +- plat/arm/common/arm_bl31_setup.c | 40 +- services/std_svc/rmmd/rmmd_main.c | 16 +- 16 files changed, 1762 insertions(+), 1142 deletions(-) delete mode 100644 include/lib/gpt/gpt.h delete mode 100644 include/lib/gpt/gpt_defs.h create mode 100644 include/lib/gpt_rme/gpt_rme.h delete mode 100644 lib/gpt/gpt_core.c create mode 100644 lib/gpt_rme/gpt_rme.c rename lib/{gpt/gpt.mk => gpt_rme/gpt_rme.mk} (61%) create mode 100644 lib/gpt_rme/gpt_rme_private.h diff --git a/bl2/bl2.mk b/bl2/bl2.mk index fd8374795..7a973e512 100644 --- a/bl2/bl2.mk +++ b/bl2/bl2.mk @@ -17,7 +17,7 @@ endif ifeq (${ENABLE_RME},1) # Using RME, run BL2 at EL3 -include lib/gpt/gpt.mk +include lib/gpt_rme/gpt_rme.mk BL2_SOURCES += bl2/${ARCH}/bl2_rme_entrypoint.S \ bl2/${ARCH}/bl2_el3_exceptions.S \ diff --git a/bl31/aarch64/bl31_entrypoint.S b/bl31/aarch64/bl31_entrypoint.S index 2e9a39496..ed058648f 100644 --- a/bl31/aarch64/bl31_entrypoint.S +++ b/bl31/aarch64/bl31_entrypoint.S @@ -172,14 +172,6 @@ func bl31_warm_entrypoint _exception_vectors=runtime_exceptions \ _pie_fixup_size=0 -#if ENABLE_RME - /* - * Initialise and enable Granule Protection - * before enabling any stage of translation. - */ - bl gpt_enable -#endif - /* * We're about to enable MMU and participate in PSCI state coordination. 
* @@ -203,6 +195,19 @@ func bl31_warm_entrypoint #endif bl bl31_plat_enable_mmu +#if ENABLE_RME + /* + * At warm boot GPT data structures have already been initialized in RAM + * but the sysregs for this CPU need to be initialized. Note that the GPT + * accesses are controlled attributes in GPCCR and do not depend on the + * SCR_EL3.C bit. + */ + bl gpt_enable + cbz x0, 1f + no_ret plat_panic_handler +1: +#endif + #if ENABLE_PAUTH /* -------------------------------------------------------------------- * Program APIAKey_EL1 and enable pointer authentication diff --git a/bl31/bl31.mk b/bl31/bl31.mk index 5927fb1c9..106d4109d 100644 --- a/bl31/bl31.mk +++ b/bl31/bl31.mk @@ -112,7 +112,7 @@ BL31_SOURCES += services/std_svc/pci_svc.c endif ifeq (${ENABLE_RME},1) -include lib/gpt/gpt.mk +include lib/gpt_rme/gpt_rme.mk BL31_SOURCES += ${GPT_LIB_SRCS} \ ${RMMD_SOURCES} diff --git a/include/arch/aarch64/arch.h b/include/arch/aarch64/arch.h index 5949370e0..0ad97543b 100644 --- a/include/arch/aarch64/arch.h +++ b/include/arch/aarch64/arch.h @@ -1105,87 +1105,9 @@ /******************************************************************************* * Realm management extension register definitions ******************************************************************************/ - -/* GPCCR_EL3 definitions */ #define GPCCR_EL3 S3_6_C2_C1_6 - -/* Least significant address bits protected by each entry in level 0 GPT */ -#define GPCCR_L0GPTSZ_SHIFT U(20) -#define GPCCR_L0GPTSZ_MASK U(0xF) -#define GPCCR_L0GPTSZ_30BITS U(0x0) -#define GPCCR_L0GPTSZ_34BITS U(0x4) -#define GPCCR_L0GPTSZ_36BITS U(0x6) -#define GPCCR_L0GPTSZ_39BITS U(0x9) -#define SET_GPCCR_L0GPTSZ(x) \ - ((x & GPCCR_L0GPTSZ_MASK) << GPCCR_L0GPTSZ_SHIFT) - -/* Granule protection check priority bit definitions */ -#define GPCCR_GPCP_SHIFT U(17) -#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_EL3_GPCP_SHIFT) - -/* Granule protection check bit definitions */ -#define GPCCR_GPC_SHIFT U(16) -#define GPCCR_GPC_BIT (ULL(1) << 
GPCCR_GPC_SHIFT) - -/* Physical granule size bit definitions */ -#define GPCCR_PGS_SHIFT U(14) -#define GPCCR_PGS_MASK U(0x3) -#define GPCCR_PGS_4K U(0x0) -#define GPCCR_PGS_16K U(0x2) -#define GPCCR_PGS_64K U(0x1) -#define SET_GPCCR_PGS(x) \ - ((x & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT) - -/* GPT fetch shareability attribute bit definitions */ -#define GPCCR_SH_SHIFT U(12) -#define GPCCR_SH_MASK U(0x3) -#define GPCCR_SH_NS U(0x0) -#define GPCCR_SH_OS U(0x2) -#define GPCCR_SH_IS U(0x3) -#define SET_GPCCR_SH(x) \ - ((x & GPCCR_SH_MASK) << GPCCR_SH_SHIFT) - -/* GPT fetch outer cacheability attribute bit definitions */ -#define GPCCR_ORGN_SHIFT U(10) -#define GPCCR_ORGN_MASK U(0x3) -#define GPCCR_ORGN_NC U(0x0) -#define GPCCR_ORGN_WB_RA_WA U(0x1) -#define GPCCR_ORGN_WT_RA_NWA U(0x2) -#define GPCCR_ORGN_WB_RA_NWA U(0x3) -#define SET_GPCCR_ORGN(x) \ - ((x & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT) - -/* GPT fetch inner cacheability attribute bit definitions */ -#define GPCCR_IRGN_SHIFT U(8) -#define GPCCR_IRGN_MASK U(0x3) -#define GPCCR_IRGN_NC U(0x0) -#define GPCCR_IRGN_WB_RA_WA U(0x1) -#define GPCCR_IRGN_WT_RA_NWA U(0x2) -#define GPCCR_IRGN_WB_RA_NWA U(0x3) -#define SET_GPCCR_IRGN(x) \ - ((x & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT) - -/* Protected physical address size bit definitions */ -#define GPCCR_PPS_SHIFT U(0) -#define GPCCR_PPS_MASK U(0x7) -#define GPCCR_PPS_4GB U(0x0) -#define GPCCR_PPS_64GB U(0x1) -#define GPCCR_PPS_1TB U(0x2) -#define GPCCR_PPS_4TB U(0x3) -#define GPCCR_PPS_16TB U(0x4) -#define GPCCR_PPS_256TB U(0x5) -#define GPCCR_PPS_4PB U(0x6) -#define SET_GPCCR_PPS(x) \ - ((x & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT) - -/* GPTBR_EL3 definitions */ #define GPTBR_EL3 S3_6_C2_C1_4 -/* Base Address for the GPT bit definitions */ -#define GPTBR_BADDR_SHIFT U(0) -#define GPTBR_BADDR_VAL_SHIFT U(12) -#define GPTBR_BADDR_MASK ULL(0xffffffffff) - /******************************************************************************* * RAS system registers 
******************************************************************************/ diff --git a/include/lib/gpt/gpt.h b/include/lib/gpt/gpt.h deleted file mode 100644 index 89d30177d..000000000 --- a/include/lib/gpt/gpt.h +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef GPT_H -#define GPT_H - -#include - -#include - -#include "gpt_defs.h" - -#define GPT_DESC_ATTRS(_type, _gpi) \ - ((((_type) & PAS_REG_DESC_TYPE_MASK) \ - << PAS_REG_DESC_TYPE_SHIFT) | \ - (((_gpi) & PAS_REG_GPI_MASK) \ - << PAS_REG_GPI_SHIFT)) - -/* - * Macro to create a GPT entry for this PAS range either as a L0 block - * descriptor or L1 table descriptor depending upon the size of the range. - */ -#define MAP_GPT_REGION(_pa, _sz, _gpi) \ - { \ - .base_pa = (_pa), \ - .size = (_sz), \ - .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_ANY, (_gpi)), \ - } - -/* - * Special macro to create a L1 table descriptor at L0 for a 1GB region as - * opposed to creating a block mapping by default. - */ -#define MAP_GPT_REGION_TBL(_pa, _sz, _gpi) \ - { \ - .base_pa = (_pa), \ - .size = (_sz), \ - .attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, (_gpi)), \ - } - -/* - * Structure for specifying a Granule range and its properties - */ -typedef struct pas_region { - unsigned long long base_pa; /**< Base address for PAS. */ - size_t size; /**< Size of the PAS. */ - unsigned int attrs; /**< PAS GPI and entry type. */ -} pas_region_t; - -/* - * Structure to initialise the Granule Protection Tables. - */ -typedef struct gpt_init_params { - unsigned int pgs; /**< Address Width of Phisical Granule Size. */ - unsigned int pps; /**< Protected Physical Address Size. */ - unsigned int l0gptsz; /**< Granule size on L0 table entry. */ - pas_region_t *pas_regions; /**< PAS regions to protect. */ - unsigned int pas_count; /**< Number of PAS regions to initialise. */ - uintptr_t l0_mem_base; /**< L0 Table base address. 
*/ - size_t l0_mem_size; /**< Size of memory reserved for L0 tables. */ - uintptr_t l1_mem_base; /**< L1 Table base address. */ - size_t l1_mem_size; /**< Size of memory reserved for L1 tables. */ -} gpt_init_params_t; - -/** @brief Initialise the Granule Protection tables. - */ -int gpt_init(gpt_init_params_t *params); - -/** @brief Enable the Granule Protection Checks. - */ -void gpt_enable(void); - -/** @brief Disable the Granule Protection Checks. - */ -void gpt_disable(void); - -/** @brief Transition a granule between security states. - */ -int gpt_transition_pas(uint64_t pa, - unsigned int src_sec_state, - unsigned int target_pas); - -#endif /* GPT_H */ diff --git a/include/lib/gpt/gpt_defs.h b/include/lib/gpt/gpt_defs.h deleted file mode 100644 index 6122a126f..000000000 --- a/include/lib/gpt/gpt_defs.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#ifndef GPT_DEFS_H -#define GPT_DEFS_H - -#include -#include - -#include "gpt.h" - -/* GPI values */ -#define GPI_NO_ACCESS U(0x0) -#define GPI_SECURE U(0x8) -#define GPI_NS U(0x9) -#define GPI_ROOT U(0xa) -#define GPI_REALM U(0xb) -#define GPI_ANY U(0xf) -#define GPI_VAL_MASK ULL(0xf) - -/* GPT descriptor bit definitions */ -#define GPT_L1_INDEX_MASK ULL(0xf) -#define GPT_L1_INDEX_SHIFT ULL(0x0) - -#define GPT_TBL_DESC ULL(0x3) -#define GPT_BLK_DESC ULL(0x1) - -#define GPT_TBL_DESC_ADDR_SHIFT ULL(12) -#define GPT_TBL_DESC_ADDR_MASK (((ULL(1) << \ - (51 - GPT_TBL_DESC_ADDR_SHIFT)) - 1) \ - << GPT_TBL_DESC_ADDR_SHIFT) - -#define GPT_BLOCK_DESC_GPI_VAL_SHIFT ULL(4) - -/* Each descriptor is 8 bytes long. 
*/ -#define GPT_DESC_SIZE ULL(8) - -#define PPS_MAX_VAL PSTCR_EL3_PPS_4PB -#define PPS_NUM_1GB_ENTRIES ULL(1024) -#define PGS_4K_1GB_L1_TABLE_SZ (U(2) << 17) - -/* 2 << LOG2_8K = Bytes in 8K */ -#define LOG2_8K U(13) - -#define GPT_L1_SIZE ULL(0x40000) /* 256K */ -#define SZ_1G (ULL(0x1) << 30) /* 1GB */ - -#define GPT_MIN_PGS_SHIFT U(12) /* 4K */ - -#define L1_GPT_INDEX_MASK U(0x3fffffff) -#define GPT_GRAN_DESC_NUM_GPIS U(4) - -#define PAS_REG_GPI_SHIFT U(0) -#define PAS_REG_GPI_MASK U(0xf) - -/* .attrs field definitions */ -#define PAS_REG_DESC_TYPE_ANY U(0) -#define PAS_REG_DESC_TYPE_BLK U(1) -#define PAS_REG_DESC_TYPE_TBL U(2) -#define PAS_REG_DESC_TYPE_SHIFT U(4) -#define PAS_REG_DESC_TYPE_MASK U(0x3) -#define PAS_REG_DESC_TYPE(_attrs) (((_attrs) \ - >> PAS_REG_DESC_TYPE_SHIFT) \ - & PAS_REG_DESC_TYPE_MASK) - -#define PAS_REG_GPI(_attrs) (((_attrs) \ - >> PAS_REG_GPI_SHIFT) \ - & PAS_REG_GPI_MASK) - -#define SZ_1G_MASK (SZ_1G - U(1)) -#define IS_1GB_ALIGNED(addr) (((addr) & SZ_1G_MASK) == U(0)) - -#endif /* GPT_DEFS */ diff --git a/include/lib/gpt_rme/gpt_rme.h b/include/lib/gpt_rme/gpt_rme.h new file mode 100644 index 000000000..379b91562 --- /dev/null +++ b/include/lib/gpt_rme/gpt_rme.h @@ -0,0 +1,276 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_RME_H +#define GPT_RME_H + +#include + +#include + +/******************************************************************************/ +/* GPT helper macros and definitions */ +/******************************************************************************/ + +/* + * Structure for specifying a mapping range and it's properties. This should not + * be manually initialized, using the MAP_GPT_REGION_x macros is recommended as + * to avoid potential incompatibilities in the future. + */ +typedef struct pas_region { + uintptr_t base_pa; /* Base address for PAS. */ + size_t size; /* Size of the PAS. 
*/ + unsigned int attrs; /* PAS GPI and entry type. */ +} pas_region_t; + +/* GPT GPI definitions */ +#define GPT_GPI_NO_ACCESS U(0x0) +#define GPT_GPI_SECURE U(0x8) +#define GPT_GPI_NS U(0x9) +#define GPT_GPI_ROOT U(0xA) +#define GPT_GPI_REALM U(0xB) +#define GPT_GPI_ANY U(0xF) +#define GPT_GPI_VAL_MASK UL(0xF) + +/* PAS attribute GPI definitions. */ +#define GPT_PAS_ATTR_GPI_SHIFT U(0) +#define GPT_PAS_ATTR_GPI_MASK U(0xF) +#define GPT_PAS_ATTR_GPI(_attrs) (((_attrs) \ + >> GPT_PAS_ATTR_GPI_SHIFT) \ + & GPT_PAS_ATTR_GPI_MASK) + +/* PAS attribute mapping type definitions */ +#define GPT_PAS_ATTR_MAP_TYPE_BLOCK U(0x0) +#define GPT_PAS_ATTR_MAP_TYPE_GRANULE U(0x1) +#define GPT_PAS_ATTR_MAP_TYPE_SHIFT U(4) +#define GPT_PAS_ATTR_MAP_TYPE_MASK U(0x1) +#define GPT_PAS_ATTR_MAP_TYPE(_attrs) (((_attrs) \ + >> GPT_PAS_ATTR_MAP_TYPE_SHIFT) \ + & GPT_PAS_ATTR_MAP_TYPE_MASK) + +/* + * Macro to initialize the attributes field in the pas_region_t structure. + * [31:5] Reserved + * [4] Mapping type (GPT_PAS_ATTR_MAP_TYPE_x definitions) + * [3:0] PAS GPI type (GPT_GPI_x definitions) + */ +#define GPT_PAS_ATTR(_type, _gpi) \ + ((((_type) & GPT_PAS_ATTR_MAP_TYPE_MASK) \ + << GPT_PAS_ATTR_MAP_TYPE_SHIFT) | \ + (((_gpi) & GPT_PAS_ATTR_GPI_MASK) \ + << GPT_PAS_ATTR_GPI_SHIFT)) + +/* + * Macro to create a GPT entry for this PAS range as a block descriptor. If this + * region does not fit the requirements for a block descriptor then GPT + * initialization will fail. + */ +#define GPT_MAP_REGION_BLOCK(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_BLOCK, (_gpi)), \ + } + +/* + * Macro to create a GPT entry for this PAS range as a table descriptor. If this + * region does not fit the requirements for a table descriptor then GPT + * initialization will fail. 
+ */ +#define GPT_MAP_REGION_GRANULE(_pa, _sz, _gpi) \ + { \ + .base_pa = (_pa), \ + .size = (_sz), \ + .attrs = GPT_PAS_ATTR(GPT_PAS_ATTR_MAP_TYPE_GRANULE, (_gpi)), \ + } + +/******************************************************************************/ +/* GPT register field definitions */ +/******************************************************************************/ + +/* + * Least significant address bits protected by each entry in level 0 GPT. This + * field is read-only. + */ +#define GPCCR_L0GPTSZ_SHIFT U(20) +#define GPCCR_L0GPTSZ_MASK U(0xF) + +typedef enum { + GPCCR_L0GPTSZ_30BITS = U(0x0), + GPCCR_L0GPTSZ_34BITS = U(0x4), + GPCCR_L0GPTSZ_36BITS = U(0x6), + GPCCR_L0GPTSZ_39BITS = U(0x9) +} gpccr_l0gptsz_e; + +/* Granule protection check priority bit definitions */ +#define GPCCR_GPCP_SHIFT U(17) +#define GPCCR_GPCP_BIT (ULL(1) << GPCCR_EL3_GPCP_SHIFT) + +/* Granule protection check bit definitions */ +#define GPCCR_GPC_SHIFT U(16) +#define GPCCR_GPC_BIT (ULL(1) << GPCCR_GPC_SHIFT) + +/* Physical granule size bit definitions */ +#define GPCCR_PGS_SHIFT U(14) +#define GPCCR_PGS_MASK U(0x3) +#define SET_GPCCR_PGS(x) (((x) & GPCCR_PGS_MASK) << GPCCR_PGS_SHIFT) + +typedef enum { + GPCCR_PGS_4K = U(0x0), + GPCCR_PGS_64K = U(0x1), + GPCCR_PGS_16K = U(0x2) +} gpccr_pgs_e; + +/* GPT fetch shareability attribute bit definitions */ +#define GPCCR_SH_SHIFT U(12) +#define GPCCR_SH_MASK U(0x3) +#define SET_GPCCR_SH(x) (((x) & GPCCR_SH_MASK) << GPCCR_SH_SHIFT) + +typedef enum { + GPCCR_SH_NS = U(0x0), + GPCCR_SH_OS = U(0x2), + GPCCR_SH_IS = U(0x3) +} gpccr_sh_e; + +/* GPT fetch outer cacheability attribute bit definitions */ +#define GPCCR_ORGN_SHIFT U(10) +#define GPCCR_ORGN_MASK U(0x3) +#define SET_GPCCR_ORGN(x) (((x) & GPCCR_ORGN_MASK) << GPCCR_ORGN_SHIFT) + +typedef enum { + GPCCR_ORGN_NC = U(0x0), + GPCCR_ORGN_WB_RA_WA = U(0x1), + GPCCR_ORGN_WT_RA_NWA = U(0x2), + GPCCR_ORGN_WB_RA_NWA = U(0x3) +} gpccr_orgn_e; + +/* GPT fetch inner cacheability attribute bit 
definitions */ +#define GPCCR_IRGN_SHIFT U(8) +#define GPCCR_IRGN_MASK U(0x3) +#define SET_GPCCR_IRGN(x) (((x) & GPCCR_IRGN_MASK) << GPCCR_IRGN_SHIFT) + +typedef enum { + GPCCR_IRGN_NC = U(0x0), + GPCCR_IRGN_WB_RA_WA = U(0x1), + GPCCR_IRGN_WT_RA_NWA = U(0x2), + GPCCR_IRGN_WB_RA_NWA = U(0x3) +} gpccr_irgn_e; + +/* Protected physical address size bit definitions */ +#define GPCCR_PPS_SHIFT U(0) +#define GPCCR_PPS_MASK U(0x7) +#define SET_GPCCR_PPS(x) (((x) & GPCCR_PPS_MASK) << GPCCR_PPS_SHIFT) + +typedef enum { + GPCCR_PPS_4GB = U(0x0), + GPCCR_PPS_64GB = U(0x1), + GPCCR_PPS_1TB = U(0x2), + GPCCR_PPS_4TB = U(0x3), + GPCCR_PPS_16TB = U(0x4), + GPCCR_PPS_256TB = U(0x5), + GPCCR_PPS_4PB = U(0x6) +} gpccr_pps_e; + +/* Base Address for the GPT bit definitions */ +#define GPTBR_BADDR_SHIFT U(0) +#define GPTBR_BADDR_VAL_SHIFT U(12) +#define GPTBR_BADDR_MASK ULL(0xffffffffff) + +/******************************************************************************/ +/* GPT public APIs */ +/******************************************************************************/ + +/* + * Public API that initializes the entire protected space to GPT_GPI_ANY using + * the L0 tables (block descriptors). Ideally, this function is invoked prior + * to DDR discovery and initialization. The MMU must be initialized before + * calling this function. + * + * Parameters + * pps PPS value to use for table generation + * l0_mem_base Base address of L0 tables in memory. + * l0_mem_size Total size of memory available for L0 tables. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_l0_tables(gpccr_pps_e pps, + uintptr_t l0_mem_base, + size_t l0_mem_size); + +/* + * Public API that carves out PAS regions from the L0 tables and builds any L1 + * tables that are needed. This function ideally is run after DDR discovery and + * initialization. The L0 tables must have already been initialized to GPI_ANY + * when this function is called. 
+ * + * Parameters + * pgs PGS value to use for table generation. + * l1_mem_base Base address of memory used for L1 tables. + * l1_mem_size Total size of memory available for L1 tables. + * *pas_regions Pointer to PAS regions structure array. + * pas_count Total number of PAS regions. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, + uintptr_t l1_mem_base, + size_t l1_mem_size, + pas_region_t *pas_regions, + unsigned int pas_count); + +/* + * Public API to initialize the runtime gpt_config structure based on the values + * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization + * typically happens in a bootloader stage prior to setting up the EL3 runtime + * environment for the granule transition service so this function detects the + * initialization from a previous stage. Granule protection checks must be + * enabled already or this function will return an error. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_runtime_init(void); + +/* + * Public API to enable granule protection checks once the tables have all been + * initialized. This function is called at first initialization and then again + * later during warm boots of CPU cores. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_enable(void); + +/* + * Public API to disable granule protection checks. + */ +void gpt_disable(void); + +/* + * This function is the core of the granule transition service. When a granule + * transition request occurs it is routed to this function where the request is + * validated then fulfilled if possible. + * + * TODO: implement support for transitioning multiple granules at once. + * + * Parameters + * base: Base address of the region to transition, must be aligned to granule + * size. + * size: Size of region to transition, must be aligned to granule size. 
+ * src_sec_state: Security state of the caller. + * target_pas: Target PAS of the specified memory region. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_transition_pas(uint64_t base, + size_t size, + unsigned int src_sec_state, + unsigned int target_pas); + +#endif /* GPT_RME_H */ diff --git a/include/plat/arm/common/arm_def.h b/include/plat/arm/common/arm_def.h index a8b5d26df..1993cb401 100644 --- a/include/plat/arm/common/arm_def.h +++ b/include/plat/arm/common/arm_def.h @@ -81,19 +81,19 @@ * - REALM DRAM: Reserved for Realm world if RME is enabled * - AP TZC DRAM: The remaining TZC secured DRAM reserved for AP use * - * RME enabled(64MB) RME not enabled(16MB) - * -------------------- ------------------- - * | | | | - * | AP TZC (~28MB) | | AP TZC (~14MB) | - * -------------------- ------------------- - * | | | | - * | REALM (32MB) | | EL3 TZC (2MB) | - * -------------------- ------------------- - * | | | | - * | EL3 TZC (3MB) | | SCP TZC | - * -------------------- 0xFFFF_FFFF------------------- - * | L1 GPT + SCP TZC | - * | (~1MB) | + * RME enabled(64MB) RME not enabled(16MB) + * -------------------- ------------------- + * | | | | + * | AP TZC (~28MB) | | AP TZC (~14MB) | + * -------------------- ------------------- + * | | | | + * | REALM (32MB) | | EL3 TZC (2MB) | + * -------------------- ------------------- + * | | | | + * | EL3 TZC (3MB) | | SCP TZC | + * -------------------- 0xFFFF_FFFF------------------- + * | L1 GPT + SCP TZC | + * | (~1MB) | * 0xFFFF_FFFF -------------------- */ #if ENABLE_RME @@ -252,56 +252,56 @@ INTR_PROP_DESC(ARM_IRQ_SEC_SGI_6, GIC_HIGHEST_SEC_PRIORITY, (grp), \ GIC_INTR_CFG_EDGE) -#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \ - ARM_SHARED_RAM_BASE, \ - ARM_SHARED_RAM_SIZE, \ - MT_DEVICE | MT_RW | EL3_PAS) +#define ARM_MAP_SHARED_RAM MAP_REGION_FLAT( \ + ARM_SHARED_RAM_BASE, \ + ARM_SHARED_RAM_SIZE, \ + MT_DEVICE | MT_RW | EL3_PAS) -#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \ 
- ARM_NS_DRAM1_BASE, \ - ARM_NS_DRAM1_SIZE, \ - MT_MEMORY | MT_RW | MT_NS) +#define ARM_MAP_NS_DRAM1 MAP_REGION_FLAT( \ + ARM_NS_DRAM1_BASE, \ + ARM_NS_DRAM1_SIZE, \ + MT_MEMORY | MT_RW | MT_NS) -#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \ - ARM_DRAM2_BASE, \ - ARM_DRAM2_SIZE, \ - MT_MEMORY | MT_RW | MT_NS) +#define ARM_MAP_DRAM2 MAP_REGION_FLAT( \ + ARM_DRAM2_BASE, \ + ARM_DRAM2_SIZE, \ + MT_MEMORY | MT_RW | MT_NS) -#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \ - TSP_SEC_MEM_BASE, \ - TSP_SEC_MEM_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_TSP_SEC_MEM MAP_REGION_FLAT( \ + TSP_SEC_MEM_BASE, \ + TSP_SEC_MEM_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #if ARM_BL31_IN_DRAM -#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \ - BL31_BASE, \ - PLAT_ARM_MAX_BL31_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_BL31_SEC_DRAM MAP_REGION_FLAT( \ + BL31_BASE, \ + PLAT_ARM_MAX_BL31_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #endif -#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \ - ARM_EL3_TZC_DRAM1_BASE, \ - ARM_EL3_TZC_DRAM1_SIZE, \ - MT_MEMORY | MT_RW | EL3_PAS) +#define ARM_MAP_EL3_TZC_DRAM MAP_REGION_FLAT( \ + ARM_EL3_TZC_DRAM1_BASE, \ + ARM_EL3_TZC_DRAM1_SIZE, \ + MT_MEMORY | MT_RW | EL3_PAS) #if defined(SPD_spmd) -#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \ - PLAT_ARM_TRUSTED_DRAM_BASE, \ - PLAT_ARM_TRUSTED_DRAM_SIZE, \ - MT_MEMORY | MT_RW | MT_SECURE) +#define ARM_MAP_TRUSTED_DRAM MAP_REGION_FLAT( \ + PLAT_ARM_TRUSTED_DRAM_BASE, \ + PLAT_ARM_TRUSTED_DRAM_SIZE, \ + MT_MEMORY | MT_RW | MT_SECURE) #endif #if ENABLE_RME -#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \ - PLAT_ARM_RMM_BASE, \ - PLAT_ARM_RMM_SIZE, \ - MT_MEMORY | MT_RW | MT_REALM) +#define ARM_MAP_RMM_DRAM MAP_REGION_FLAT( \ + PLAT_ARM_RMM_BASE, \ + PLAT_ARM_RMM_SIZE, \ + MT_MEMORY | MT_RW | MT_REALM) -#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \ - ARM_L1_GPT_ADDR_BASE, \ - ARM_L1_GPT_SIZE, \ - MT_MEMORY | MT_RW | EL3_PAS) +#define ARM_MAP_GPT_L1_DRAM MAP_REGION_FLAT( \ + ARM_L1_GPT_ADDR_BASE, 
\ + ARM_L1_GPT_SIZE, \ + MT_MEMORY | MT_RW | EL3_PAS) #endif /* ENABLE_RME */ diff --git a/include/plat/arm/common/arm_pas_def.h b/include/plat/arm/common/arm_pas_def.h index d268ce613..4fee41b3f 100644 --- a/include/plat/arm/common/arm_pas_def.h +++ b/include/plat/arm/common/arm_pas_def.h @@ -6,6 +6,7 @@ #ifndef ARM_PAS_DEF_H #define ARM_PAS_DEF_H +#include #include /***************************************************************************** @@ -42,12 +43,12 @@ * * - 4KB of L0 GPT reside in TSRAM, on top of the CONFIG section. * - ~1MB of L1 GPTs reside at the top of DRAM1 (TZC area). - * - The first 1GB region has GPI_ANY and, therefore, is not protected by + * - The first 1GB region has GPT_GPI_ANY and, therefore, is not protected by * the GPT. * - The DRAM TZC area is split into three regions: the L1 GPT region and - * 3MB of region below that are defined as GPI_ROOT, 32MB Realm region - * below that is defined as GPI_REALM and the rest of it is defined as - * GPI_SECURE. + * 3MB of region below that are defined as GPT_GPI_ROOT, 32MB Realm region + * below that is defined as GPT_GPI_REALM and the rest of it is defined as + * GPT_GPI_SECURE. 
*/ /* TODO: This might not be the best way to map the PAS */ @@ -64,32 +65,30 @@ #define ARM_PAS_3_BASE (ARM_AP_TZC_DRAM1_BASE) #define ARM_PAS_3_SIZE (ARM_AP_TZC_DRAM1_SIZE) -#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \ - ARM_PAS_1_SIZE, \ - GPI_ANY) -#define ARM_PAS_KERNEL MAP_GPT_REGION_TBL(ARM_PAS_2_BASE, \ - ARM_PAS_2_SIZE, \ - GPI_NS) +#define ARM_PAS_GPI_ANY MAP_GPT_REGION(ARM_PAS_1_BASE, \ + ARM_PAS_1_SIZE, \ + GPT_GPI_ANY) +#define ARM_PAS_KERNEL GPT_MAP_REGION_GRANULE(ARM_PAS_2_BASE, \ + ARM_PAS_2_SIZE, \ + GPT_GPI_NS) -#define ARM_PAS_TZC MAP_GPT_REGION_TBL(ARM_PAS_3_BASE, \ - ARM_PAS_3_SIZE, \ - GPI_SECURE) +#define ARM_PAS_SECURE GPT_MAP_REGION_GRANULE(ARM_PAS_3_BASE, \ + ARM_PAS_3_SIZE, \ + GPT_GPI_SECURE) -#define ARM_PAS_REALM MAP_GPT_REGION_TBL(ARM_REALM_BASE, \ - ARM_REALM_SIZE, \ - GPI_REALM) +#define ARM_PAS_REALM GPT_MAP_REGION_GRANULE(ARM_REALM_BASE, \ + ARM_REALM_SIZE, \ + GPT_GPI_REALM) -#define ARM_PAS_EL3_DRAM MAP_GPT_REGION_TBL(ARM_EL3_TZC_DRAM1_BASE, \ - ARM_EL3_TZC_DRAM1_SIZE, \ - GPI_ROOT) +#define ARM_PAS_EL3_DRAM GPT_MAP_REGION_GRANULE(ARM_EL3_TZC_DRAM1_BASE, \ + ARM_EL3_TZC_DRAM1_SIZE, \ + GPT_GPI_ROOT) -#define ARM_PAS_GPTS MAP_GPT_REGION_TBL(ARM_L1_GPT_ADDR_BASE, \ - ARM_L1_GPT_SIZE, \ - GPI_ROOT) +#define ARM_PAS_GPTS GPT_MAP_REGION_GRANULE(ARM_L1_GPT_ADDR_BASE, \ + ARM_L1_GPT_SIZE, \ + GPT_GPI_ROOT) /* GPT Configuration options */ -#define PLATFORM_PGS GPCCR_PGS_4K -#define PLATFORM_PPS GPCCR_PPS_4GB #define PLATFORM_L0GPTSZ GPCCR_L0GPTSZ_30BITS #endif /* ARM_PAS_DEF_H */ diff --git a/lib/gpt/gpt_core.c b/lib/gpt/gpt_core.c deleted file mode 100644 index 8a3afd2fa..000000000 --- a/lib/gpt/gpt_core.c +++ /dev/null @@ -1,767 +0,0 @@ -/* - * Copyright (c) 2021, Arm Limited. All rights reserved. - * - * SPDX-License-Identifier: BSD-3-Clause - */ - -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#if !ENABLE_RME -#error "ENABLE_RME must be enabled to use the GPT library." 
-#endif - -typedef struct { - uintptr_t plat_gpt_l0_base; - uintptr_t plat_gpt_l1_base; - size_t plat_gpt_l0_size; - size_t plat_gpt_l1_size; - unsigned int plat_gpt_pps; - unsigned int plat_gpt_pgs; - unsigned int plat_gpt_l0gptsz; -} gpt_config_t; - -gpt_config_t gpt_config; - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) -/* Helper function that cleans the data cache only if it is enabled. */ -static inline - void gpt_clean_dcache_range(uintptr_t addr, size_t size) -{ - if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { - clean_dcache_range(addr, size); - } -} - -/* Helper function that invalidates the data cache only if it is enabled. */ -static inline - void gpt_inv_dcache_range(uintptr_t addr, size_t size) -{ - if ((read_sctlr_el3() & SCTLR_C_BIT) != 0U) { - inv_dcache_range(addr, size); - } -} -#endif - -typedef struct l1_gpt_attr_desc { - size_t t_sz; /** Table size */ - size_t g_sz; /** Granularity size */ - unsigned int p_val; /** Associated P value */ -} l1_gpt_attr_desc_t; - -/* - * Lookup table to find out the size in bytes of the L1 tables as well - * as the index mask, given the Width of Physical Granule Size (PGS). - * L1 tables are indexed by PA[29:p+4], being 'p' the width in bits of the - * aforementioned Physical Granule Size. - */ -static const l1_gpt_attr_desc_t l1_gpt_attr_lookup[] = { - [GPCCR_PGS_4K] = {U(1) << U(17), /* 16384B x 64bit entry = 128KB */ - PAGE_SIZE_4KB, /* 4KB Granularity */ - U(12)}, - [GPCCR_PGS_64K] = {U(1) << U(13), /* Table size = 8KB */ - PAGE_SIZE_64KB, /* 64KB Granularity */ - U(16)}, - [GPCCR_PGS_16K] = {U(1) << U(15), /* Table size = 32KB */ - PAGE_SIZE_16KB, /* 16KB Granularity */ - U(14)} -}; - -typedef struct l0_gpt_attr_desc { - size_t sz; - unsigned int t_val_mask; -} l0_gpt_attr_desc_t; - -/* - * Lookup table to find out the size in bytes of the L0 table as well - * as the index mask, given the Protected Physical Address Size (PPS). 
- * L0 table is indexed by PA[t-1:30], being 't' the size in bits - * of the aforementioned Protected Physical Address Size. - */ -static const l0_gpt_attr_desc_t l0_gpt_attr_lookup[] = { - - [GPCCR_PPS_4GB] = {U(1) << U(5), /* 4 x 64 bit entry = 32 bytes */ - 0x3}, /* Bits[31:30] */ - - [GPCCR_PPS_64GB] = {U(1) << U(9), /* 512 bytes */ - 0x3f}, /* Bits[35:30] */ - - [GPCCR_PPS_1TB] = {U(1) << U(13), /* 8KB */ - 0x3ff}, /* Bits[39:30] */ - - [GPCCR_PPS_4TB] = {U(1) << U(15), /* 32KB */ - 0xfff}, /* Bits[41:30] */ - - [GPCCR_PPS_16TB] = {U(1) << U(17), /* 128KB */ - 0x3fff}, /* Bits[43:30] */ - - [GPCCR_PPS_256TB] = {U(1) << U(21), /* 2MB */ - 0x3ffff}, /* Bits[47:30] */ - - [GPCCR_PPS_4PB] = {U(1) << U(25), /* 32MB */ - 0x3fffff}, /* Bits[51:30] */ - -}; - -static unsigned int get_l1_gpt_index(unsigned int pgs, uintptr_t pa) -{ - unsigned int l1_gpt_arr_idx; - - /* - * Mask top 2 bits to obtain the 30 bits required to - * generate the L1 GPT index - */ - l1_gpt_arr_idx = (unsigned int)(pa & L1_GPT_INDEX_MASK); - - /* Shift by 'p' value + 4 to obtain the index */ - l1_gpt_arr_idx >>= (l1_gpt_attr_lookup[pgs].p_val + 4); - - return l1_gpt_arr_idx; -} - -unsigned int plat_is_my_cpu_primary(void); - -/* The granule partition tables can only be configured on BL2 */ -#ifdef IMAGE_BL2 - -/* Global to keep track of next available index in array of L1 GPTs */ -static unsigned int l1_gpt_mem_avlbl_index; - -static int validate_l0_gpt_params(gpt_init_params_t *params) -{ - /* Only 1GB of address space per L0 entry is allowed */ - if (params->l0gptsz != GPCCR_L0GPTSZ_30BITS) { - WARN("Invalid L0GPTSZ %u.\n", params->l0gptsz); - } - - /* Only 4K granule is supported for now */ - if (params->pgs != GPCCR_PGS_4K) { - WARN("Invalid GPT PGS %u.\n", params->pgs); - return -EINVAL; - } - - /* Only 4GB of protected physical address space is supported for now */ - if (params->pps != GPCCR_PPS_4GB) { - WARN("Invalid GPT PPS %u.\n", params->pps); - return -EINVAL; - } - - /* Check if GPT 
base address is aligned with the system granule */ - if (!IS_PAGE_ALIGNED(params->l0_mem_base)) { - ERROR("Unaligned L0 GPT base address.\n"); - return -EFAULT; - } - - /* Check if there is enough memory for L0 GPTs */ - if (params->l0_mem_size < l0_gpt_attr_lookup[params->pps].sz) { - ERROR("Inadequate memory for L0 GPTs. "); - ERROR("Expected 0x%lx bytes. Got 0x%lx bytes\n", - l0_gpt_attr_lookup[params->pps].sz, - params->l0_mem_size); - return -ENOMEM; - } - - return 0; -} - -/* - * A L1 GPT is required if any one of the following conditions is true: - * - * - The base address is not 1GB aligned - * - The size of the memory region is not a multiple of 1GB - * - A L1 GPT has been explicitly requested (attrs == PAS_REG_DESC_TYPE_TBL) - * - * This function: - * - iterates over all the PAS regions to determine whether they - * will need a 2 stage look up (and therefore a L1 GPT will be required) or - * if it would be enough with a single level lookup table. - * - Updates the attr field of the PAS regions. - * - Returns the total count of L1 tables needed. - * - * In the future wwe should validate that the PAS range does not exceed the - * configured PPS. (and maybe rename this function as it is validating PAS - * regions). - */ -static unsigned int update_gpt_type(pas_region_t *pas_regions, - unsigned int pas_region_cnt) -{ - unsigned int idx, cnt = 0U; - - for (idx = 0U; idx < pas_region_cnt; idx++) { - if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == - PAS_REG_DESC_TYPE_TBL) { - cnt++; - continue; - } - if (!(IS_1GB_ALIGNED(pas_regions[idx].base_pa) && - IS_1GB_ALIGNED(pas_regions[idx].size))) { - - /* Current region will need L1 GPTs. 
*/ - assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) - == PAS_REG_DESC_TYPE_ANY); - - pas_regions[idx].attrs = - GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_TBL, - PAS_REG_GPI(pas_regions[idx].attrs)); - cnt++; - continue; - } - - /* The PAS can be mapped on a one stage lookup table */ - assert(PAS_REG_DESC_TYPE(pas_regions[idx].attrs) != - PAS_REG_DESC_TYPE_TBL); - - pas_regions[idx].attrs = GPT_DESC_ATTRS(PAS_REG_DESC_TYPE_BLK, - PAS_REG_GPI(pas_regions[idx].attrs)); - } - - return cnt; -} - -static int validate_l1_gpt_params(gpt_init_params_t *params, - unsigned int l1_gpt_cnt) -{ - size_t l1_gpt_sz, l1_gpt_mem_sz; - - /* Check if the granularity is supported */ - assert(xlat_arch_is_granule_size_supported( - l1_gpt_attr_lookup[params->pgs].g_sz)); - - - /* Check if naturally aligned L1 GPTs can be created */ - l1_gpt_sz = l1_gpt_attr_lookup[params->pgs].g_sz; - if (params->l1_mem_base & (l1_gpt_sz - 1)) { - WARN("Unaligned L1 GPT base address.\n"); - return -EFAULT; - } - - /* Check if there is enough memory for L1 GPTs */ - l1_gpt_mem_sz = l1_gpt_cnt * l1_gpt_sz; - if (params->l1_mem_size < l1_gpt_mem_sz) { - WARN("Inadequate memory for L1 GPTs. "); - WARN("Expected 0x%lx bytes. Got 0x%lx bytes\n", - l1_gpt_mem_sz, params->l1_mem_size); - return -ENOMEM; - } - - INFO("Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz); - return 0; -} - -/* - * Helper function to determine if the end physical address lies in the same GB - * as the current physical address. If true, the end physical address is - * returned else, the start address of the next GB is returned. 
- */ -static uintptr_t get_l1_gpt_end_pa(uintptr_t cur_pa, uintptr_t end_pa) -{ - uintptr_t cur_gb, end_gb; - - cur_gb = cur_pa >> ONE_GB_SHIFT; - end_gb = end_pa >> ONE_GB_SHIFT; - - assert(cur_gb <= end_gb); - - if (cur_gb == end_gb) { - return end_pa; - } - - return (cur_gb + 1) << ONE_GB_SHIFT; -} - -static void generate_l0_blk_desc(gpt_init_params_t *params, - unsigned int idx) -{ - uint64_t gpt_desc; - uintptr_t end_addr; - unsigned int end_idx, start_idx; - pas_region_t *pas = params->pas_regions + idx; - uint64_t *l0_gpt_arr = (uint64_t *)params->l0_mem_base; - - /* Create the GPT Block descriptor for this PAS region */ - gpt_desc = GPT_BLK_DESC; - gpt_desc |= PAS_REG_GPI(pas->attrs) - << GPT_BLOCK_DESC_GPI_VAL_SHIFT; - - /* Start index of this region in L0 GPTs */ - start_idx = pas->base_pa >> ONE_GB_SHIFT; - - /* - * Determine number of L0 GPT descriptors covered by - * this PAS region and use the count to populate these - * descriptors. - */ - end_addr = pas->base_pa + pas->size; - assert(end_addr \ - <= (ULL(l0_gpt_attr_lookup[params->pps].t_val_mask + 1)) << 30); - end_idx = end_addr >> ONE_GB_SHIFT; - - for (; start_idx < end_idx; start_idx++) { - l0_gpt_arr[start_idx] = gpt_desc; - INFO("L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n", - start_idx, &l0_gpt_arr[start_idx], - (gpt_desc >> GPT_BLOCK_DESC_GPI_VAL_SHIFT) & - GPT_L1_INDEX_MASK, l0_gpt_arr[start_idx]); - } -} - -static void generate_l0_tbl_desc(gpt_init_params_t *params, - unsigned int idx) -{ - uint64_t gpt_desc = 0U, *l1_gpt_arr; - uintptr_t start_pa, end_pa, cur_pa, next_pa; - unsigned int start_idx, l1_gpt_idx; - unsigned int p_val, gran_sz; - pas_region_t *pas = params->pas_regions + idx; - uint64_t *l0_gpt_base = (uint64_t *)params->l0_mem_base; - uint64_t *l1_gpt_base = (uint64_t *)params->l1_mem_base; - - start_pa = pas->base_pa; - end_pa = start_pa + pas->size; - p_val = l1_gpt_attr_lookup[params->pgs].p_val; - gran_sz = 1 << p_val; - - /* - * end_pa cannot be larger than 
the maximum protected physical memory. - */ - assert(((1ULL<<30) << l0_gpt_attr_lookup[params->pps].t_val_mask) - > end_pa); - - for (cur_pa = start_pa; cur_pa < end_pa;) { - /* - * Determine the PA range that will be covered - * in this loop iteration. - */ - next_pa = get_l1_gpt_end_pa(cur_pa, end_pa); - - INFO("PAS[%u]: start: 0x%lx, end: 0x%lx, next_pa: 0x%lx.\n", - idx, cur_pa, end_pa, next_pa); - - /* Index of this PA in L0 GPTs */ - start_idx = cur_pa >> ONE_GB_SHIFT; - - /* - * If cur_pa is on a 1GB boundary then determine - * the base address of next available L1 GPT - * memory region - */ - if (IS_1GB_ALIGNED(cur_pa)) { - l1_gpt_arr = (uint64_t *)((uint64_t)l1_gpt_base + - (l1_gpt_attr_lookup[params->pgs].t_sz * - l1_gpt_mem_avlbl_index)); - - assert(l1_gpt_arr < - (l1_gpt_base + params->l1_mem_size)); - - /* Create the L0 GPT descriptor for this PAS region */ - gpt_desc = GPT_TBL_DESC | - ((uintptr_t)l1_gpt_arr - & GPT_TBL_DESC_ADDR_MASK); - - l0_gpt_base[start_idx] = gpt_desc; - - /* - * Update index to point to next available L1 - * GPT memory region - */ - l1_gpt_mem_avlbl_index++; - } else { - /* Use the existing L1 GPT */ - l1_gpt_arr = (uint64_t *)(l0_gpt_base[start_idx] - & ~((1U<<12) - 1U)); - } - - INFO("L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n", - start_idx, &l0_gpt_base[start_idx], - (unsigned long long)(l1_gpt_arr), - l0_gpt_base[start_idx]); - - /* - * Fill up L1 GPT entries between these two - * addresses. - */ - for (; cur_pa < next_pa; cur_pa += gran_sz) { - unsigned int gpi_idx, gpi_idx_shift; - - /* Obtain index of L1 GPT entry */ - l1_gpt_idx = get_l1_gpt_index(params->pgs, cur_pa); - - /* - * Obtain index of GPI in L1 GPT entry - * (i = PA[p_val+3:p_val]) - */ - gpi_idx = (cur_pa >> p_val) & GPT_L1_INDEX_MASK; - - /* - * Shift by index * 4 to reach correct - * GPI entry in L1 GPT descriptor. 
- * GPI = gpt_desc[(4*idx)+3:(4*idx)] - */ - gpi_idx_shift = gpi_idx << 2; - - gpt_desc = l1_gpt_arr[l1_gpt_idx]; - - /* Clear existing GPI encoding */ - gpt_desc &= ~(GPT_L1_INDEX_MASK << gpi_idx_shift); - - /* Set the GPI encoding */ - gpt_desc |= ((uint64_t)PAS_REG_GPI(pas->attrs) - << gpi_idx_shift); - - l1_gpt_arr[l1_gpt_idx] = gpt_desc; - - if (gpi_idx == 15U) { - VERBOSE("\tEntry %u [%p] = 0x%llx\n", - l1_gpt_idx, - &l1_gpt_arr[l1_gpt_idx], gpt_desc); - } - } - } -} - -static void create_gpt(gpt_init_params_t *params) -{ - unsigned int idx; - pas_region_t *pas_regions = params->pas_regions; - - INFO("pgs = 0x%x, pps = 0x%x, l0gptsz = 0x%x\n", - params->pgs, params->pps, params->l0gptsz); - INFO("pas_region_cnt = 0x%x L1 base = 0x%lx, L1 sz = 0x%lx\n", - params->pas_count, params->l1_mem_base, params->l1_mem_size); - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range(params->l0_mem_base, params->l0_mem_size); - gpt_inv_dcache_range(params->l1_mem_base, params->l1_mem_size); -#endif - - for (idx = 0U; idx < params->pas_count; idx++) { - - INFO("PAS[%u]: base 0x%llx, sz 0x%lx, GPI 0x%x, type 0x%x\n", - idx, pas_regions[idx].base_pa, pas_regions[idx].size, - PAS_REG_GPI(pas_regions[idx].attrs), - PAS_REG_DESC_TYPE(pas_regions[idx].attrs)); - - /* Check if a block or table descriptor is required */ - if (PAS_REG_DESC_TYPE(pas_regions[idx].attrs) == - PAS_REG_DESC_TYPE_BLK) { - generate_l0_blk_desc(params, idx); - - } else { - generate_l0_tbl_desc(params, idx); - } - } - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range(params->l0_mem_base, params->l0_mem_size); - gpt_clean_dcache_range(params->l1_mem_base, params->l1_mem_size); -#endif - - /* Make sure that all the entries are written to the memory. 
*/ - dsbishst(); -} - -#endif /* IMAGE_BL2 */ - -int gpt_init(gpt_init_params_t *params) -{ -#ifdef IMAGE_BL2 - unsigned int l1_gpt_cnt; - int ret; -#endif - /* Validate arguments */ - assert(params != NULL); - assert(params->pgs <= GPCCR_PGS_16K); - assert(params->pps <= GPCCR_PPS_4PB); - assert(params->l0_mem_base != (uintptr_t)0); - assert(params->l0_mem_size > 0U); - assert(params->l1_mem_base != (uintptr_t)0); - assert(params->l1_mem_size > 0U); - -#ifdef IMAGE_BL2 - /* - * The Granule Protection Tables are initialised only in BL2. - * BL31 is not allowed to initialise them again in case - * these are modified by any other image loaded by BL2. - */ - assert(params->pas_regions != NULL); - assert(params->pas_count > 0U); - - ret = validate_l0_gpt_params(params); - if (ret < 0) { - - return ret; - } - - /* Check if L1 GPTs are required and how many. */ - l1_gpt_cnt = update_gpt_type(params->pas_regions, - params->pas_count); - INFO("%u L1 GPTs requested.\n", l1_gpt_cnt); - - if (l1_gpt_cnt > 0U) { - ret = validate_l1_gpt_params(params, l1_gpt_cnt); - if (ret < 0) { - return ret; - } - } - - create_gpt(params); -#else - /* If running in BL31, only primary CPU can initialise GPTs */ - assert(plat_is_my_cpu_primary() == 1U); - - /* - * If the primary CPU is calling this function from BL31 - * we expect that the tables are aready initialised from - * BL2 and GPCCR_EL3 is already configured with - * Granule Protection Check Enable bit set. - */ - assert((read_gpccr_el3() & GPCCR_GPC_BIT) != 0U); -#endif /* IMAGE_BL2 */ - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - gpt_config.plat_gpt_l0_base = params->l0_mem_base; - gpt_config.plat_gpt_l1_base = params->l1_mem_base; - gpt_config.plat_gpt_l0_size = params->l0_mem_size; - gpt_config.plat_gpt_l1_size = params->l1_mem_size; - - /* Backup the parameters used to configure GPCCR_EL3 on every PE. 
*/ - gpt_config.plat_gpt_pgs = params->pgs; - gpt_config.plat_gpt_pps = params->pps; - gpt_config.plat_gpt_l0gptsz = params->l0gptsz; - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - - return 0; -} - -void gpt_enable(void) -{ - u_register_t gpccr_el3; - - /* Invalidate any stale TLB entries */ - tlbipaallos(); - -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_inv_dcache_range((uintptr_t)&gpt_config, sizeof(gpt_config)); -#endif - -#ifdef IMAGE_BL2 - /* - * Granule tables must be initialised before enabling - * granule protection. - */ - assert(gpt_config.plat_gpt_l0_base != (uintptr_t)NULL); -#endif - write_gptbr_el3(gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT); - - /* GPCCR_EL3.L0GPTSZ */ - gpccr_el3 = SET_GPCCR_L0GPTSZ(gpt_config.plat_gpt_l0gptsz); - - /* GPCCR_EL3.PPS */ - gpccr_el3 |= SET_GPCCR_PPS(gpt_config.plat_gpt_pps); - - /* GPCCR_EL3.PGS */ - gpccr_el3 |= SET_GPCCR_PGS(gpt_config.plat_gpt_pgs); - - /* Set shareability attribute to Outher Shareable */ - gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS); - - /* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */ - gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA); - gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA); - - /* Enable GPT */ - gpccr_el3 |= GPCCR_GPC_BIT; - - write_gpccr_el3(gpccr_el3); - dsbsy(); - - VERBOSE("Granule Protection Checks enabled\n"); -} - -void gpt_disable(void) -{ - u_register_t gpccr_el3 = read_gpccr_el3(); - - write_gpccr_el3(gpccr_el3 &= ~GPCCR_GPC_BIT); - dsbsy(); -} - -#ifdef IMAGE_BL31 - -/* - * Each L1 descriptor is protected by 1 spinlock. The number of descriptors is - * equal to the size of the total protected memory area divided by the size of - * protected memory area covered by each descriptor. - * - * The size of memory covered by each descriptor is the 'size of the granule' x - * 'number of granules' in a descriptor. 
The former is PLAT_ARM_GPT_PGS and - * latter is always 16. - */ -static spinlock_t gpt_lock; - -static unsigned int get_l0_gpt_index(unsigned int pps, uint64_t pa) -{ - unsigned int idx; - - /* Get the index into the L0 table */ - idx = pa >> ONE_GB_SHIFT; - - /* Check if the pa lies within the PPS */ - if (idx & ~(l0_gpt_attr_lookup[pps].t_val_mask)) { - WARN("Invalid address 0x%llx.\n", pa); - return -EINVAL; - } - - return idx; -} - -int gpt_transition_pas(uint64_t pa, - unsigned int src_sec_state, - unsigned int target_pas) -{ - int idx; - unsigned int idx_shift; - unsigned int gpi; - uint64_t gpt_l1_desc; - uint64_t *gpt_l1_addr, *gpt_addr; - - /* - * Check if caller is allowed to transition the granule's PAS. - * - * - Secure world caller can only request S <-> NS transitions on a - * granule that is already in either S or NS PAS. - * - * - Realm world caller can only request R <-> NS transitions on a - * granule that is already in either R or NS PAS. - */ - if (src_sec_state == SMC_FROM_REALM) { - if ((target_pas != GPI_REALM) && (target_pas != GPI_NS)) { - WARN("Invalid caller (%s) and PAS (%d) combination.\n", - "realm world", target_pas); - return -EINVAL; - } - } else if (src_sec_state == SMC_FROM_SECURE) { - if ((target_pas != GPI_SECURE) && (target_pas != GPI_NS)) { - WARN("Invalid caller (%s) and PAS (%d) combination.\n", - "secure world", target_pas); - return -EINVAL; - } - } else { - WARN("Invalid caller security state 0x%x\n", src_sec_state); - return -EINVAL; - } - - /* Obtain the L0 GPT address. */ - gpt_addr = (uint64_t *)gpt_config.plat_gpt_l0_base; - - /* Validate physical address and obtain index into L0 GPT table */ - idx = get_l0_gpt_index(gpt_config.plat_gpt_pps, pa); - if (idx < 0U) { - return idx; - } - - VERBOSE("PA 0x%llx, L0 base addr 0x%llx, L0 index %u\n", - pa, (uint64_t)gpt_addr, idx); - - /* Obtain the L0 descriptor */ - gpt_l1_desc = gpt_addr[idx]; - - /* - * Check if it is a table descriptor. 
Granule transition only applies to - * memory ranges for which L1 tables were created at boot time. So there - * is no possibility of splitting and coalescing tables. - */ - if ((gpt_l1_desc & GPT_L1_INDEX_MASK) != GPT_TBL_DESC) { - WARN("Invalid address 0x%llx.\n", pa); - return -EPERM; - } - - /* Obtain the L1 table address from L0 descriptor. */ - gpt_l1_addr = (uint64_t *)(gpt_l1_desc & ~(0xFFF)); - - /* Obtain the index into the L1 table */ - idx = get_l1_gpt_index(gpt_config.plat_gpt_pgs, pa); - - VERBOSE("L1 table base addr 0x%llx, L1 table index %u\n", (uint64_t)gpt_l1_addr, idx); - - /* Lock access to the granule */ - spin_lock(&gpt_lock); - - /* Obtain the L1 descriptor */ - gpt_l1_desc = gpt_l1_addr[idx]; - - /* Obtain the shift for GPI in L1 GPT entry */ - idx_shift = (pa >> 12) & GPT_L1_INDEX_MASK; - idx_shift <<= 2; - - /* Obtain the current GPI encoding for this PA */ - gpi = (gpt_l1_desc >> idx_shift) & GPT_L1_INDEX_MASK; - - if (src_sec_state == SMC_FROM_REALM) { - /* - * Realm world is only allowed to transition a NS or Realm world - * granule. - */ - if ((gpi != GPI_REALM) && (gpi != GPI_NS)) { - WARN("Invalid transition request from %s.\n", - "realm world"); - spin_unlock(&gpt_lock); - return -EPERM; - } - } else if (src_sec_state == SMC_FROM_SECURE) { - /* - * Secure world is only allowed to transition a NS or Secure world - * granule. - */ - if ((gpi != GPI_SECURE) && (gpi != GPI_NS)) { - WARN("Invalid transition request from %s.\n", - "secure world"); - spin_unlock(&gpt_lock); - return -EPERM; - } - } - /* We don't need an else here since we already handle that above. 
*/ - - VERBOSE("L1 table desc 0x%llx before mod \n", gpt_l1_desc); - - /* Clear existing GPI encoding */ - gpt_l1_desc &= ~(GPT_L1_INDEX_MASK << idx_shift); - - /* Transition the granule to the new PAS */ - gpt_l1_desc |= ((uint64_t)target_pas << idx_shift); - - /* Update the L1 GPT entry */ - gpt_l1_addr[idx] = gpt_l1_desc; - - VERBOSE("L1 table desc 0x%llx after mod \n", gpt_l1_desc); - - /* Make sure change is propagated to other CPUs. */ -#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY) - gpt_clean_dcache_range((uintptr_t)&gpt_addr[idx], sizeof(uint64_t)); -#endif - - gpt_tlbi_by_pa(pa, PAGE_SIZE_4KB); - - /* Make sure that all the entries are written to the memory. */ - dsbishst(); - - /* Unlock access to the granule */ - spin_unlock(&gpt_lock); - - return 0; -} - -#endif /* IMAGE_BL31 */ diff --git a/lib/gpt_rme/gpt_rme.c b/lib/gpt_rme/gpt_rme.c new file mode 100644 index 000000000..1f90e64cf --- /dev/null +++ b/lib/gpt_rme/gpt_rme.c @@ -0,0 +1,1112 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include +#include +#include +#include + +#include +#include +#include +#include "gpt_rme_private.h" +#include +#include +#include +#include + +#if !ENABLE_RME +#error "ENABLE_RME must be enabled to use the GPT library." +#endif + +/* + * Lookup T from PPS + * + * PPS Size T + * 0b000 4GB 32 + * 0b001 64GB 36 + * 0b010 1TB 40 + * 0b011 4TB 42 + * 0b100 16TB 44 + * 0b101 256TB 48 + * 0b110 4PB 52 + * + * See section 15.1.27 of the RME specification. + */ +static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T, + PPS_1TB_T, PPS_4TB_T, + PPS_16TB_T, PPS_256TB_T, + PPS_4PB_T}; + +/* + * Lookup P from PGS + * + * PGS Size P + * 0b00 4KB 12 + * 0b10 16KB 14 + * 0b01 64KB 16 + * + * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo. + * + * See section 15.1.27 of the RME specification. 
+ */ +static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P}; + +/* + * This structure contains GPT configuration data. + */ +typedef struct { + uintptr_t plat_gpt_l0_base; + gpccr_pps_e pps; + gpt_t_val_e t; + gpccr_pgs_e pgs; + gpt_p_val_e p; +} gpt_config_t; + +static gpt_config_t gpt_config; + +/* These variables are used during initialization of the L1 tables. */ +static unsigned int gpt_next_l1_tbl_idx; +static uintptr_t gpt_l1_tbl; + +/* + * This function checks to see if a GPI value is valid. + * + * These are valid GPI values. + * GPT_GPI_NO_ACCESS U(0x0) + * GPT_GPI_SECURE U(0x8) + * GPT_GPI_NS U(0x9) + * GPT_GPI_ROOT U(0xA) + * GPT_GPI_REALM U(0xB) + * GPT_GPI_ANY U(0xF) + * + * Parameters + * gpi GPI to check for validity. + * + * Return + * true for a valid GPI, false for an invalid one. + */ +static bool gpt_is_gpi_valid(unsigned int gpi) +{ + if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) || + ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) { + return true; + } else { + return false; + } +} + +/* + * This function checks to see if two PAS regions overlap. + * + * Parameters + * base_1: base address of first PAS + * size_1: size of first PAS + * base_2: base address of second PAS + * size_2: size of second PAS + * + * Return + * True if PAS regions overlap, false if they do not. + */ +static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1, + uintptr_t base_2, size_t size_2) +{ + if (((base_1 + size_1) > base_2) && ((base_2 + size_2) > base_1)) { + return true; + } else { + return false; + } +} + +/* + * This helper function checks to see if a PAS region from index 0 to + * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table. + * + * Parameters + * l0_idx: Index of the L0 entry to check + * pas_regions: PAS region array + * pas_idx: Upper bound of the PAS array index. + * + * Return + * True if a PAS region occupies the L0 region in question, false if not. 
+ */ +static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx, + pas_region_t *pas_regions, + unsigned int pas_idx) +{ + /* Iterate over PAS regions up to pas_idx. */ + for (unsigned int i = 0U; i < pas_idx; i++) { + if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx), + GPT_L0GPTSZ_ACTUAL_SIZE, + pas_regions[i].base_pa, pas_regions[i].size)) { + return true; + } + } + return false; +} + +/* + * This function iterates over all of the PAS regions and checks them to ensure + * proper alignment of base and size, that the GPI is valid, and that no regions + * overlap. As a part of the overlap checks, this function checks existing L0 + * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables + * is called multiple times to place L1 tables in different areas of memory. It + * also counts the number of L1 tables needed and returns it on success. + * + * Parameters + * *pas_regions Pointer to array of PAS region structures. + * pas_region_cnt Total number of PAS regions in the array. + * + * Return + * Negative Linux error code in the event of a failure, number of L1 regions + * required when successful. + */ +static int gpt_validate_pas_mappings(pas_region_t *pas_regions, + unsigned int pas_region_cnt) +{ + unsigned int idx; + unsigned int l1_cnt = 0U; + unsigned int pas_l1_cnt; + uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base; + + assert(pas_regions != NULL); + assert(pas_region_cnt != 0U); + + for (idx = 0U; idx < pas_region_cnt; idx++) { + /* Check for arithmetic overflow in region. */ + if ((ULONG_MAX - pas_regions[idx].base_pa) < + pas_regions[idx].size) { + ERROR("[GPT] Address overflow in PAS[%u]!\n", idx); + return -EOVERFLOW; + } + + /* Initial checks for PAS validity. 
*/ + if (((pas_regions[idx].base_pa + pas_regions[idx].size) > + GPT_PPS_ACTUAL_SIZE(gpt_config.t)) || + !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) { + ERROR("[GPT] PAS[%u] is invalid!\n", idx); + return -EFAULT; + } + + /* + * Make sure this PAS does not overlap with another one. We + * start from idx + 1 instead of 0 since prior PAS mappings will + * have already checked themselves against this one. + */ + for (unsigned int i = idx + 1; i < pas_region_cnt; i++) { + if (gpt_check_pas_overlap(pas_regions[idx].base_pa, + pas_regions[idx].size, + pas_regions[i].base_pa, + pas_regions[i].size)) { + ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n", + i, idx); + return -EFAULT; + } + } + + /* + * Since this function can be called multiple times with + * separate L1 tables we need to check the existing L0 mapping + * to see if this PAS would fall into one that has already been + * initialized. + */ + for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa); + i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1); + i++) { + if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) && + (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) { + /* This descriptor is unused so continue. */ + continue; + } + + /* + * This descriptor has been initialized in a previous + * call to this function so cannot be initialized again. + */ + ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n", + idx, i); + return -EFAULT; + } + + /* Check for block mapping (L0) type. */ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_BLOCK) { + /* Make sure base and size are block-aligned. */ + if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) || + !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) { + ERROR("[GPT] PAS[%u] is not block-aligned!\n", + idx); + return -EFAULT; + } + + continue; + } + + /* Check for granule mapping (L1) type. 
*/ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_GRANULE) { + /* Make sure base and size are granule-aligned. */ + if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) || + !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) { + ERROR("[GPT] PAS[%u] is not granule-aligned!\n", + idx); + return -EFAULT; + } + + /* Find how many L1 tables this PAS occupies. */ + pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa + + pas_regions[idx].size - 1) - + GPT_L0_IDX(pas_regions[idx].base_pa) + 1); + + /* + * This creates a situation where, if multiple PAS + * regions occupy the same table descriptor, we can get + * an artificially high total L1 table count. The way we + * handle this is by checking each PAS against those + * before it in the array, and if they both occupy the + * same PAS we subtract from pas_l1_cnt and only the + * first PAS in the array gets to count it. + */ + + /* + * If L1 count is greater than 1 we know the start and + * end PAs are in different L0 regions so we must check + * both for overlap against other PAS. + */ + if (pas_l1_cnt > 1) { + if (gpt_does_previous_pas_exist_here( + GPT_L0_IDX(pas_regions[idx].base_pa + + pas_regions[idx].size - 1), + pas_regions, idx)) { + pas_l1_cnt = pas_l1_cnt - 1; + } + } + + if (gpt_does_previous_pas_exist_here( + GPT_L0_IDX(pas_regions[idx].base_pa), + pas_regions, idx)) { + pas_l1_cnt = pas_l1_cnt - 1; + } + + l1_cnt += pas_l1_cnt; + continue; + } + + /* If execution reaches this point, mapping type is invalid. */ + ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx, + GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs)); + return -EINVAL; + } + + return l1_cnt; +} + +/* + * This function validates L0 initialization parameters. + * + * Parameters + * l0_mem_base Base address of memory used for L0 tables. + * l1_mem_size Size of memory available for L0 tables. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. 
+ */ +static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base, + size_t l0_mem_size) +{ + size_t l0_alignment; + + /* + * Make sure PPS is valid and then store it since macros need this value + * to work. + */ + if (pps > GPT_PPS_MAX) { + ERROR("[GPT] Invalid PPS: 0x%x\n", pps); + return -EINVAL; + } + gpt_config.pps = pps; + gpt_config.t = gpt_t_lookup[pps]; + + /* Alignment must be the greater of 4k or l0 table size. */ + l0_alignment = PAGE_SIZE_4KB; + if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) { + l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t); + } + + /* Check base address. */ + if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) { + ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base); + return -EFAULT; + } + + /* Check size. */ + if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) { + ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n", + GPT_L0_TABLE_SIZE(gpt_config.t), + l0_mem_size); + return -ENOMEM; + } + + return 0; +} + +/* + * In the event that L1 tables are needed, this function validates + * the L1 table generation parameters. + * + * Parameters + * l1_mem_base Base address of memory used for L1 table allocation. + * l1_mem_size Total size of memory available for L1 tables. + * l1_gpt_cnt Number of L1 tables needed. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size, + unsigned int l1_gpt_cnt) +{ + size_t l1_gpt_mem_sz; + + /* Check if the granularity is supported */ + if (!xlat_arch_is_granule_size_supported( + GPT_PGS_ACTUAL_SIZE(gpt_config.p))) { + return -EPERM; + } + + /* Make sure L1 tables are aligned to their size. */ + if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) { + ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n", + l1_mem_base); + return -EFAULT; + } + + /* Get total memory needed for L1 tables. 
*/ + l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p); + + /* Check for overflow. */ + if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) { + ERROR("[GPT] Overflow calculating L1 memory size.\n"); + return -ENOMEM; + } + + /* Make sure enough space was supplied. */ + if (l1_mem_size < l1_gpt_mem_sz) { + ERROR("[GPT] Inadequate memory for L1 GPTs. "); + ERROR(" Expected 0x%lx bytes. Got 0x%lx bytes\n", + l1_gpt_mem_sz, l1_mem_size); + return -ENOMEM; + } + + VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz); + return 0; +} + +/* + * This function initializes L0 block descriptors (regions that cannot be + * transitioned at the granule level) according to the provided PAS. + * + * Parameters + * *pas Pointer to the structure defining the PAS region to + * initialize. + */ +static void gpt_generate_l0_blk_desc(pas_region_t *pas) +{ + uint64_t gpt_desc; + unsigned int end_idx; + unsigned int idx; + uint64_t *l0_gpt_arr; + + assert(gpt_config.plat_gpt_l0_base != 0U); + assert(pas != NULL); + + /* + * Checking of PAS parameters has already been done in + * gpt_validate_pas_mappings so no need to check the same things again. + */ + + l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base; + + /* Create the GPT Block descriptor for this PAS region */ + gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs)); + + /* Start index of this region in L0 GPTs */ + idx = pas->base_pa >> GPT_L0_IDX_SHIFT; + + /* + * Determine number of L0 GPT descriptors covered by + * this PAS region and use the count to populate these + * descriptors. + */ + end_idx = (pas->base_pa + pas->size) >> GPT_L0_IDX_SHIFT; + + /* Generate the needed block descriptors. 
 */
+ for (; idx < end_idx; idx++) {
+ l0_gpt_arr[idx] = gpt_desc;
+ VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%llx (0x%llx)\n",
+ idx, &l0_gpt_arr[idx],
+ (gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
+ GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
+ }
+}
+
+/*
+ * Helper function to determine if the end physical address lies in the same L0
+ * region as the current physical address. If true, the end physical address is
+ * returned; else, the start address of the next region is returned.
+ *
+ * Parameters
+ * cur_pa Physical address of the current PA in the loop through
+ * the range.
+ * end_pa Physical address of the end PA in a PAS range.
+ *
+ * Return
+ * The PA of the end of the current range.
+ */
+static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
+{
+ uintptr_t cur_idx;
+ uintptr_t end_idx;
+
+ cur_idx = cur_pa >> GPT_L0_IDX_SHIFT;
+ end_idx = end_pa >> GPT_L0_IDX_SHIFT;
+
+ assert(cur_idx <= end_idx);
+
+ if (cur_idx == end_idx) {
+ return end_pa;
+ }
+
+ return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
+}
+
+/*
+ * Helper function to fill out GPI entries in a single L1 table. This function
+ * fills out entire L1 descriptors at a time to save memory writes.
+ *
+ * Parameters
+ * gpi GPI to set this range to
+ * l1 Pointer to L1 table to fill out
+ * first Address of first granule in range.
+ * last Address of last granule in range (inclusive).
+ */
+static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
+ uintptr_t last)
+{
+ uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
+ uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;
+
+ assert(first <= last);
+ assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
+ assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
+ assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
+ assert(l1 != NULL);
+
+ /* Shift the mask if we're starting in the middle of an L1 entry. 
*/ + gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2); + + /* Fill out each L1 entry for this region. */ + for (unsigned int i = GPT_L1_IDX(gpt_config.p, first); + i <= GPT_L1_IDX(gpt_config.p, last); i++) { + /* Account for stopping in the middle of an L1 entry. */ + if (i == GPT_L1_IDX(gpt_config.p, last)) { + gpi_mask &= (gpi_mask >> ((15 - + GPT_L1_GPI_IDX(gpt_config.p, last)) << 2)); + } + + /* Write GPI values. */ + assert((l1[i] & gpi_mask) == + (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask)); + l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field); + + /* Reset mask. */ + gpi_mask = 0xFFFFFFFFFFFFFFFF; + } +} + +/* + * This function finds the next available unused L1 table and initializes all + * granules descriptor entries to GPI_ANY. This ensures that there are no chunks + * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the + * event that a PAS region stops midway through an L1 table, thus guaranteeing + * that all memory not explicitly assigned is GPI_ANY. This function does not + * check for overflow conditions, that should be done by the caller. + * + * Return + * Pointer to the next available L1 table. + */ +static uint64_t *gpt_get_new_l1_tbl(void) +{ + /* Retrieve the next L1 table. */ + uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) + + (GPT_L1_TABLE_SIZE(gpt_config.p) * + gpt_next_l1_tbl_idx)); + + /* Increment L1 counter. */ + gpt_next_l1_tbl_idx++; + + /* Initialize all GPIs to GPT_GPI_ANY */ + for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) { + l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY); + } + + return l1; +} + +/* + * When L1 tables are needed, this function creates the necessary L0 table + * descriptors and fills out the L1 table entries according to the supplied + * PAS range. + * + * Parameters + * *pas Pointer to the structure defining the PAS region. 
+ */ +static void gpt_generate_l0_tbl_desc(pas_region_t *pas) +{ + uintptr_t end_pa; + uintptr_t cur_pa; + uintptr_t last_gran_pa; + uint64_t *l0_gpt_base; + uint64_t *l1_gpt_arr; + unsigned int l0_idx; + + assert(gpt_config.plat_gpt_l0_base != 0U); + assert(pas != NULL); + + /* + * Checking of PAS parameters has already been done in + * gpt_validate_pas_mappings so no need to check the same things again. + */ + + end_pa = pas->base_pa + pas->size; + l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base; + + /* We start working from the granule at base PA */ + cur_pa = pas->base_pa; + + /* Iterate over each L0 region in this memory range. */ + for (l0_idx = GPT_L0_IDX(pas->base_pa); + l0_idx <= GPT_L0_IDX(end_pa - 1U); + l0_idx++) { + + /* + * See if the L0 entry is already a table descriptor or if we + * need to create one. + */ + if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) { + /* Get the L1 array from the L0 entry. */ + l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]); + } else { + /* Get a new L1 table from the L1 memory space. */ + l1_gpt_arr = gpt_get_new_l1_tbl(); + + /* Fill out the L0 descriptor and flush it. */ + l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr); + } + + VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%llx)\n", + l0_idx, &l0_gpt_base[l0_idx], + (unsigned long long)(l1_gpt_arr), + l0_gpt_base[l0_idx]); + + /* + * Determine the PA of the last granule in this L0 descriptor. + */ + last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) - + GPT_PGS_ACTUAL_SIZE(gpt_config.p); + + /* + * Fill up L1 GPT entries between these two addresses. This + * function needs the addresses of the first granule and last + * granule in the range. + */ + gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr, + cur_pa, last_gran_pa); + + /* Advance cur_pa to first granule in next L0 region. 
*/ + cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa); + } +} + +/* + * This function flushes a range of L0 descriptors used by a given PAS region + * array. There is a chance that some unmodified L0 descriptors would be flushed + * in the case that there are "holes" in an array of PAS regions but overall + * this should be faster than individually flushing each modified L0 descriptor + * as they are created. + * + * Parameters + * *pas Pointer to an array of PAS regions. + * pas_count Number of entries in the PAS array. + */ +static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count) +{ + unsigned int idx; + unsigned int start_idx; + unsigned int end_idx; + uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base; + + assert(pas != NULL); + assert(pas_count > 0); + + /* Initial start and end values. */ + start_idx = GPT_L0_IDX(pas[0].base_pa); + end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1); + + /* Find lowest and highest L0 indices used in this PAS array. */ + for (idx = 1; idx < pas_count; idx++) { + if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) { + start_idx = GPT_L0_IDX(pas[idx].base_pa); + } + if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) { + end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1); + } + } + + /* + * Flush all covered L0 descriptors, add 1 because we need to include + * the end index value. + */ + flush_dcache_range((uintptr_t)&l0[start_idx], + ((end_idx + 1) - start_idx) * sizeof(uint64_t)); +} + +/* + * Public API to enable granule protection checks once the tables have all been + * initialized. This function is called at first initialization and then again + * later during warm boots of CPU cores. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_enable(void) +{ + u_register_t gpccr_el3; + + /* + * Granule tables must be initialised before enabling + * granule protection. 
+ */
+ if (gpt_config.plat_gpt_l0_base == 0U) {
+ ERROR("[GPT] Tables have not been initialized!\n");
+ return -EPERM;
+ }
+
+ /* Invalidate any stale TLB entries */
+ tlbipaallos();
+ dsb();
+
+ /* Write the base address of the L0 tables into GPTBR */
+ write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
+ >> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
+
+ /* GPCCR_EL3.PPS */
+ gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
+
+ /* GPCCR_EL3.PGS */
+ gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
+
+ /* Set shareability attribute to Outer Shareable */
+ gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_OS);
+
+ /* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
+ gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
+ gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
+
+ /* Enable GPT */
+ gpccr_el3 |= GPCCR_GPC_BIT;
+
+ /* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
+ write_gpccr_el3(gpccr_el3);
+ tlbipaallos();
+ dsb();
+ isb();
+
+ return 0;
+}
+
+/*
+ * Public API to disable granule protection checks.
+ */
+void gpt_disable(void)
+{
+ u_register_t gpccr_el3 = read_gpccr_el3();
+
+ write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
+ dsbsy();
+ isb();
+}
+
+/*
+ * Public API that initializes the entire protected space to GPT_GPI_ANY using
+ * the L0 tables (block descriptors). Ideally, this function is invoked prior
+ * to DDR discovery and initialization. The MMU must be initialized before
+ * calling this function.
+ *
+ * Parameters
+ * pps PPS value to use for table generation
+ * l0_mem_base Base address of L0 tables in memory.
+ * l0_mem_size Total size of memory available for L0 tables.
+ *
+ * Return
+ * Negative Linux error code in the event of a failure, 0 for success.
+ */
+int gpt_init_l0_tables(unsigned int pps, uintptr_t l0_mem_base,
+ size_t l0_mem_size)
+{
+ int ret;
+ uint64_t gpt_desc;
+
+ /* Ensure that MMU and caches are enabled. */
+ assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
+
+ /* Validate other parameters. 
*/ + ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size); + if (ret < 0) { + return ret; + } + + /* Create the descriptor to initialize L0 entries with. */ + gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY); + + /* Iterate through all L0 entries */ + for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) { + ((uint64_t *)l0_mem_base)[i] = gpt_desc; + } + + /* Flush updated L0 tables to memory. */ + flush_dcache_range((uintptr_t)l0_mem_base, + (size_t)GPT_L0_TABLE_SIZE(gpt_config.t)); + + /* Stash the L0 base address once initial setup is complete. */ + gpt_config.plat_gpt_l0_base = l0_mem_base; + + return 0; +} + +/* + * Public API that carves out PAS regions from the L0 tables and builds any L1 + * tables that are needed. This function ideally is run after DDR discovery and + * initialization. The L0 tables must have already been initialized to GPI_ANY + * when this function is called. + * + * This function can be called multiple times with different L1 memory ranges + * and PAS regions if it is desirable to place L1 tables in different locations + * in memory. (ex: you have multiple DDR banks and want to place the L1 tables + * in the DDR bank that they control) + * + * Parameters + * pgs PGS value to use for table generation. + * l1_mem_base Base address of memory used for L1 tables. + * l1_mem_size Total size of memory available for L1 tables. + * *pas_regions Pointer to PAS regions structure array. + * pas_count Total number of PAS regions. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base, + size_t l1_mem_size, pas_region_t *pas_regions, + unsigned int pas_count) +{ + int ret; + int l1_gpt_cnt; + + /* Ensure that MMU and caches are enabled. */ + assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U); + + /* PGS is needed for gpt_validate_pas_mappings so check it now. 
*/ + if (pgs > GPT_PGS_MAX) { + ERROR("[GPT] Invalid PGS: 0x%x\n", pgs); + return -EINVAL; + } + gpt_config.pgs = pgs; + gpt_config.p = gpt_p_lookup[pgs]; + + /* Make sure L0 tables have been initialized. */ + if (gpt_config.plat_gpt_l0_base == 0U) { + ERROR("[GPT] L0 tables must be initialized first!\n"); + return -EPERM; + } + + /* Check if L1 GPTs are required and how many. */ + l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count); + if (l1_gpt_cnt < 0) { + return l1_gpt_cnt; + } + + VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt); + + /* If L1 tables are needed then validate the L1 parameters. */ + if (l1_gpt_cnt > 0) { + ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size, + l1_gpt_cnt); + if (ret < 0) { + return ret; + } + + /* Set up parameters for L1 table generation. */ + gpt_l1_tbl = l1_mem_base; + gpt_next_l1_tbl_idx = 0U; + } + + INFO("[GPT] Boot Configuration\n"); + INFO(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t); + INFO(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p); + INFO(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL); + INFO(" PAS count: 0x%x\n", pas_count); + INFO(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base); + + /* Generate the tables in memory. */ + for (unsigned int idx = 0U; idx < pas_count; idx++) { + INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n", + idx, pas_regions[idx].base_pa, pas_regions[idx].size, + GPT_PAS_ATTR_GPI(pas_regions[idx].attrs), + GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs)); + + /* Check if a block or table descriptor is required */ + if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) == + GPT_PAS_ATTR_MAP_TYPE_BLOCK) { + gpt_generate_l0_blk_desc(&pas_regions[idx]); + + } else { + gpt_generate_l0_tbl_desc(&pas_regions[idx]); + } + } + + /* Flush modified L0 tables. */ + flush_l0_for_pas_array(pas_regions, pas_count); + + /* Flush L1 tables if needed. 
*/ + if (l1_gpt_cnt > 0) { + flush_dcache_range(l1_mem_base, + GPT_L1_TABLE_SIZE(gpt_config.p) * + l1_gpt_cnt); + } + + /* Make sure that all the entries are written to the memory. */ + dsbishst(); + + return 0; +} + +/* + * Public API to initialize the runtime gpt_config structure based on the values + * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization + * typically happens in a bootloader stage prior to setting up the EL3 runtime + * environment for the granule transition service so this function detects the + * initialization from a previous stage. Granule protection checks must be + * enabled already or this function will return an error. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_runtime_init(void) +{ + u_register_t reg; + + /* Ensure that MMU and caches are enabled. */ + assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U); + + /* Ensure GPC are already enabled. */ + if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) { + ERROR("[GPT] Granule protection checks are not enabled!\n"); + return -EPERM; + } + + /* + * Read the L0 table address from GPTBR, we don't need the L1 base + * address since those are included in the L0 tables as needed. + */ + reg = read_gptbr_el3(); + gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) & + GPTBR_BADDR_MASK) << + GPTBR_BADDR_VAL_SHIFT; + + /* Read GPCCR to get PGS and PPS values. 
*/ + reg = read_gpccr_el3(); + gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK; + gpt_config.t = gpt_t_lookup[gpt_config.pps]; + gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK; + gpt_config.p = gpt_p_lookup[gpt_config.pgs]; + + VERBOSE("[GPT] Runtime Configuration\n"); + VERBOSE(" PPS/T: 0x%x/%u\n", gpt_config.pps, gpt_config.t); + VERBOSE(" PGS/P: 0x%x/%u\n", gpt_config.pgs, gpt_config.p); + VERBOSE(" L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL); + VERBOSE(" L0 base: 0x%lx\n", gpt_config.plat_gpt_l0_base); + + return 0; +} + +/* + * The L1 descriptors are protected by a spinlock to ensure that multiple + * CPUs do not attempt to change the descriptors at once. In the future it + * would be better to have separate spinlocks for each L1 descriptor. + */ +static spinlock_t gpt_lock; + +/* + * Check if caller is allowed to transition a PAS. + * + * - Secure world caller can only request S <-> NS transitions on a + * granule that is already in either S or NS PAS. + * + * - Realm world caller can only request R <-> NS transitions on a + * granule that is already in either R or NS PAS. + * + * Parameters + * src_sec_state Security state of the caller. + * current_gpi Current GPI of the granule. + * target_gpi Requested new GPI for the granule. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +static int gpt_check_transition_gpi(unsigned int src_sec_state, + unsigned int current_gpi, + unsigned int target_gpi) +{ + unsigned int check_gpi; + + /* Cannot transition a granule to the state it is already in. */ + if (current_gpi == target_gpi) { + return -EINVAL; + } + + /* Check security state, only secure and realm can transition. */ + if (src_sec_state == SMC_FROM_REALM) { + check_gpi = GPT_GPI_REALM; + } else if (src_sec_state == SMC_FROM_SECURE) { + check_gpi = GPT_GPI_SECURE; + } else { + return -EINVAL; + } + + /* Make sure security state is allowed to make the transition. 
*/ + if ((target_gpi != check_gpi) && (target_gpi != GPT_GPI_NS)) { + return -EINVAL; + } + if ((current_gpi != check_gpi) && (current_gpi != GPT_GPI_NS)) { + return -EINVAL; + } + + return 0; +} + +/* + * This function is the core of the granule transition service. When a granule + * transition request occurs it is routed to this function where the request is + * validated then fulfilled if possible. + * + * TODO: implement support for transitioning multiple granules at once. + * + * Parameters + * base Base address of the region to transition, must be + * aligned to granule size. + * size Size of region to transition, must be aligned to granule + * size. + * src_sec_state Security state of the caller. + * target_pas Target PAS of the specified memory region. + * + * Return + * Negative Linux error code in the event of a failure, 0 for success. + */ +int gpt_transition_pas(uint64_t base, size_t size, unsigned int src_sec_state, + unsigned int target_pas) +{ + int idx; + unsigned int gpi_shift; + unsigned int gpi; + uint64_t gpt_l0_desc; + uint64_t gpt_l1_desc; + uint64_t *gpt_l1_addr; + uint64_t *gpt_l0_base; + + /* Ensure that the tables have been set up before taking requests. */ + assert(gpt_config.plat_gpt_l0_base != 0U); + + /* Check for address range overflow. */ + if ((ULONG_MAX - base) < size) { + VERBOSE("[GPT] Transition request address overflow!\n"); + VERBOSE(" Base=0x%llx\n", base); + VERBOSE(" Size=0x%lx\n", size); + return -EINVAL; + } + + /* Make sure base and size are valid. */ + if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) || + ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0U) || + (size == 0U) || + ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) { + VERBOSE("[GPT] Invalid granule transition address range!\n"); + VERBOSE(" Base=0x%llx\n", base); + VERBOSE(" Size=0x%lx\n", size); + return -EINVAL; + } + + /* See if this is a single granule transition or a range of granules. 
*/ + if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) { + /* + * TODO: Add support for transitioning multiple granules with a + * single call to this function. + */ + panic(); + } + + /* Get the L0 descriptor and make sure it is for a table. */ + gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base; + gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)]; + if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) { + VERBOSE("[GPT] Granule is not covered by a table descriptor!\n"); + VERBOSE(" Base=0x%llx\n", base); + return -EINVAL; + } + + /* Get the table index and GPI shift from PA. */ + gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc); + idx = GPT_L1_IDX(gpt_config.p, base); + gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2; + + /* + * Access to L1 tables is controlled by a global lock to ensure + * that no more than one CPU is allowed to make changes at any + * given time. + */ + spin_lock(&gpt_lock); + gpt_l1_desc = gpt_l1_addr[idx]; + gpi = (gpt_l1_desc >> gpi_shift) & GPT_L1_GRAN_DESC_GPI_MASK; + + /* Make sure caller state and source/target PAS are allowed. */ + if (gpt_check_transition_gpi(src_sec_state, gpi, target_pas) < 0) { + spin_unlock(&gpt_lock); + VERBOSE("[GPT] Invalid caller state and PAS combo!\n"); + VERBOSE(" Caller: %u, Current GPI: %u, Target GPI: %u\n", + src_sec_state, gpi, target_pas); + return -EPERM; + } + + /* Clear existing GPI encoding and transition granule. */ + gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift); + gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift); + gpt_l1_addr[idx] = gpt_l1_desc; + + /* Ensure that the write operation happens before the unlock. */ + dmbishst(); + + /* Unlock access to the L1 tables. */ + spin_unlock(&gpt_lock); + + /* Cache maintenance. 
*/ + clean_dcache_range((uintptr_t)&gpt_l1_addr[idx], + sizeof(uint64_t)); + gpt_tlbi_by_pa(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p)); + dsbishst(); + + VERBOSE("[GPT] Granule 0x%llx, GPI 0x%x->0x%x\n", base, gpi, + target_pas); + + return 0; +} diff --git a/lib/gpt/gpt.mk b/lib/gpt_rme/gpt_rme.mk similarity index 61% rename from lib/gpt/gpt.mk rename to lib/gpt_rme/gpt_rme.mk index 611e50457..60176f4e1 100644 --- a/lib/gpt/gpt.mk +++ b/lib/gpt_rme/gpt_rme.mk @@ -4,5 +4,5 @@ # SPDX-License-Identifier: BSD-3-Clause # -GPT_LIB_SRCS := $(addprefix lib/gpt/, \ - gpt_core.c) +GPT_LIB_SRCS := $(addprefix lib/gpt_rme/, \ + gpt_rme.c) diff --git a/lib/gpt_rme/gpt_rme_private.h b/lib/gpt_rme/gpt_rme_private.h new file mode 100644 index 000000000..5770bf7d6 --- /dev/null +++ b/lib/gpt_rme/gpt_rme_private.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2021, Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#ifndef GPT_RME_PRIVATE_H +#define GPT_RME_PRIVATE_H + +#include +#include +#include + +/******************************************************************************/ +/* GPT descriptor definitions */ +/******************************************************************************/ + +/* GPT level 0 descriptor bit definitions. */ +#define GPT_L0_TYPE_MASK UL(0xF) +#define GPT_L0_TYPE_SHIFT U(0) + +/* For now, we don't support contiguous descriptors, only table and block. */ +#define GPT_L0_TYPE_TBL_DESC UL(0x3) +#define GPT_L0_TYPE_BLK_DESC UL(0x1) + +#define GPT_L0_TBL_DESC_L1ADDR_MASK UL(0xFFFFFFFFFF) +#define GPT_L0_TBL_DESC_L1ADDR_SHIFT U(12) + +#define GPT_L0_BLK_DESC_GPI_MASK UL(0xF) +#define GPT_L0_BLK_DESC_GPI_SHIFT U(4) + +/* GPT level 1 descriptor bit definitions */ +#define GPT_L1_GRAN_DESC_GPI_MASK UL(0xF) + +/* + * This macro fills out every GPI entry in a granules descriptor to the same + * value. 
+ */ +#define GPT_BUILD_L1_DESC(_gpi) (((uint64_t)(_gpi) << 4*0) | \ + ((uint64_t)(_gpi) << 4*1) | \ + ((uint64_t)(_gpi) << 4*2) | \ + ((uint64_t)(_gpi) << 4*3) | \ + ((uint64_t)(_gpi) << 4*4) | \ + ((uint64_t)(_gpi) << 4*5) | \ + ((uint64_t)(_gpi) << 4*6) | \ + ((uint64_t)(_gpi) << 4*7) | \ + ((uint64_t)(_gpi) << 4*8) | \ + ((uint64_t)(_gpi) << 4*9) | \ + ((uint64_t)(_gpi) << 4*10) | \ + ((uint64_t)(_gpi) << 4*11) | \ + ((uint64_t)(_gpi) << 4*12) | \ + ((uint64_t)(_gpi) << 4*13) | \ + ((uint64_t)(_gpi) << 4*14) | \ + ((uint64_t)(_gpi) << 4*15)) + +/******************************************************************************/ +/* GPT platform configuration */ +/******************************************************************************/ + +/* This value comes from GPCCR_EL3 so no externally supplied definition. */ +#define GPT_L0GPTSZ ((unsigned int)((read_gpccr_el3() >> \ + GPCCR_L0GPTSZ_SHIFT) & GPCCR_L0GPTSZ_MASK)) + +/* The "S" value is directly related to L0GPTSZ */ +#define GPT_S_VAL (GPT_L0GPTSZ + 30U) + +/* + * Map PPS values to T values. + * + * PPS Size T + * 0b000 4GB 32 + * 0b001 64GB 36 + * 0b010 1TB 40 + * 0b011 4TB 42 + * 0b100 16TB 44 + * 0b101 256TB 48 + * 0b110 4PB 52 + * + * See section 15.1.27 of the RME specification. + */ +typedef enum { + PPS_4GB_T = 32U, + PPS_64GB_T = 36U, + PPS_1TB_T = 40U, + PPS_4TB_T = 42U, + PPS_16TB_T = 44U, + PPS_256TB_T = 48U, + PPS_4PB_T = 52U +} gpt_t_val_e; + +/* + * Map PGS values to P values. + * + * PGS Size P + * 0b00 4KB 12 + * 0b10 16KB 14 + * 0b01 64KB 16 + * + * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo. + * + * See section 15.1.27 of the RME specification. + */ +typedef enum { + PGS_4KB_P = 12U, + PGS_16KB_P = 14U, + PGS_64KB_P = 16U +} gpt_p_val_e; + +/* Max valid value for PGS. */ +#define GPT_PGS_MAX (2U) + +/* Max valid value for PPS. 
*/ +#define GPT_PPS_MAX (6U) + +/******************************************************************************/ +/* L0 address attribute macros */ +/******************************************************************************/ + +/* + * If S is greater than or equal to T then there is a single L0 region covering + * the entire protected space so there is no L0 index, so the width (and the + * derivative mask value) are both zero. If we don't specifically handle this + * special case we'll get a negative width value which does not make sense and + * could cause a lot of problems. + */ +#define GPT_L0_IDX_WIDTH(_t) (((_t) > GPT_S_VAL) ? \ + ((_t) - GPT_S_VAL) : (0U)) + +/* Bit shift for the L0 index field in a PA. */ +#define GPT_L0_IDX_SHIFT (GPT_S_VAL) + +/* Mask for the L0 index field, must be shifted. */ +#define GPT_L0_IDX_MASK(_t) (0xFFFFFFFFFFFFFFFFUL >> \ + (64U - (GPT_L0_IDX_WIDTH(_t)))) + +/* Total number of L0 regions. */ +#define GPT_L0_REGION_COUNT(_t) ((GPT_L0_IDX_MASK(_t)) + 1U) + +/* Total size of each GPT L0 region in bytes. */ +#define GPT_L0_REGION_SIZE (1UL << (GPT_L0_IDX_SHIFT)) + +/* Total size in bytes of the whole L0 table. */ +#define GPT_L0_TABLE_SIZE(_t) ((GPT_L0_REGION_COUNT(_t)) << 3U) + +/******************************************************************************/ +/* L1 address attribute macros */ +/******************************************************************************/ + +/* Width of the L1 index field. */ +#define GPT_L1_IDX_WIDTH(_p) ((GPT_S_VAL - 1U) - ((_p) + 3U)) + +/* Bit shift for the L1 index field. */ +#define GPT_L1_IDX_SHIFT(_p) ((_p) + 4U) + +/* Mask for the L1 index field, must be shifted. */ +#define GPT_L1_IDX_MASK(_p) (0xFFFFFFFFFFFFFFFFUL >> \ + (64U - (GPT_L1_IDX_WIDTH(_p)))) + +/* Bit shift for the index of the L1 GPI in a PA. */ +#define GPT_L1_GPI_IDX_SHIFT(_p) (_p) + +/* Mask for the index of the L1 GPI in a PA. */ +#define GPT_L1_GPI_IDX_MASK (0xF) + +/* Total number of entries in each L1 table. 
*/ +#define GPT_L1_ENTRY_COUNT(_p) ((GPT_L1_IDX_MASK(_p)) + 1U) + +/* Total size in bytes of each L1 table. */ +#define GPT_L1_TABLE_SIZE(_p) ((GPT_L1_ENTRY_COUNT(_p)) << 3U) + +/******************************************************************************/ +/* General helper macros */ +/******************************************************************************/ + +/* Protected space actual size in bytes. */ +#define GPT_PPS_ACTUAL_SIZE(_t) (1UL << (_t)) + +/* Granule actual size in bytes. */ +#define GPT_PGS_ACTUAL_SIZE(_p) (1UL << (_p)) + +/* L0 GPT region size in bytes. */ +#define GPT_L0GPTSZ_ACTUAL_SIZE (1UL << GPT_S_VAL) + +/* Get the index of the L0 entry from a physical address. */ +#define GPT_L0_IDX(_pa) ((_pa) >> GPT_L0_IDX_SHIFT) + +/* + * This definition is used to determine if a physical address lies on an L0 + * region boundary. + */ +#define GPT_IS_L0_ALIGNED(_pa) (((_pa) & (GPT_L0_REGION_SIZE - U(1))) == U(0)) + +/* Get the type field from an L0 descriptor. */ +#define GPT_L0_TYPE(_desc) (((_desc) >> GPT_L0_TYPE_SHIFT) & \ + GPT_L0_TYPE_MASK) + +/* Create an L0 block descriptor. */ +#define GPT_L0_BLK_DESC(_gpi) (GPT_L0_TYPE_BLK_DESC | \ + (((_gpi) & GPT_L0_BLK_DESC_GPI_MASK) << \ + GPT_L0_BLK_DESC_GPI_SHIFT)) + +/* Create an L0 table descriptor with an L1 table address. */ +#define GPT_L0_TBL_DESC(_pa) (GPT_L0_TYPE_TBL_DESC | ((uint64_t)(_pa) & \ + (GPT_L0_TBL_DESC_L1ADDR_MASK << \ + GPT_L0_TBL_DESC_L1ADDR_SHIFT))) + +/* Get the GPI from an L0 block descriptor. */ +#define GPT_L0_BLKD_GPI(_desc) (((_desc) >> GPT_L0_BLK_DESC_GPI_SHIFT) & \ + GPT_L0_BLK_DESC_GPI_MASK) + +/* Get the L1 address from an L0 table descriptor. */ +#define GPT_L0_TBLD_ADDR(_desc) ((uint64_t *)(((_desc) & \ + (GPT_L0_TBL_DESC_L1ADDR_MASK << \ + GPT_L0_TBL_DESC_L1ADDR_SHIFT)))) + +/* Get the index into the L1 table from a physical address. 
*/ +#define GPT_L1_IDX(_p, _pa) (((_pa) >> GPT_L1_IDX_SHIFT(_p)) & \ + GPT_L1_IDX_MASK(_p)) + +/* Get the index of the GPI within an L1 table entry from a physical address. */ +#define GPT_L1_GPI_IDX(_p, _pa) (((_pa) >> GPT_L1_GPI_IDX_SHIFT(_p)) & \ + GPT_L1_GPI_IDX_MASK) + +/* Determine if an address is granule-aligned. */ +#define GPT_IS_L1_ALIGNED(_p, _pa) (((_pa) & (GPT_PGS_ACTUAL_SIZE(_p) - U(1))) \ + == U(0)) + +#endif /* GPT_RME_PRIVATE_H */ diff --git a/plat/arm/common/arm_bl2_setup.c b/plat/arm/common/arm_bl2_setup.c index ef372068a..2871b1bf0 100644 --- a/plat/arm/common/arm_bl2_setup.c +++ b/plat/arm/common/arm_bl2_setup.c @@ -18,12 +18,16 @@ #include #include #include -#include +#if ENABLE_RME +#include +#endif /* ENABLE_RME */ #ifdef SPD_opteed #include #endif #include +#if ENABLE_RME #include +#endif /* ENABLE_RME */ #include #include @@ -130,6 +134,7 @@ void bl2_platform_setup(void) } #if ENABLE_RME + static void arm_bl2_plat_gpt_setup(void) { /* @@ -137,32 +142,38 @@ static void arm_bl2_plat_gpt_setup(void) * the layout, so the array cannot be constant. */ pas_region_t pas_regions[] = { - ARM_PAS_GPI_ANY, ARM_PAS_KERNEL, - ARM_PAS_TZC, + ARM_PAS_SECURE, ARM_PAS_REALM, ARM_PAS_EL3_DRAM, ARM_PAS_GPTS }; - gpt_init_params_t gpt_params = { - PLATFORM_PGS, - PLATFORM_PPS, - PLATFORM_L0GPTSZ, - pas_regions, - (unsigned int)(sizeof(pas_regions)/sizeof(pas_region_t)), - ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, - ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE - }; - - /* Initialise the global granule tables */ - INFO("Enabling Granule Protection Checks\n"); - if (gpt_init(&gpt_params) < 0) { + /* Initialize entire protected space to GPT_GPI_ANY. */ + if (gpt_init_l0_tables(GPCCR_PPS_4GB, ARM_L0_GPT_ADDR_BASE, + ARM_L0_GPT_SIZE) < 0) { + ERROR("gpt_init_l0_tables() failed!\n"); panic(); } - gpt_enable(); + /* Carve out defined PAS ranges. 
*/ + if (gpt_init_pas_l1_tables(GPCCR_PGS_4K, + ARM_L1_GPT_ADDR_BASE, + ARM_L1_GPT_SIZE, + pas_regions, + (unsigned int)(sizeof(pas_regions) / + sizeof(pas_region_t))) < 0) { + ERROR("gpt_init_pas_l1_tables() failed!\n"); + panic(); + } + + INFO("Enabling Granule Protection Checks\n"); + if (gpt_enable() < 0) { + ERROR("gpt_enable() failed!\n"); + panic(); + } } + #endif /* ENABLE_RME */ /******************************************************************************* @@ -201,9 +212,6 @@ void arm_bl2_plat_arch_setup(void) #if ENABLE_RME /* Initialise the secure environment */ plat_arm_security_setup(); - - /* Initialise and enable Granule Protection */ - arm_bl2_plat_gpt_setup(); #endif setup_page_tables(bl_regions, plat_arm_get_mmap()); @@ -212,6 +220,9 @@ void arm_bl2_plat_arch_setup(void) /* BL2 runs in EL3 when RME enabled. */ assert(get_armv9_2_feat_rme_support() != 0U); enable_mmu_el3(0); + + /* Initialise and enable granule protection after MMU. */ + arm_bl2_plat_gpt_setup(); #else enable_mmu_el1(0); #endif diff --git a/plat/arm/common/arm_bl31_setup.c b/plat/arm/common/arm_bl31_setup.c index d131bb95b..6472590f3 100644 --- a/plat/arm/common/arm_bl31_setup.c +++ b/plat/arm/common/arm_bl31_setup.c @@ -13,10 +13,11 @@ #include #include #include -#include +#if ENABLE_RME +#include +#endif #include #include -#include #include #include #include @@ -235,28 +236,6 @@ void __init arm_bl31_early_platform_setup(void *from_bl2, uintptr_t soc_fw_confi */ bl33_image_ep_info.args.arg0 = (u_register_t)ARM_DRAM1_BASE; #endif - -#if ENABLE_RME - /* - * Initialise Granule Protection library and enable GPC - * for the primary processor. The tables were initialised - * in BL2, so there is no need to provide any PAS here. - */ - gpt_init_params_t gpt_params = { - PLATFORM_PGS, - PLATFORM_PPS, - PLATFORM_L0GPTSZ, - NULL, - 0U, - ARM_L0_GPT_ADDR_BASE, ARM_L0_GPT_SIZE, - ARM_L1_GPT_ADDR_BASE, ARM_L1_GPT_SIZE - }; - - /* Initialise the global granule tables. 
*/ - if (gpt_init(&gpt_params) < 0) { - panic(); - } -#endif /* ENABLE_RME */ } void bl31_early_platform_setup2(u_register_t arg0, u_register_t arg1, @@ -430,6 +409,19 @@ void __init arm_bl31_plat_arch_setup(void) enable_mmu_el3(0); +#if ENABLE_RME + /* + * Initialise Granule Protection library and enable GPC for the primary + * processor. The tables have already been initialized by a previous BL + * stage, so there is no need to provide any PAS here. This function + * sets up pointers to those tables. + */ + if (gpt_runtime_init() < 0) { + ERROR("gpt_runtime_init() failed!\n"); + panic(); + } +#endif /* ENABLE_RME */ + arm_setup_romlib(); } diff --git a/services/std_svc/rmmd/rmmd_main.c b/services/std_svc/rmmd/rmmd_main.c index 26a5b8464..dacd15087 100644 --- a/services/std_svc/rmmd/rmmd_main.c +++ b/services/std_svc/rmmd/rmmd_main.c @@ -16,7 +16,7 @@ #include #include #include -#include +#include #include #include @@ -296,12 +296,18 @@ static int gtsi_transition_granule(uint64_t pa, { int ret; - ret = gpt_transition_pas(pa, src_sec_state, target_pas); + ret = gpt_transition_pas(pa, PAGE_SIZE_4KB, src_sec_state, target_pas); /* Convert TF-A error codes into GTSI error codes */ if (ret == -EINVAL) { + ERROR("[GTSI] Transition failed: invalid %s\n", "address"); + ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa, + src_sec_state, target_pas); ret = GRAN_TRANS_RET_BAD_ADDR; } else if (ret == -EPERM) { + ERROR("[GTSI] Transition failed: invalid %s\n", "caller/PAS"); + ERROR(" PA: 0x%llx, SRC: %d, PAS: %d\n", pa, + src_sec_state, target_pas); ret = GRAN_TRANS_RET_BAD_PAS; } @@ -328,12 +334,10 @@ uint64_t rmmd_gtsi_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, switch (smc_fid) { case SMC_ASC_MARK_REALM: SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, - GPI_REALM)); - break; + GPT_GPI_REALM)); case SMC_ASC_MARK_NONSECURE: SMC_RET1(handle, gtsi_transition_granule(x1, SMC_FROM_REALM, - GPI_NS)); - break; + GPT_GPI_NS)); default: WARN("RMM: Unsupported GTF call 
0x%08x\n", smc_fid); SMC_RET1(handle, SMC_UNK);