mirror of
https://github.com/ARM-software/arm-trusted-firmware.git
synced 2025-04-16 09:34:18 +00:00

Add header file to help with creation of SPMD logical partitions. Also update linker files to create sections to record SPMD logical partitions declared. This follows the same pattern as the EL3 SPMC's logical partitions. This patch also adds initialization of SPMD logical partitions when the SPMD comes up. ENABLE_SPMD_LP is a build flag that is used to enable support for SPMD logical partitions. Note that the approach chosen is to keep SPMD and SPMC logical partition support separate, as opposed to extend the existing SPMC logical partition support since the code would need to have a number of ifdefs and the interactions with various build options such as SPMC_AT_EL3 needs to be accounted for, which would make code more complicated. Signed-off-by: Raghu Krishnamurthy <raghu.ncstate@gmail.com> Change-Id: I9642ddbf6ea26dd3f4a283baec598d61c07e3661
241 lines
6.2 KiB
C
241 lines
6.2 KiB
C
/*
 * Copyright (c) 2020-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef BL_COMMON_LD_H
#define BL_COMMON_LD_H

#include <platform_def.h>

/*
 * Pointer-sized alignment for the descriptor arrays gathered below:
 * 8 bytes on AArch64, 4 bytes on AArch32. BSS_ALIGN is larger so the
 * zero-initialization loop can use wide stores.
 */
#ifdef __aarch64__
#define STRUCT_ALIGN	8
#define BSS_ALIGN	16
#else
#define STRUCT_ALIGN	4
#define BSS_ALIGN	8
#endif

/* Platforms may pre-define a stricter alignment for the .data section. */
#ifndef DATA_ALIGN
#define DATA_ALIGN	1
#endif
/*
 * Gather all .cpu_ops input sections into one pointer-aligned array,
 * bounded by __CPU_OPS_START__/__CPU_OPS_END__. KEEP() protects the
 * descriptors from being discarded by linker section garbage collection.
 */
#define CPU_OPS						\
	. = ALIGN(STRUCT_ALIGN);			\
	__CPU_OPS_START__ = .;				\
	KEEP(*(.cpu_ops))				\
	__CPU_OPS_END__ = .;
/*
 * Gather image parser library descriptors (.img_parser_lib_descs) into a
 * contiguous array delimited by start/end linker symbols.
 */
#define PARSER_LIB_DESCS				\
	. = ALIGN(STRUCT_ALIGN);			\
	__PARSER_LIB_DESCS_START__ = .;			\
	KEEP(*(.img_parser_lib_descs))			\
	__PARSER_LIB_DESCS_END__ = .;
/*
 * Gather runtime service descriptors (.rt_svc_descs) into a contiguous
 * array delimited by start/end linker symbols.
 */
#define RT_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__RT_SVC_DESCS_START__ = .;			\
	KEEP(*(.rt_svc_descs))				\
	__RT_SVC_DESCS_END__ = .;
/*
 * EL3 SPMC logical partition descriptors (.el3_lp_descs). Only emitted
 * when the SPMC runs at EL3; otherwise the macro expands to nothing.
 */
#if SPMC_AT_EL3
#define EL3_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__EL3_LP_DESCS_START__ = .;			\
	KEEP(*(.el3_lp_descs))				\
	__EL3_LP_DESCS_END__ = .;
#else
#define EL3_LP_DESCS
#endif
/*
 * SPMD logical partition descriptors (.spmd_lp_descs). Follows the same
 * pattern as EL3_LP_DESCS but is kept separate, gated by the
 * ENABLE_SPMD_LP build flag; expands to nothing when disabled.
 */
#if ENABLE_SPMD_LP
#define SPMD_LP_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__SPMD_LP_DESCS_START__ = .;			\
	KEEP(*(.spmd_lp_descs))				\
	__SPMD_LP_DESCS_END__ = .;
#else
#define SPMD_LP_DESCS
#endif
/*
 * Gather PMF service descriptors (.pmf_svc_descs) into a contiguous
 * array delimited by start/end linker symbols.
 */
#define PMF_SVC_DESCS					\
	. = ALIGN(STRUCT_ALIGN);			\
	__PMF_SVC_DESCS_START__ = .;			\
	KEEP(*(.pmf_svc_descs))				\
	__PMF_SVC_DESCS_END__ = .;
/*
 * Gather firmware configuration framework populator descriptors
 * (.fconf_populator) into a contiguous array delimited by start/end
 * linker symbols.
 */
#define FCONF_POPULATOR					\
	. = ALIGN(STRUCT_ALIGN);			\
	__FCONF_POPULATOR_START__ = .;			\
	KEEP(*(.fconf_populator))			\
	__FCONF_POPULATOR_END__ = .;
/*
 * Keep the .got section in the RO section as it is patched prior to enabling
 * the MMU and having the .got in RO is better for security. GOT is a table of
 * addresses so ensure pointer size alignment.
 */
#define GOT						\
	. = ALIGN(STRUCT_ALIGN);			\
	__GOT_START__ = .;				\
	*(.got)						\
	__GOT_END__ = .;
/*
 * The base xlat table
 *
 * It is put into the rodata section if PLAT_RO_XLAT_TABLES=1,
 * or into the bss section otherwise.
 */
#define BASE_XLAT_TABLE					\
	. = ALIGN(16);					\
	__BASE_XLAT_TABLE_START__ = .;			\
	*(.base_xlat_table)				\
	__BASE_XLAT_TABLE_END__ = .;

/*
 * Exactly one of the _RO/_BSS variants expands to the table; the other
 * expands to nothing, so the table lands in a single output section.
 */
#if PLAT_RO_XLAT_TABLES
#define BASE_XLAT_TABLE_RO		BASE_XLAT_TABLE
#define BASE_XLAT_TABLE_BSS
#else
#define BASE_XLAT_TABLE_RO
#define BASE_XLAT_TABLE_BSS		BASE_XLAT_TABLE
#endif
/*
 * Convenience macro: all the read-only descriptor arrays shared by the
 * BL images, expanded in order inside the rodata output section.
 */
#define RODATA_COMMON					\
	RT_SVC_DESCS					\
	FCONF_POPULATOR					\
	PMF_SVC_DESCS					\
	PARSER_LIB_DESCS				\
	CPU_OPS						\
	GOT						\
	BASE_XLAT_TABLE_RO				\
	EL3_LP_DESCS					\
	SPMD_LP_DESCS
/*
 * .data must be placed at a lower address than the stacks if the stack
 * protector is enabled. Alternatively, the .data.stack_protector_canary
 * section can be placed independently of the main .data section.
 */
#define DATA_SECTION					\
	.data . : ALIGN(DATA_ALIGN) {			\
		__DATA_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.data*))		\
		__DATA_END__ = .;			\
	}
/*
 * .rela.dyn needs to come after .data for the read-elf utility to parse
 * this section correctly.
 *
 * AArch64 uses RELA-type relocations (.rela.dyn), AArch32 REL-type
 * (.rel.dyn); pick the matching section name and input pattern.
 */
#ifdef __aarch64__
#define RELA_DYN_NAME		.rela.dyn
#define RELOC_SECTIONS_PATTERN	*(.rela*)
#else
#define RELA_DYN_NAME		.rel.dyn
#define RELOC_SECTIONS_PATTERN	*(.rel*)
#endif

#define RELA_SECTION					\
	RELA_DYN_NAME : ALIGN(STRUCT_ALIGN) {		\
		__RELA_START__ = .;			\
		RELOC_SECTIONS_PATTERN			\
		__RELA_END__ = .;			\
	}
/*
 * Per-CPU firmware stacks (.tzfw_normal_stacks), NOLOAD since they need
 * no image contents. Not defined here for BL31 with RECLAIM_INIT_CODE,
 * which lays out its stacks elsewhere.
 */
#if !(defined(IMAGE_BL31) && RECLAIM_INIT_CODE)
#define STACK_SECTION					\
	.stacks (NOLOAD) : {				\
		__STACKS_START__ = .;			\
		*(.tzfw_normal_stacks)			\
		__STACKS_END__ = .;			\
	}
#endif
/*
 * If BL doesn't use any bakery lock then __PERCPU_BAKERY_LOCK_SIZE__
 * will be zero. For this reason, the only two valid values for
 * __PERCPU_BAKERY_LOCK_SIZE__ are 0 or the platform defined value
 * PLAT_PERCPU_BAKERY_LOCK_SIZE.
 */
#ifdef PLAT_PERCPU_BAKERY_LOCK_SIZE
#define BAKERY_LOCK_SIZE_CHECK				\
	ASSERT((__PERCPU_BAKERY_LOCK_SIZE__ == 0) ||	\
	       (__PERCPU_BAKERY_LOCK_SIZE__ == PLAT_PERCPU_BAKERY_LOCK_SIZE), \
	       "PLAT_PERCPU_BAKERY_LOCK_SIZE does not match bakery lock requirements");
#else
#define BAKERY_LOCK_SIZE_CHECK
#endif
/*
 * Bakery locks are stored in normal .bss memory
 *
 * Each lock's data is spread across multiple cache lines, one per CPU,
 * but multiple locks can share the same cache line.
 * The compiler will allocate enough memory for one CPU's bakery locks,
 * the remaining cache lines are allocated by the linker script
 */
#if !USE_COHERENT_MEM
#define BAKERY_LOCK_NORMAL				\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__BAKERY_LOCK_START__ = .;			\
	__PERCPU_BAKERY_LOCK_START__ = .;		\
	*(.bakery_lock)					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PERCPU_BAKERY_LOCK_END__ = .;			\
	__PERCPU_BAKERY_LOCK_SIZE__ = ABSOLUTE(__PERCPU_BAKERY_LOCK_END__ - __PERCPU_BAKERY_LOCK_START__); \
	. = . + (__PERCPU_BAKERY_LOCK_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__BAKERY_LOCK_END__ = .;			\
	BAKERY_LOCK_SIZE_CHECK
#else
#define BAKERY_LOCK_NORMAL
#endif
/*
 * Time-stamps are stored in normal .bss memory
 *
 * The compiler will allocate enough memory for one CPU's time-stamps,
 * the remaining memory for other CPUs is allocated by the
 * linker script
 */
#define PMF_TIMESTAMP					\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_TIMESTAMP_START__ = .;			\
	KEEP(*(.pmf_timestamp_array))			\
	. = ALIGN(CACHE_WRITEBACK_GRANULE);		\
	__PMF_PERCPU_TIMESTAMP_END__ = .;		\
	__PERCPU_TIMESTAMP_SIZE__ = ABSOLUTE(. - __PMF_TIMESTAMP_START__); \
	. = . + (__PERCPU_TIMESTAMP_SIZE__ * (PLATFORM_CORE_COUNT - 1)); \
	__PMF_TIMESTAMP_END__ = .;
/*
 * The .bss section gets initialised to 0 at runtime.
 * Its base address has bigger alignment for better performance of the
 * zero-initialization code.
 */
#define BSS_SECTION					\
	.bss (NOLOAD) : ALIGN(BSS_ALIGN) {		\
		__BSS_START__ = .;			\
		*(SORT_BY_ALIGNMENT(.bss*))		\
		*(COMMON)				\
		BAKERY_LOCK_NORMAL			\
		PMF_TIMESTAMP				\
		BASE_XLAT_TABLE_BSS			\
		__BSS_END__ = .;			\
	}
/*
 * The .xlat_table section is for full, aligned page tables (4K).
 * Removing them from .bss avoids forcing 4K alignment on
 * the .bss section. The tables are initialized to zero by the translation
 * tables library.
 */
#define XLAT_TABLE_SECTION				\
	.xlat_table (NOLOAD) : {			\
		__XLAT_TABLE_START__ = .;		\
		*(.xlat_table)				\
		__XLAT_TABLE_END__ = .;			\
	}

#endif /* BL_COMMON_LD_H */