Mirror of https://github.com/ARM-software/arm-trusted-firmware.git

Enforce full include path for includes. Deprecate old paths.

The following folders inside include/lib have been left unchanged:

- include/lib/cpus/${ARCH}
- include/lib/el3_runtime/${ARCH}

The reason for this change is that having a global namespace for includes
isn't a good idea. It defeats one of the advantages of having folders and it
introduces problems that are sometimes subtle (because you may not know the
header you are actually including if there are two of them).

For example, this patch had to be created because two headers were called
the same way: e0ea0928d5 ("Fix gpio includes of mt8173 platform to avoid
collision."). More recently, this patch has had similar problems: 46f9b2c3a2
("drivers: add tzc380 support").

This problem was introduced in commit 4ecca33988 ("Move include and source
files to logical locations"). At that time, there weren't too many headers
so it wasn't a real issue. However, time has shown that this creates
problems.

Platforms that want to preserve the way they include headers may add the
removed paths to PLAT_INCLUDES, but this is discouraged.

Change-Id: I39dc53ed98f9e297a5966e723d1936d6ccf2fc8f
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
215 lines
6.8 KiB
ArmAsm
/*
 * Copyright (c) 2013-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <platform_def.h>

#include <arch.h>
#include <common/bl_common.h>
#include <el3_common_macros.S>
#include <lib/pmf/pmf_asm_macros.S>
#include <lib/runtime_instr.h>
#include <lib/xlat_tables/xlat_mmu_helpers.h>

	.globl	bl31_entrypoint
	.globl	bl31_warm_entrypoint

	/* -----------------------------------------------------
	 * bl31_entrypoint() is the cold boot entrypoint,
	 * executed only by the primary cpu.
	 * -----------------------------------------------------
	 */

func bl31_entrypoint
#if !RESET_TO_BL31
	/* ---------------------------------------------------------------
	 * Stash the previous bootloader arguments x0 - x3 for later use.
	 * ---------------------------------------------------------------
	 */
	mov	x20, x0
	mov	x21, x1
	mov	x22, x2
	mov	x23, x3

	/* ---------------------------------------------------------------------
	 * For !RESET_TO_BL31 systems, only the primary CPU ever reaches
	 * bl31_entrypoint() during the cold boot flow, so the cold/warm boot
	 * and primary/secondary CPU logic should not be executed in this case.
	 *
	 * Also, assume that the previous bootloader has already initialised the
	 * SCTLR_EL3, including the endianness, and has initialised the memory.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common \
		_init_sctlr=0 \
		_warm_boot_mailbox=0 \
		_secondary_cold_boot=0 \
		_init_memory=0 \
		_init_c_runtime=1 \
		_exception_vectors=runtime_exceptions
#else
	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems which have a programmable reset address,
	 * bl31_entrypoint() is executed only on the cold boot path so we can
	 * skip the warm boot mailbox mechanism.
	 * ---------------------------------------------------------------------
	 */
	el3_entrypoint_common \
		_init_sctlr=1 \
		_warm_boot_mailbox=!PROGRAMMABLE_RESET_ADDRESS \
		_secondary_cold_boot=!COLD_BOOT_SINGLE_CPU \
		_init_memory=1 \
		_init_c_runtime=1 \
		_exception_vectors=runtime_exceptions

	/* ---------------------------------------------------------------------
	 * For RESET_TO_BL31 systems, BL31 is the first bootloader to run so
	 * there's no argument to relay from a previous bootloader. Zero the
	 * arguments passed to the platform layer to reflect that.
	 * ---------------------------------------------------------------------
	 */
	mov	x20, 0
	mov	x21, 0
	mov	x22, 0
	mov	x23, 0
#endif /* RESET_TO_BL31 */

	/* --------------------------------------------------------------------
	 * If PIE is enabled, fixup the Global descriptor Table and dynamic
	 * relocations
	 * --------------------------------------------------------------------
	 */
#if ENABLE_PIE
	mov_imm	x0, BL31_BASE
	mov_imm	x1, BL31_LIMIT
	bl	fixup_gdt_reloc
#endif /* ENABLE_PIE */

	/* ---------------------------------------------
	 * Perform platform specific early arch. setup
	 * ---------------------------------------------
	 */
	mov	x0, x20
	mov	x1, x21
	mov	x2, x22
	mov	x3, x23
	bl	bl31_early_platform_setup2
	bl	bl31_plat_arch_setup

	/* ---------------------------------------------
	 * Jump to main function.
	 * ---------------------------------------------
	 */
	bl	bl31_main

	/* -------------------------------------------------------------
	 * Clean the .data & .bss sections to main memory. This ensures
	 * that any global data which was initialised by the primary CPU
	 * is visible to secondary CPUs before they enable their data
	 * caches and participate in coherency.
	 * -------------------------------------------------------------
	 */
	adr	x0, __DATA_START__
	adr	x1, __DATA_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	adr	x0, __BSS_START__
	adr	x1, __BSS_END__
	sub	x1, x1, x0
	bl	clean_dcache_range

	b	el3_exit
endfunc bl31_entrypoint

	/* --------------------------------------------------------------------
	 * This CPU has been physically powered up. It is either resuming from
	 * suspend or has simply been turned on. In both cases, call the BL31
	 * warmboot entrypoint
	 * --------------------------------------------------------------------
	 */
func bl31_warm_entrypoint
#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * This timestamp update happens with cache off. The next
	 * timestamp collection will need to do cache maintenance prior
	 * to timestamp update.
	 */
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_HW_LOW_PWR
	mrs	x1, cntpct_el0
	str	x1, [x0]
#endif

	/*
	 * On the warm boot path, most of the EL3 initialisations performed by
	 * 'el3_entrypoint_common' must be skipped:
	 *
	 * - Only when the platform bypasses the BL1/BL31 entrypoint by
	 *   programming the reset address do we need to initialise SCTLR_EL3.
	 *   In other cases, we assume this has been taken care of by the
	 *   entrypoint code.
	 *
	 * - No need to determine the type of boot, we know it is a warm boot.
	 *
	 * - Do not try to distinguish between primary and secondary CPUs, this
	 *   notion only exists for a cold boot.
	 *
	 * - No need to initialise the memory or the C runtime environment,
	 *   it has been done once and for all on the cold boot path.
	 */
	el3_entrypoint_common \
		_init_sctlr=PROGRAMMABLE_RESET_ADDRESS \
		_warm_boot_mailbox=0 \
		_secondary_cold_boot=0 \
		_init_memory=0 \
		_init_c_runtime=0 \
		_exception_vectors=runtime_exceptions

	/*
	 * We're about to enable MMU and participate in PSCI state coordination.
	 *
	 * The PSCI implementation invokes platform routines that enable CPUs to
	 * participate in coherency. On a system where CPUs are not
	 * cache-coherent without appropriate platform specific programming,
	 * having caches enabled until such time might lead to coherency issues
	 * (resulting from stale data getting speculatively fetched, among
	 * others). Therefore we keep data caches disabled even after enabling
	 * the MMU for such platforms.
	 *
	 * On systems with hardware-assisted coherency, or on single cluster
	 * platforms, such platform specific programming is not required to
	 * enter coherency (as CPUs already are); and there's no reason to have
	 * caches disabled either.
	 */
#if HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY
	mov	x0, xzr
#else
	mov	x0, #DISABLE_DCACHE
#endif
	bl	bl31_plat_enable_mmu

	bl	psci_warmboot_entrypoint

#if ENABLE_RUNTIME_INSTRUMENTATION
	pmf_calc_timestamp_addr rt_instr_svc, RT_INSTR_EXIT_PSCI
	mov	x19, x0

	/*
	 * Invalidate before updating timestamp to ensure previous timestamp
	 * updates on the same cache line with caches disabled are properly
	 * seen by the same core. Without the cache invalidate, the core might
	 * write into a stale cache line.
	 */
	mov	x1, #PMF_TS_SIZE
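	/* Preserve x30 (LR) across the bl below, which clobbers it. */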
	mov	x20, x30
	bl	inv_dcache_range
	mov	x30, x20

	mrs	x0, cntpct_el0
	str	x0, [x19]
#endif
	b	el3_exit
endfunc bl31_warm_entrypoint