
During the warm boot sequence:

1. The MMU is enabled with the data cache disabled. The MMU table walker is
   set up to access the translation tables as if they were in cacheable
   memory, but its accesses are non-cacheable because SCTLR_EL3.C controls
   them as well.
2. The interconnect is set up and the CPU enters coherency with the rest of
   the system.
3. The data cache is enabled.

If support for dynamic translation tables is enabled and another CPU modifies
a region, the changes may only be present in that CPU's data cache, not in
RAM. The booting CPU is not yet coherent with the rest of the system, so
neither is its table walker. This means it may read stale entries from RAM
and may hold stale TLB entries corresponding to the dynamic mappings.

This is not a problem for the boot code because the mapping is 1:1 and the
regions are static. However, the code that runs after the boot sequence may
need to access the dynamically mapped regions.

This patch invalidates all TLBs during warm boot when dynamic translation
tables support is enabled to prevent this problem.

Change-Id: I80264802dc0aa1cb3edd77d0b66b91db6961af3d
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
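The fix itself is a short sequence in psci_do_pwrup_cache_maintenance (the
full file follows): once the data cache has been re-enabled, all EL3 TLB
entries are invalidated when dynamic translation tables are compiled in,
in essence:

#if PLAT_XLAT_TABLES_DYNAMIC
	/* Discard TLB entries loaded while this CPU was not coherent */
	tlbi	alle3
	dsb	ish
	isb
#endif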
/*
 * Copyright (c) 2014-2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
#include <psci.h>

	.globl	psci_do_pwrdown_cache_maintenance
	.globl	psci_do_pwrup_cache_maintenance
	.globl	psci_power_down_wfi
#if !ERROR_DEPRECATED
	.globl	psci_entrypoint
#endif

/* -----------------------------------------------------------------------
 * void psci_do_pwrdown_cache_maintenance(unsigned int power_level);
 *
 * This function performs cache maintenance for the specified power
 * level. The levels of cache affected are determined by the power
 * level which is passed as the argument i.e. level 0 results
 * in a flush of the L1 cache. Both the L1 and L2 caches are flushed
 * for a higher power level.
 *
 * Additionally, this function also ensures that stack memory is correctly
 * flushed out to avoid coherency issues due to a change in its memory
 * attributes after the data cache is disabled.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrdown_cache_maintenance
	stp	x29, x30, [sp,#-16]!
	stp	x19, x20, [sp,#-16]!

	/* ---------------------------------------------
	 * Invoke CPU-specific power down operations for
	 * the appropriate level
	 * ---------------------------------------------
	 */
	bl	prepare_cpu_pwr_dwn

	/* ---------------------------------------------
	 * Do stack maintenance by flushing the used
	 * stack to the main memory and invalidating the
	 * remainder.
	 * ---------------------------------------------
	 */
	bl	plat_get_my_stack

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1.
	 * ---------------------------------------------
	 */
	mov	x19, x0
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	flush_dcache_range

	/* ---------------------------------------------
	 * Calculate and store the size of the unused
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	sub	x0, x19, #PLATFORM_STACK_SIZE
	sub	x1, sp, x0
	bl	inv_dcache_range

	ldp	x19, x20, [sp], #16
	ldp	x29, x30, [sp], #16
	ret
endfunc psci_do_pwrdown_cache_maintenance


/* -----------------------------------------------------------------------
 * void psci_do_pwrup_cache_maintenance(void);
 *
 * This function performs cache maintenance after this cpu is powered up.
 * Currently, this involves managing the used stack memory before turning
 * on the data cache.
 * -----------------------------------------------------------------------
 */
func psci_do_pwrup_cache_maintenance
	stp	x29, x30, [sp,#-16]!

	/* ---------------------------------------------
	 * Ensure any inflight stack writes have made it
	 * to main memory.
	 * ---------------------------------------------
	 */
	dmb	st

	/* ---------------------------------------------
	 * Calculate and store the size of the used
	 * stack memory in x1. Calculate and store the
	 * stack base address in x0.
	 * ---------------------------------------------
	 */
	bl	plat_get_my_stack
	mov	x1, sp
	sub	x1, x0, x1
	mov	x0, sp
	bl	inv_dcache_range

	/* ---------------------------------------------
	 * Enable the data cache.
	 * ---------------------------------------------
	 */
	mrs	x0, sctlr_el3
	orr	x0, x0, #SCTLR_C_BIT
	msr	sctlr_el3, x0
	isb

#if PLAT_XLAT_TABLES_DYNAMIC
	/* ---------------------------------------------
	 * During warm boot the MMU is enabled with data
	 * cache disabled, then the interconnect is set
	 * up and finally the data cache is enabled.
	 *
	 * During this period, if another CPU modifies
	 * the translation tables, the MMU table walker
	 * may read the old entries. This is only a
	 * problem for dynamic regions, the warm boot
	 * code isn't affected because it is static.
	 *
	 * Invalidate all TLB entries loaded while the
	 * CPU wasn't coherent with the rest of the
	 * system.
	 * ---------------------------------------------
	 */
	tlbi	alle3
	dsb	ish
	isb
#endif

	ldp	x29, x30, [sp], #16
	ret
endfunc psci_do_pwrup_cache_maintenance

/* -----------------------------------------------------------------------
 * void psci_power_down_wfi(void);
 * This function is called to indicate to the power controller that it
 * is safe to power down this cpu. It should not exit the wfi and will
 * be released from reset upon power up.
 * -----------------------------------------------------------------------
 */
func psci_power_down_wfi
	dsb	sy		// ensure write buffer empty
	wfi
	no_ret	plat_panic_handler
endfunc psci_power_down_wfi

/* -----------------------------------------------------------------------
 * void psci_entrypoint(void);
 * The deprecated entry point for PSCI on warm boot for AArch64.
 * -----------------------------------------------------------------------
 */
func_deprecated psci_entrypoint
	b	bl31_warm_entrypoint
endfunc_deprecated psci_entrypoint