Mirror of https://github.com/ARM-software/arm-trusted-firmware.git, synced 2025-04-16 01:24:27 +00:00

Invalidate the Branch Target Buffer (BTB) on entry to EL3 by disabling and re-enabling the MMU. To achieve this without executing any branch instruction, a per-CPU vbar is installed which runs the workaround and then branches to the corresponding entry in the main vector table.

A side effect of this change is that the main vbar is now configured before any reset handling, so that the per-CPU reset function can override the vbar setting.

This workaround is enabled by default on the affected CPUs.

Change-Id: I97788d38463a5840a410e3cea85ed297a1678265
Signed-off-by: Dimitris Papastamos <dimitris.papastamos@arm.com>
114 lines
3.2 KiB
ArmAsm
/*
 * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <asm_macros.S>
#include <context.h>

	.globl	workaround_mmu_runtime_exceptions

vector_base workaround_mmu_runtime_exceptions

	/*
	 * Invalidate the Branch Target Buffer on entry to EL3 by disabling
	 * and then restoring the MMU, without executing any branch
	 * instruction.
	 */
	.macro	apply_workaround
	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	mrs	x0, sctlr_el3
	/* Disable MMU */
	bic	x1, x0, #SCTLR_M_BIT
	msr	sctlr_el3, x1
	isb
	/* Restore MMU config */
	msr	sctlr_el3, x0
	isb
	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]
	.endm

	/* ---------------------------------------------------------------------
	 * Current EL with SP_EL0 : 0x0 - 0x200
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_sp_el0
	b	sync_exception_sp_el0
	check_vector_size workaround_mmu_sync_exception_sp_el0

vector_entry workaround_mmu_irq_sp_el0
	b	irq_sp_el0
	check_vector_size workaround_mmu_irq_sp_el0

vector_entry workaround_mmu_fiq_sp_el0
	b	fiq_sp_el0
	check_vector_size workaround_mmu_fiq_sp_el0

vector_entry workaround_mmu_serror_sp_el0
	b	serror_sp_el0
	check_vector_size workaround_mmu_serror_sp_el0

	/* ---------------------------------------------------------------------
	 * Current EL with SP_ELx: 0x200 - 0x400
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_sp_elx
	b	sync_exception_sp_elx
	check_vector_size workaround_mmu_sync_exception_sp_elx

vector_entry workaround_mmu_irq_sp_elx
	b	irq_sp_elx
	check_vector_size workaround_mmu_irq_sp_elx

vector_entry workaround_mmu_fiq_sp_elx
	b	fiq_sp_elx
	check_vector_size workaround_mmu_fiq_sp_elx

vector_entry workaround_mmu_serror_sp_elx
	b	serror_sp_elx
	check_vector_size workaround_mmu_serror_sp_elx

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch64 : 0x400 - 0x600
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_aarch64
	apply_workaround
	b	sync_exception_aarch64
	check_vector_size workaround_mmu_sync_exception_aarch64

vector_entry workaround_mmu_irq_aarch64
	apply_workaround
	b	irq_aarch64
	check_vector_size workaround_mmu_irq_aarch64

vector_entry workaround_mmu_fiq_aarch64
	apply_workaround
	b	fiq_aarch64
	check_vector_size workaround_mmu_fiq_aarch64

vector_entry workaround_mmu_serror_aarch64
	apply_workaround
	b	serror_aarch64
	check_vector_size workaround_mmu_serror_aarch64

	/* ---------------------------------------------------------------------
	 * Lower EL using AArch32 : 0x600 - 0x800
	 * ---------------------------------------------------------------------
	 */
vector_entry workaround_mmu_sync_exception_aarch32
	apply_workaround
	b	sync_exception_aarch32
	check_vector_size workaround_mmu_sync_exception_aarch32

vector_entry workaround_mmu_irq_aarch32
	apply_workaround
	b	irq_aarch32
	check_vector_size workaround_mmu_irq_aarch32

vector_entry workaround_mmu_fiq_aarch32
	apply_workaround
	b	fiq_aarch32
	check_vector_size workaround_mmu_fiq_aarch32

vector_entry workaround_mmu_serror_aarch32
	apply_workaround
	b	serror_aarch32
	check_vector_size workaround_mmu_serror_aarch32
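
The commit message states that a per-CPU vbar is installed so the workaround runs before control reaches the main vector table. The snippet below is a minimal, hypothetical sketch (not part of the file above) of how per-CPU reset-time code could point VBAR_EL3 at the exported workaround_mmu_runtime_exceptions label; the surrounding reset-handler context and register usage are assumptions, only the label name comes from this file.

	/*
	 * Hypothetical sketch only: install the workaround vector table from
	 * a per-cpu reset function. Assumes x0 is free to clobber and that
	 * the label is reachable via ADRP/ADD page-relative addressing.
	 */
	adrp	x0, workaround_mmu_runtime_exceptions
	add	x0, x0, :lo12:workaround_mmu_runtime_exceptions
	msr	vbar_el3, x0
	isb

The trailing isb is the usual synchronization barrier after writing VBAR_EL3, ensuring the new vector base is in effect before any subsequent exception is taken.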