Mirror of https://github.com/ARM-software/arm-trusted-firmware.git (synced 2025-05-01 08:05:46 +00:00)
Initialise CPU contexts from entry_point_info
Consolidate all BL3-1 CPU context initialization for cold boot, PSCI and SPDs into two functions:

* The first uses entry_point_info to initialize the relevant cpu_context for first entry into a lower exception level on a CPU.
* The second populates the EL1 and EL2 system registers as needed from the cpu_context to ensure correct entry into the lower EL.

This patch alters the way that BL3-1 determines which exception level is used when first entering EL1 or EL2 during cold boot - this is now fully determined by the SPSR value in the entry_point_info for BL3-3, as set up by the platform code in BL2 (or otherwise provided to BL3-1).

In the situation that EL1 (or svc mode) is selected for a processor that supports EL2, the context management code will now configure all essential EL2 register state to ensure correct execution of EL1. This allows the platform code to run non-secure EL1 payloads directly without requiring a small EL2 stub or OS loader.

Change-Id: If9fbb2417e82d2226e47568203d5a369f39d3b0f
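As an illustration of that last point (a hedged sketch, not code taken from this patch): under the new scheme the platform's BL2 selects the exception level for BL3-3 purely through the mode it encodes in the image's SPSR. The bl33_ep_info name below is hypothetical; SPSR_64 and the MODE_* constants are the ones used throughout this commit.

/* Enter the BL3-3 payload at EL2 (hypothetical BL2 platform code) */
bl33_ep_info->spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);

/* ...or run it directly at NS-EL1; BL3-1 will then configure and quiesce EL2 */
bl33_ep_info->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);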
This commit is contained in:
parent
5298f2cb98
commit
167a935733
17 changed files with 304 additions and 340 deletions
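For context before the diff: a service that wants to hand the CPU to a lower exception level now builds an entry_point_info and passes it to the two new context management calls, instead of programming SCR/SPSR/ELR by hand. The sketch below is illustrative only, modelled on the TSPD and PSCI changes in this commit; the prepare_payload_entry() wrapper is a hypothetical name and the includes assume the usual BL3-1 headers.

#include <string.h>
#include <arch_helpers.h>
#include <bl_common.h>
#include <context_mgmt.h>

/* Hypothetical helper: set up the calling CPU so the next ERET from EL3
 * enters 'entrypoint' in the non-secure world with the given SPSR. */
static void prepare_payload_entry(uint64_t entrypoint, uint32_t spsr)
{
	entry_point_info_t ep;
	uint32_t ep_attr = NON_SECURE | EP_ST_DISABLE;

	/* Match the endianness currently in use at EL3 */
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr);

	ep.pc = entrypoint;
	ep.spsr = spsr;	/* e.g. SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS) */
	memset(&ep.args, 0, sizeof(ep.args));	/* X0-X7 seen by the payload */

	/* Fill this CPU's non-secure cpu_context from the entry point info */
	cm_init_context(read_mpidr_el1(), &ep);

	/* Program EL1 (and, if unused, EL2) system registers and SP_EL3 for ERET */
	cm_prepare_el3_exit(NON_SECURE);
}

The same two calls replace the old cm_set_el3_eret_context()/bl31_next_el_arch_setup() pairing in BL3-1's image entry path, in the TSPD and in the PSCI on/suspend handlers shown below.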
@@ -46,11 +46,10 @@ void bl1_arch_setup(void)
 	isb();
 
 	/*
-	 * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
-	 * external abort and SError interrupts to EL3
+	 * Set the next EL to be AArch64, route external abort and SError
+	 * interrupts to EL3
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
-		  SCR_FIQ_BIT;
+	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_EA_BIT;
 	write_scr(tmp_reg);
 
 	/*
@@ -51,11 +51,11 @@ void bl31_arch_setup(void)
 	write_sctlr_el3(tmp_reg);
 
 	/*
-	 * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route
-	 * external abort and SError interrupts to EL3
+	 * Route external abort and SError interrupts to EL3
+	 * other SCR bits will be configured before exiting to a lower exception
+	 * level
 	 */
-	tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT |
-		  SCR_FIQ_BIT;
+	tmp_reg = SCR_RES1_BITS | SCR_EA_BIT;
 	write_scr(tmp_reg);
 
 	/*
@@ -68,39 +68,3 @@ void bl31_arch_setup(void)
 	counter_freq = plat_get_syscnt_freq();
 	write_cntfrq_el0(counter_freq);
 }
-
-/*******************************************************************************
- * Detect what the security state of the next EL is and setup the minimum
- * required architectural state: program SCTRL to reflect the RES1 bits, and to
- * have MMU and caches disabled
- ******************************************************************************/
-void bl31_next_el_arch_setup(uint32_t security_state)
-{
-	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1();
-	unsigned long next_sctlr;
-	unsigned long el_status;
-	unsigned long scr = read_scr();
-
-	/* Use the same endianness than the current BL */
-	next_sctlr = (read_sctlr_el3() & SCTLR_EE_BIT);
-
-	/* Find out which EL we are going to */
-	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) & ID_AA64PFR0_ELX_MASK;
-
-	if (security_state == NON_SECURE) {
-		/* Check if EL2 is supported */
-		if (el_status && (scr & SCR_HCE_BIT)) {
-			/* Set SCTLR EL2 */
-			next_sctlr |= SCTLR_EL2_RES1;
-			write_sctlr_el2(next_sctlr);
-			return;
-		}
-	}
-
-	/*
-	 * SCTLR_EL1 needs the same programming irrespective of the
-	 * security state of EL1.
-	 */
-	next_sctlr |= SCTLR_EL1_RES1;
-	write_sctlr_el1(next_sctlr);
-}
@@ -43,9 +43,8 @@
 	.global	el3_sysregs_context_save
 func el3_sysregs_context_save
 
-	mrs	x9, scr_el3
 	mrs	x10, sctlr_el3
-	stp	x9, x10, [x0, #CTX_SCR_EL3]
+	str	x10, [x0, #CTX_SCTLR_EL3]
 
 	mrs	x11, cptr_el3
 	stp	x11, xzr, [x0, #CTX_CPTR_EL3]
@@ -98,8 +97,7 @@ func el3_sysregs_context_restore
 	/* Make sure all the above changes are observed */
 	isb
 
-	ldp	x9, x10, [x0, #CTX_SCR_EL3]
-	msr	scr_el3, x9
+	ldr	x10, [x0, #CTX_SCTLR_EL3]
 	msr	sctlr_el3, x10
 	isb
 
@@ -140,53 +140,18 @@ uint32_t bl31_get_next_image_type(void)
 void bl31_prepare_next_image_entry()
 {
 	entry_point_info_t *next_image_info;
-	uint32_t scr, image_type;
-	cpu_context_t *ctx;
-	gp_regs_t *gp_regs;
+	uint32_t image_type;
 
 	/* Determine which image to execute next */
 	image_type = bl31_get_next_image_type();
 
-	/*
-	 * Setup minimal architectural state of the next highest EL to
-	 * allow execution in it immediately upon entering it.
-	 */
-	bl31_next_el_arch_setup(image_type);
-
 	/* Program EL3 registers to enable entry into the next EL */
 	next_image_info = bl31_plat_get_next_image_ep_info(image_type);
 	assert(next_image_info);
 	assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));
 
-	scr = read_scr();
-	scr &= ~SCR_NS_BIT;
-	if (image_type == NON_SECURE)
-		scr |= SCR_NS_BIT;
-
-	scr &= ~SCR_RW_BIT;
-	if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) ==
-			(MODE_RW_64 << MODE_RW_SHIFT))
-		scr |= SCR_RW_BIT;
-
-	/*
-	 * Tell the context mgmt. library to ensure that SP_EL3 points to
-	 * the right context to exit from EL3 correctly.
-	 */
-	cm_set_el3_eret_context(image_type,
-			next_image_info->pc,
-			next_image_info->spsr,
-			scr);
-
-	/*
-	 * Save the args generated in BL2 for the image in the right context
-	 * used on its entry
-	 */
-	ctx = cm_get_context(image_type);
-	gp_regs = get_gpregs_ctx(ctx);
-	memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t));
-
-	/* Finally set the next context */
-	cm_set_next_eret_context(image_type);
+	cm_init_context(read_mpidr_el1(), next_image_info);
+	cm_prepare_el3_exit(image_type);
 }
 
 /*******************************************************************************
@@ -40,6 +40,7 @@
 #include <platform.h>
 #include <platform_def.h>
 #include <runtime_svc.h>
+#include <string.h>
 
 
 /*******************************************************************************
@@ -86,6 +87,177 @@ void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_st
 	set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
 }
 
+/*******************************************************************************
+ * This function is used to program the context that's used for exception
+ * return. This initializes the SP_EL3 to a pointer to a 'cpu_context' set for
+ * the required security state
+ ******************************************************************************/
+static inline void cm_set_next_context(void *context)
+{
+#if DEBUG
+	uint64_t sp_mode;
+
+	/*
+	 * Check that this function is called with SP_EL0 as the stack
+	 * pointer
+	 */
+	__asm__ volatile("mrs	%0, SPSel\n"
+			 : "=r" (sp_mode));
+
+	assert(sp_mode == MODE_SP_EL0);
+#endif
+
+	__asm__ volatile("msr	spsel, #1\n"
+			 "mov	sp, %0\n"
+			 "msr	spsel, #0\n"
+			 : : "r" (context));
+}
+
+/*******************************************************************************
+ * The following function initializes a cpu_context for the current CPU for
+ * first use, and sets the initial entrypoint state as specified by the
+ * entry_point_info structure.
+ *
+ * The security state to initialize is determined by the SECURE attribute
+ * of the entry_point_info. The function returns a pointer to the initialized
+ * context and sets this as the next context to return to.
+ *
+ * The EE and ST attributes are used to configure the endianess and secure
+ * timer availability for the new excution context.
+ *
+ * To prepare the register state for entry call cm_prepare_el3_exit() and
+ * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
+ * cm_e1_sysreg_context_restore().
+ ******************************************************************************/
+void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
+{
+	uint32_t security_state;
+	cpu_context_t *ctx;
+	uint32_t scr_el3;
+	el3_state_t *state;
+	gp_regs_t *gp_regs;
+	unsigned long sctlr_elx;
+
+	security_state = GET_SECURITY_STATE(ep->h.attr);
+	ctx = cm_get_context_by_mpidr(mpidr, security_state);
+	assert(ctx);
+
+	/* Clear any residual register values from the context */
+	memset(ctx, 0, sizeof(*ctx));
+
+	/*
+	 * Base the context SCR on the current value, adjust for entry point
+	 * specific requirements and set trap bits from the IMF
+	 * TODO: provide the base/global SCR bits using another mechanism?
+	 */
+	scr_el3 = read_scr();
+	scr_el3 &= ~(SCR_NS_BIT | SCR_RW_BIT | SCR_FIQ_BIT | SCR_IRQ_BIT |
+			SCR_ST_BIT | SCR_HCE_BIT);
+
+	if (security_state != SECURE)
+		scr_el3 |= SCR_NS_BIT;
+
+	if (GET_RW(ep->spsr) == MODE_RW_64)
+		scr_el3 |= SCR_RW_BIT;
+
+	if (EP_GET_ST(ep->h.attr))
+		scr_el3 |= SCR_ST_BIT;
+
+	scr_el3 |= get_scr_el3_from_routing_model(security_state);
+
+	/*
+	 * Set up SCTLR_ELx for the target exception level:
+	 * EE bit is taken from the entrpoint attributes
+	 * M, C and I bits must be zero (as required by PSCI specification)
+	 *
+	 * The target exception level is based on the spsr mode requested.
+	 * If execution is requested to EL2 or hyp mode, HVC is enabled
+	 * via SCR_EL3.HCE.
+	 *
+	 * Always compute the SCTLR_EL1 value and save in the cpu_context
+	 * - the EL2 registers are set up by cm_preapre_ns_entry() as they
+	 * are not part of the stored cpu_context
+	 *
+	 * TODO: In debug builds the spsr should be validated and checked
+	 * against the CPU support, security state, endianess and pc
+	 */
+	sctlr_elx = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
+	sctlr_elx |= SCTLR_EL1_RES1;
+	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_elx);
+
+	if ((GET_RW(ep->spsr) == MODE_RW_64
+	     && GET_EL(ep->spsr) == MODE_EL2)
+	    || (GET_RW(ep->spsr) != MODE_RW_64
+		&& GET_M32(ep->spsr) == MODE32_hyp)) {
+		scr_el3 |= SCR_HCE_BIT;
+	}
+
+	/* Populate EL3 state so that we've the right context before doing ERET */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_SCR_EL3, scr_el3);
+	write_ctx_reg(state, CTX_ELR_EL3, ep->pc);
+	write_ctx_reg(state, CTX_SPSR_EL3, ep->spsr);
+
+	/*
+	 * Store the X0-X7 value from the entrypoint into the context
+	 * Use memcpy as we are in control of the layout of the structures
+	 */
+	gp_regs = get_gpregs_ctx(ctx);
+	memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
+}
+
+/*******************************************************************************
+ * Prepare the CPU system registers for first entry into secure or normal world
+ *
+ * If execution is requested to EL2 or hyp mode, SCTLR_EL2 is initialized
+ * If execution is requested to non-secure EL1 or svc mode, and the CPU supports
+ * EL2 then EL2 is disabled by configuring all necessary EL2 registers.
+ * For all entries, the EL1 registers are initialized from the cpu_context
+ ******************************************************************************/
+void cm_prepare_el3_exit(uint32_t security_state)
+{
+	uint32_t sctlr_elx, scr_el3, cptr_el2;
+	cpu_context_t *ctx = cm_get_context(security_state);
+
+	assert(ctx);
+
+	if (security_state == NON_SECURE) {
+		scr_el3 = read_ctx_reg(get_el3state_ctx(ctx), CTX_SCR_EL3);
+		if (scr_el3 & SCR_HCE_BIT) {
+			/* Use SCTLR_EL1.EE value to initialise sctlr_el2 */
+			sctlr_elx = read_ctx_reg(get_sysregs_ctx(ctx),
						 CTX_SCTLR_EL1);
+			sctlr_elx &= ~SCTLR_EE_BIT;
+			sctlr_elx |= SCTLR_EL2_RES1;
+			write_sctlr_el2(sctlr_elx);
+		} else if (read_id_aa64pfr0_el1() &
+			   (ID_AA64PFR0_ELX_MASK << ID_AA64PFR0_EL2_SHIFT)) {
+			/* EL2 present but unused, need to disable safely */
+
+			/* HCR_EL2 = 0, except RW bit set to match SCR_EL3 */
+			write_hcr_el2((scr_el3 & SCR_RW_BIT) ? HCR_RW_BIT : 0);
+
+			/* SCTLR_EL2 : can be ignored when bypassing */
+
+			/* CPTR_EL2 : disable all traps TCPAC, TTA, TFP */
+			cptr_el2 = read_cptr_el2();
+			cptr_el2 &= ~(TCPAC_BIT | TTA_BIT | TFP_BIT);
+			write_cptr_el2(cptr_el2);
+
+			/* Enable EL1 access to timer */
+			write_cnthctl_el2(EL1PCEN_BIT | EL1PCTEN_BIT);
+
+			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
+			write_vpidr_el2(read_midr_el1());
+			write_vmpidr_el2(read_mpidr_el1());
+		}
+	}
+
+	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
+
+	cm_set_next_context(ctx);
+}
+
 /*******************************************************************************
  * The next four functions are used by runtime services to save and restore EL3
  * and EL1 contexts on the 'cpu_context' structure for the specified security
@@ -131,33 +303,6 @@ void cm_el1_sysregs_context_restore(uint32_t security_state)
 	el1_sysregs_context_restore(get_sysregs_ctx(ctx));
 }
 
-/*******************************************************************************
- * This function populates 'cpu_context' pertaining to the given security state
- * with the entrypoint, SPSR and SCR values so that an ERET from this security
- * state correctly restores corresponding values to drop the CPU to the next
- * exception level
- ******************************************************************************/
-void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
-		uint32_t spsr, uint32_t scr)
-{
-	cpu_context_t *ctx;
-	el3_state_t *state;
-
-	ctx = cm_get_context(security_state);
-	assert(ctx);
-
-	/* Program the interrupt routing model for this security state */
-	scr &= ~SCR_FIQ_BIT;
-	scr &= ~SCR_IRQ_BIT;
-	scr |= get_scr_el3_from_routing_model(security_state);
-
-	/* Populate EL3 state so that we've the right context before doing ERET */
-	state = get_el3state_ctx(ctx);
-	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
-	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
-	write_ctx_reg(state, CTX_SCR_EL3, scr);
-}
-
 /*******************************************************************************
  * This function populates ELR_EL3 member of 'cpu_context' pertaining to the
  * given security state with the given entrypoint
@@ -175,6 +320,25 @@ void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint)
 	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
 }
 
+/*******************************************************************************
+ * This function populates ELR_EL3 and SPSR_EL3 members of 'cpu_context'
+ * pertaining to the given security state
+ ******************************************************************************/
+void cm_set_elr_spsr_el3(uint32_t security_state,
+		uint64_t entrypoint, uint32_t spsr)
+{
+	cpu_context_t *ctx;
+	el3_state_t *state;
+
+	ctx = cm_get_context(security_state);
+	assert(ctx);
+
+	/* Populate EL3 state so that ERET jumps to the correct entry */
+	state = get_el3state_ctx(ctx);
+	write_ctx_reg(state, CTX_ELR_EL3, entrypoint);
+	write_ctx_reg(state, CTX_SPSR_EL3, spsr);
+}
+
 /*******************************************************************************
  * This function updates a single bit in the SCR_EL3 member of the 'cpu_context'
  * pertaining to the given security state using the value and bit position
@@ -233,26 +397,9 @@ uint32_t cm_get_scr_el3(uint32_t security_state)
 void cm_set_next_eret_context(uint32_t security_state)
 {
 	cpu_context_t *ctx;
-#if DEBUG
-	uint64_t sp_mode;
-#endif
 
 	ctx = cm_get_context(security_state);
 	assert(ctx);
 
-#if DEBUG
-	/*
-	 * Check that this function is called with SP_EL0 as the stack
-	 * pointer
-	 */
-	__asm__ volatile("mrs	%0, SPSel\n"
-			 : "=r" (sp_mode));
-
-	assert(sp_mode == MODE_SP_EL0);
-#endif
-
-	__asm__ volatile("msr	spsel, #1\n"
-			 "mov	sp, %0\n"
-			 "msr	spsel, #0\n"
-			 : : "r" (ctx));
+	cm_set_next_context(ctx);
 }
@@ -34,6 +34,11 @@
 #include <cpu_data.h>
 #include <stdint.h>
 
+/*******************************************************************************
+ * Forward declarations
+ ******************************************************************************/
+struct entry_point_info;
+
 /*******************************************************************************
  * Function & variable prototypes
  ******************************************************************************/
@@ -45,12 +50,14 @@ void cm_set_context_by_mpidr(uint64_t mpidr,
 			     uint32_t security_state);
 static inline void cm_set_context(void *context, uint32_t security_state);
 void cm_el3_sysregs_context_save(uint32_t security_state);
+void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep);
+void cm_prepare_el3_exit(uint32_t security_state);
 void cm_el3_sysregs_context_restore(uint32_t security_state);
 void cm_el1_sysregs_context_save(uint32_t security_state);
 void cm_el1_sysregs_context_restore(uint32_t security_state);
-void cm_set_el3_eret_context(uint32_t security_state, uint64_t entrypoint,
-		uint32_t spsr, uint32_t scr);
 void cm_set_elr_el3(uint32_t security_state, uint64_t entrypoint);
+void cm_set_elr_spsr_el3(uint32_t security_state,
+		uint64_t entrypoint, uint32_t spsr);
 void cm_write_scr_el3_bit(uint32_t security_state,
 			  uint32_t bit_pos,
 			  uint32_t value);
@@ -33,7 +33,6 @@
 
 #define SECURE		0x0
 #define NON_SECURE	0x1
-#define PARAM_EP_SECURITY_MASK	0x1
 
 #define UP	1
 #define DOWN	0
@@ -64,10 +63,23 @@
 #define ENTRY_POINT_INFO_PC_OFFSET	0x08
 #define ENTRY_POINT_INFO_ARGS_OFFSET	0x18
 
+#define PARAM_EP_SECURITY_MASK	0x1
 #define GET_SECURITY_STATE(x) (x & PARAM_EP_SECURITY_MASK)
 #define SET_SECURITY_STATE(x, security) \
 			((x) = ((x) & ~PARAM_EP_SECURITY_MASK) | (security))
 
+#define EP_EE_MASK	0x2
+#define EP_EE_LITTLE	0x0
+#define EP_EE_BIG	0x2
+#define EP_GET_EE(x) (x & EP_EE_MASK)
+#define EP_SET_EE(x, ee) ((x) = ((x) & ~EP_EE_MASK) | (ee))
+
+#define EP_ST_MASK	0x4
+#define EP_ST_DISABLE	0x0
+#define EP_ST_ENABLE	0x4
+#define EP_GET_ST(x) (x & EP_ST_MASK)
+#define EP_SET_ST(x, ee) ((x) = ((x) & ~EP_ST_MASK) | (ee))
+
 #define PARAM_EP	0x01
 #define PARAM_IMAGE_BINARY	0x02
 #define PARAM_BL31	0x03
@@ -167,6 +167,7 @@
 #define HCR_FMO_BIT	(1 << 3)
 
 /* CNTHCTL_EL2 definitions */
+#define EVNTEN_BIT	(1 << 2)
 #define EL1PCEN_BIT	(1 << 1)
 #define EL1PCTEN_BIT	(1 << 0)
 
@@ -262,6 +262,9 @@ DEFINE_SYSREG_RW_FUNCS(cnthctl_el2)
 
 DEFINE_SYSREG_RW_FUNCS(tpidr_el3)
 
+DEFINE_SYSREG_RW_FUNCS(vpidr_el2)
+DEFINE_SYSREG_RW_FUNCS(vmpidr_el2)
+
 /* Implementation specific registers */
 
 DEFINE_RENAME_SYSREG_RW_FUNCS(cpuectlr_el1, CPUECTLR_EL1)
@@ -45,9 +45,8 @@ int32_t tspd_init_secure_context(uint64_t entrypoint,
 				 uint64_t mpidr,
 				 tsp_context_t *tsp_ctx)
 {
-	uint32_t scr, sctlr;
-	el1_sys_regs_t *el1_state;
-	uint32_t spsr;
+	entry_point_info_t ep;
+	uint32_t ep_attr;
 
 	/* Passing a NULL context is a critical programming error */
 	assert(tsp_ctx);
@@ -58,51 +57,24 @@ int32_t tspd_init_secure_context(uint64_t entrypoint,
 	 */
 	assert(rw == TSP_AARCH64);
 
-	/*
-	 * This might look redundant if the context was statically
-	 * allocated but this function cannot make that assumption.
-	 */
-	memset(tsp_ctx, 0, sizeof(*tsp_ctx));
-
-	/*
-	 * Set the right security state, register width and enable access to
-	 * the secure physical timer for the SP.
-	 */
-	scr = read_scr();
-	scr &= ~SCR_NS_BIT;
-	scr &= ~SCR_RW_BIT;
-	scr |= SCR_ST_BIT;
-	if (rw == TSP_AARCH64)
-		scr |= SCR_RW_BIT;
-
-	/* Get a pointer to the S-EL1 context memory */
-	el1_state = get_sysregs_ctx(&tsp_ctx->cpu_ctx);
-
-	/*
-	 * Program the SCTLR_EL1 such that upon entry in S-EL1, caches and MMU are
-	 * disabled and exception endianess is set to be the same as EL3
-	 */
-	sctlr = read_sctlr_el3();
-	sctlr &= SCTLR_EE_BIT;
-	sctlr |= SCTLR_EL1_RES1;
-	write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr);
-
-	/* Set this context as ready to be initialised i.e OFF */
-	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
-
-	/*
-	 * This context has not been used yet. It will become valid
-	 * when the TSP is interrupted and wants the TSPD to preserve
-	 * the context.
-	 */
-	clr_std_smc_active_flag(tsp_ctx->state);
-
 	/* Associate this context with the cpu specified */
 	tsp_ctx->mpidr = mpidr;
+	tsp_ctx->state = 0;
+	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
+	clr_std_smc_active_flag(tsp_ctx->state);
 
-	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);
-	spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-	cm_set_el3_eret_context(SECURE, entrypoint, spsr, scr);
+	cm_set_context_by_mpidr(mpidr, &tsp_ctx->cpu_ctx, SECURE);
+
+	/* initialise an entrypoint to set up the CPU context */
+	ep_attr = SECURE | EP_ST_ENABLE;
+	if (read_sctlr_el3() & SCTLR_EE_BIT)
+		ep_attr |= EP_EE_BIG;
+	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr);
+	ep.pc = entrypoint;
+	ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
+	memset(&ep.args, 0, sizeof(ep.args));
+
+	cm_init_context(mpidr, &ep);
 
 	return 0;
 }
@@ -122,13 +122,9 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
 			    CTX_ELR_EL3);
 	}
 
-	SMC_SET_EL3(&tsp_ctx->cpu_ctx,
-		    CTX_SPSR_EL3,
-		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
-	SMC_SET_EL3(&tsp_ctx->cpu_ctx,
-		    CTX_ELR_EL3,
-		    (uint64_t) &tsp_vectors->fiq_entry);
 	cm_el1_sysregs_context_restore(SECURE);
+	cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry,
+		    SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS));
 	cm_set_next_eret_context(SECURE);
 
 	/*
@@ -42,8 +42,8 @@ typedef int (*afflvl_off_handler_t)(unsigned long, aff_map_node_t *);
 ******************************************************************************/
 static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node)
 {
-	unsigned int index, plat_state;
-	int rc = PSCI_E_SUCCESS;
+	unsigned int plat_state;
+	int rc;
 	unsigned long sctlr;
 
 	assert(cpu_node->level == MPIDR_AFFLVL0);
@@ -67,9 +67,6 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node)
 		return rc;
 	}
 
-	index = cpu_node->data;
-	memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));
-
 	/*
 	 * Arch. management. Perform the necessary steps to flush all
 	 * cpu caches.
@@ -96,6 +93,7 @@ static int psci_afflvl0_off(unsigned long mpidr, aff_map_node_t *cpu_node)
 	 * Plat. management: Perform platform specific actions to turn this
 	 * cpu off e.g. exit cpu coherency, program the power controller etc.
 	 */
+	rc = PSCI_E_SUCCESS;
 	if (psci_plat_pm_ops->affinst_off) {
 
 		/* Get the current physical state of this cpu */
@@ -75,8 +75,10 @@ static int psci_afflvl0_on(unsigned long target_cpu,
 			   unsigned long ns_entrypoint,
 			   unsigned long context_id)
 {
-	unsigned int index, plat_state;
+	unsigned int plat_state;
 	unsigned long psci_entrypoint;
+	uint32_t ns_scr_el3 = read_scr_el3();
+	uint32_t ns_sctlr_el1 = read_sctlr_el1();
 	int rc;
 
 	/* Sanity check to safeguard against data corruption */
@@ -103,8 +105,8 @@ static int psci_afflvl0_on(unsigned long target_cpu,
 	 * the non-secure world from the non-secure state from
 	 * where this call originated.
 	 */
-	index = cpu_node->data;
-	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+	rc = psci_save_ns_entry(target_cpu, ns_entrypoint, context_id,
+				ns_scr_el3, ns_sctlr_el1);
 	if (rc != PSCI_E_SUCCESS)
 		return rc;
 
@@ -336,7 +338,7 @@ int psci_afflvl_on(unsigned long target_cpu,
 static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
 					   aff_map_node_t *cpu_node)
 {
-	unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
+	unsigned int plat_state, state, rc;
 
 	assert(cpu_node->level == MPIDR_AFFLVL0);
 
@@ -383,11 +385,9 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
 	/*
 	 * Generic management: Now we just need to retrieve the
 	 * information that we had stashed away during the cpu_on
-	 * call to set this cpu on its way. First get the index
-	 * for restoring the re-entry info
+	 * call to set this cpu on its way.
 	 */
-	index = cpu_node->data;
-	psci_get_ns_entry_info(index);
+	cm_prepare_el3_exit(NON_SECURE);
 
 	/* State management: mark this cpu as on */
 	psci_set_state(cpu_node, PSCI_STATE_ON);
@@ -395,6 +395,7 @@ static unsigned int psci_afflvl0_on_finish(unsigned long mpidr,
 	/* Clean caches before re-entering normal world */
 	dcsw_op_louis(DCCSW);
 
+	rc = PSCI_E_SUCCESS;
 	return rc;
 }
 
@@ -132,10 +132,12 @@ static int psci_afflvl0_suspend(unsigned long mpidr,
 				unsigned long context_id,
 				unsigned int power_state)
 {
-	unsigned int index, plat_state;
+	unsigned int plat_state;
 	unsigned long psci_entrypoint, sctlr;
 	el3_state_t *saved_el3_state;
-	int rc = PSCI_E_SUCCESS;
+	uint32_t ns_scr_el3 = read_scr_el3();
+	uint32_t ns_sctlr_el1 = read_sctlr_el1();
+	int rc;
 
 	/* Sanity check to safeguard against data corruption */
 	assert(cpu_node->level == MPIDR_AFFLVL0);
@@ -163,8 +165,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr,
 	 * Generic management: Store the re-entry information for the
 	 * non-secure world
 	 */
-	index = cpu_node->data;
-	rc = psci_set_ns_entry_info(index, ns_entrypoint, context_id);
+	rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
+				ns_scr_el3, ns_sctlr_el1);
 	if (rc != PSCI_E_SUCCESS)
 		return rc;
 
@@ -174,7 +176,6 @@ static int psci_afflvl0_suspend(unsigned long mpidr,
 	 * L1 caches and exit intra-cluster coherency et al
 	 */
 	cm_el3_sysregs_context_save(NON_SECURE);
-	rc = PSCI_E_SUCCESS;
 
 	/*
 	 * The EL3 state to PoC since it will be accessed after a
@@ -214,6 +215,8 @@ static int psci_afflvl0_suspend(unsigned long mpidr,
 	 * platform defined mailbox with the psci entrypoint,
 	 * program the power controller etc.
 	 */
+	rc = PSCI_E_SUCCESS;
+
 	if (psci_plat_pm_ops->affinst_suspend) {
 		plat_state = psci_get_phys_state(cpu_node);
 		rc = psci_plat_pm_ops->affinst_suspend(mpidr,
@@ -454,7 +457,7 @@ int psci_afflvl_suspend(unsigned long mpidr,
 static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
 						aff_map_node_t *cpu_node)
 {
-	unsigned int index, plat_state, state, rc = PSCI_E_SUCCESS;
+	unsigned int plat_state, state, rc;
 	int32_t suspend_level;
 
 	assert(cpu_node->level == MPIDR_AFFLVL0);
@@ -481,14 +484,11 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
 	}
 
 	/* Get the index for restoring the re-entry information */
-	index = cpu_node->data;
-
 	/*
 	 * Arch. management: Restore the stashed EL3 architectural
 	 * context from the 'cpu_context' structure for this cpu.
 	 */
 	cm_el3_sysregs_context_restore(NON_SECURE);
-	rc = PSCI_E_SUCCESS;
 
 	/*
 	 * Call the cpu suspend finish handler registered by the Secure Payload
@@ -509,7 +509,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
 	 * information that we had stashed away during the suspend
 	 * call to set this cpu on its way.
 	 */
-	psci_get_ns_entry_info(index);
+	cm_prepare_el3_exit(NON_SECURE);
 
 	/* State management: mark this cpu as on */
 	psci_set_state(cpu_node, PSCI_STATE_ON);
@@ -517,6 +517,7 @@ static unsigned int psci_afflvl0_suspend_finish(unsigned long mpidr,
 	/* Clean caches before re-entering normal world */
 	dcsw_op_louis(DCCSW);
 
+	rc = PSCI_E_SUCCESS;
 	return rc;
 }
 
@@ -36,6 +36,7 @@
 #include <context_mgmt.h>
 #include <debug.h>
 #include <platform.h>
+#include <string.h>
 #include "psci_private.h"
 
 /*
@@ -50,7 +51,6 @@ const spd_pm_ops_t *psci_spd_pm;
  * array during startup.
  ******************************************************************************/
 suspend_context_t psci_suspend_context[PSCI_NUM_AFFS];
-ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS];
 
 /*******************************************************************************
  * Grand array that holds the platform's topology information for state
@@ -212,97 +212,36 @@ int psci_validate_mpidr(unsigned long mpidr, int level)
 }
 
 /*******************************************************************************
- * This function retrieves all the stashed information needed to correctly
- * resume a cpu's execution in the non-secure state after it has been physically
- * powered on i.e. turned ON or resumed from SUSPEND
+ * This function determines the full entrypoint information for the requested
+ * PSCI entrypoint on power on/resume and saves this in the non-secure CPU
+ * cpu_context, ready for when the core boots.
 ******************************************************************************/
-void psci_get_ns_entry_info(unsigned int index)
+int psci_save_ns_entry(uint64_t mpidr,
+		       uint64_t entrypoint, uint64_t context_id,
+		       uint32_t ns_scr_el3, uint32_t ns_sctlr_el1)
 {
-	unsigned long sctlr = 0, scr, el_status, id_aa64pfr0;
-	cpu_context_t *ns_entry_context;
-	gp_regs_t *ns_entry_gpregs;
-
-	scr = read_scr();
-
-	/* Find out which EL we are going to */
-	id_aa64pfr0 = read_id_aa64pfr0_el1();
-	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
-		ID_AA64PFR0_ELX_MASK;
-
-	/* Restore endianess */
-	if (psci_ns_entry_info[index].sctlr & SCTLR_EE_BIT)
-		sctlr |= SCTLR_EE_BIT;
-	else
-		sctlr &= ~SCTLR_EE_BIT;
-
-	/* Turn off MMU and Caching */
-	sctlr &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_M_BIT);
-
-	/* Set the register width */
-	if (psci_ns_entry_info[index].scr & SCR_RW_BIT)
-		scr |= SCR_RW_BIT;
-	else
-		scr &= ~SCR_RW_BIT;
-
-	scr |= SCR_NS_BIT;
-
-	if (el_status)
-		write_sctlr_el2(sctlr);
-	else
-		write_sctlr_el1(sctlr);
-
-	/* Fulfill the cpu_on entry reqs. as per the psci spec */
-	ns_entry_context = (cpu_context_t *) cm_get_context(NON_SECURE);
-	assert(ns_entry_context);
-
-	/*
-	 * Setup general purpose registers to return the context id and
-	 * prevent leakage of secure information into the normal world.
-	 */
-	ns_entry_gpregs = get_gpregs_ctx(ns_entry_context);
-	write_ctx_reg(ns_entry_gpregs,
-		      CTX_GPREG_X0,
-		      psci_ns_entry_info[index].context_id);
-
-	/*
-	 * Tell the context management library to setup EL3 system registers to
-	 * be able to ERET into the ns state, and SP_EL3 points to the right
-	 * context to exit from EL3 correctly.
-	 */
-	cm_set_el3_eret_context(NON_SECURE,
-			psci_ns_entry_info[index].eret_info.entrypoint,
-			psci_ns_entry_info[index].eret_info.spsr,
-			scr);
-
-	cm_set_next_eret_context(NON_SECURE);
-}
-
-/*******************************************************************************
- * This function retrieves and stashes all the information needed to correctly
- * resume a cpu's execution in the non-secure state after it has been physically
- * powered on i.e. turned ON or resumed from SUSPEND. This is done prior to
- * turning it on or before suspending it.
- ******************************************************************************/
-int psci_set_ns_entry_info(unsigned int index,
-			   unsigned long entrypoint,
-			   unsigned long context_id)
-{
-	int rc = PSCI_E_SUCCESS;
-	unsigned int rw, mode, ee, spsr = 0;
-	unsigned long id_aa64pfr0 = read_id_aa64pfr0_el1(), scr = read_scr();
-	unsigned long el_status;
-	unsigned long daif;
-
-	/* Figure out what mode do we enter the non-secure world in */
-	el_status = (id_aa64pfr0 >> ID_AA64PFR0_EL2_SHIFT) &
-		ID_AA64PFR0_ELX_MASK;
+	uint32_t ep_attr, mode, sctlr, daif, ee;
+	entry_point_info_t ep;
+
+	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
+	ee = 0;
+
+	ep_attr = NON_SECURE | EP_ST_DISABLE;
+	if (sctlr & SCTLR_EE_BIT) {
+		ep_attr |= EP_EE_BIG;
+		ee = 1;
+	}
+	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1, ep_attr);
+
+	ep.pc = entrypoint;
+	memset(&ep.args, 0, sizeof(ep.args));
+	ep.args.arg0 = context_id;
 
 	/*
 	 * Figure out whether the cpu enters the non-secure address space
 	 * in aarch32 or aarch64
 	 */
-	rw = scr & SCR_RW_BIT;
-	if (rw) {
+	if (ns_scr_el3 & SCR_RW_BIT) {
 
 		/*
 		 * Check whether a Thumb entry point has been provided for an
@@ -311,28 +250,12 @@ int psci_set_ns_entry_info(unsigned int index,
 		if (entrypoint & 0x1)
 			return PSCI_E_INVALID_PARAMS;
 
-		if (el_status && (scr & SCR_HCE_BIT)) {
-			mode = MODE_EL2;
-			ee = read_sctlr_el2() & SCTLR_EE_BIT;
-		} else {
-			mode = MODE_EL1;
-			ee = read_sctlr_el1() & SCTLR_EE_BIT;
-		}
-
-		spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
-
-		psci_ns_entry_info[index].sctlr |= ee;
-		psci_ns_entry_info[index].scr |= SCR_RW_BIT;
+		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
+		ep.spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 	} else {
 
-		if (el_status && (scr & SCR_HCE_BIT)) {
-			mode = MODE32_hyp;
-			ee = read_sctlr_el2() & SCTLR_EE_BIT;
-		} else {
-			mode = MODE32_svc;
-			ee = read_sctlr_el1() & SCTLR_EE_BIT;
-		}
+		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
 
 		/*
 		 * TODO: Choose async. exception bits if HYP mode is not
@@ -340,18 +263,13 @@ int psci_set_ns_entry_info(unsigned int index,
 		 */
 		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
 
-		spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
-
-		/* Ensure that the CSPR.E and SCTLR.EE bits match */
-		psci_ns_entry_info[index].sctlr |= ee;
-		psci_ns_entry_info[index].scr &= ~SCR_RW_BIT;
+		ep.spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
 	}
 
-	psci_ns_entry_info[index].eret_info.entrypoint = entrypoint;
-	psci_ns_entry_info[index].eret_info.spsr = spsr;
-	psci_ns_entry_info[index].context_id = context_id;
+	/* initialise an entrypoint to set up the CPU context */
+	cm_init_context(mpidr, &ep);
 
-	return rc;
+	return PSCI_E_SUCCESS;
 }
 
 /*******************************************************************************
@@ -35,22 +35,6 @@
 #include <bakery_lock.h>
 #include <psci.h>
 
-/*******************************************************************************
- * The following two data structures hold the generic information to bringup
- * a suspended/hotplugged out cpu
- ******************************************************************************/
-typedef struct eret_params {
-	unsigned long entrypoint;
-	unsigned long spsr;
-} eret_params_t;
-
-typedef struct ns_entry_info {
-	eret_params_t eret_info;
-	unsigned long context_id;
-	unsigned int scr;
-	unsigned int sctlr;
-} ns_entry_info_t;
-
 /*******************************************************************************
  * The following two data structures hold the topology tree which in turn tracks
  * the state of the all the affinity instances supported by the platform.
@@ -85,7 +69,6 @@ typedef unsigned int (*afflvl_power_on_finisher_t)(unsigned long,
  * Data prototypes
  ******************************************************************************/
 extern suspend_context_t psci_suspend_context[PSCI_NUM_AFFS];
-extern ns_entry_info_t psci_ns_entry_info[PSCI_NUM_AFFS];
 extern const plat_pm_ops_t *psci_plat_pm_ops;
 extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS];
 
@@ -102,7 +85,6 @@ int get_max_afflvl(void);
 unsigned short psci_get_state(aff_map_node_t *node);
 unsigned short psci_get_phys_state(aff_map_node_t *node);
 void psci_set_state(aff_map_node_t *node, unsigned short state);
-void psci_get_ns_entry_info(unsigned int index);
 unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
 int psci_validate_mpidr(unsigned long, int);
 int get_power_on_target_afflvl(unsigned long mpidr);
@@ -110,9 +92,9 @@ void psci_afflvl_power_on_finish(unsigned long,
 				int,
 				int,
 				afflvl_power_on_finisher_t *);
-int psci_set_ns_entry_info(unsigned int index,
-			   unsigned long entrypoint,
-			   unsigned long context_id);
+int psci_save_ns_entry(uint64_t mpidr,
+		       uint64_t entrypoint, uint64_t context_id,
+		       uint32_t caller_scr_el3, uint32_t caller_sctlr_el1);
 int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
 void psci_acquire_afflvl_locks(unsigned long mpidr,
 			       int start_afflvl,
@@ -59,7 +59,7 @@ static aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
 
 /*******************************************************************************
  * 'psci_ns_einfo_idx' keeps track of the next free index in the
- * 'psci_ns_entry_info' & 'psci_suspend_context' arrays.
+ * 'psci_suspend_context' arrays.
  ******************************************************************************/
 static unsigned int psci_ns_einfo_idx;
 