Merge pull request #361 from achingupta/for_sm/psci_proto_v5

For sm/psci proto v5
This commit is contained in:
Achin Gupta 2015-08-17 14:56:31 +01:00
commit 432b9905d5
73 changed files with 4981 additions and 3139 deletions


@ -66,6 +66,9 @@ ARM_CCI_PRODUCT_ID := 400
ASM_ASSERTION := ${DEBUG}
# Build option to choose whether Trusted firmware uses Coherent memory or not.
USE_COHERENT_MEM := 1
# Flag used to choose the power state format: Extended State-ID or the
# Original format.
PSCI_EXTENDED_STATE_ID := 0
# Default FIP file name
FIP_NAME := fip.bin
# By default, use the -pedantic option in the gcc command line
@ -79,6 +82,8 @@ TRUSTED_BOARD_BOOT := 0
# By default, consider that the platform's reset address is not programmable.
# The platform Makefile is free to override this value.
PROGRAMMABLE_RESET_ADDRESS := 0
# Build flag to warn about usage of deprecated platform and framework APIs
WARN_DEPRECATED := 0
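Both of the new options above are booleans that the Makefile later turns into C preprocessor defines via `assert_boolean`/`add_define` (see the hunk further down). As a hedged sketch of how such a flag is typically consumed in C — the `__warn_deprecated` helper below is an invented name, not necessarily what the codebase defines:

    /* Sketch only: WARN_DEPRECATED reaches the compiler command line as
     * -DWARN_DEPRECATED=0 or -DWARN_DEPRECATED=1 via add_define. */
    #if WARN_DEPRECATED
    #define __warn_deprecated   __attribute__((deprecated))
    #else
    #define __warn_deprecated
    #endif

    /* A deprecated platform API can then be tagged so that its callers get
     * a compile-time warning when the flag is enabled. */
    unsigned int platform_get_core_pos(unsigned long mpidr) __warn_deprecated;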
# Checkpatch ignores
CHECK_IGNORE = --ignore COMPLEX_MACRO \
@ -166,6 +171,16 @@ msg_start:
include ${PLAT_MAKEFILE_FULL}
# If the platform has not defined ENABLE_PLAT_COMPAT, then enable it by default
ifndef ENABLE_PLAT_COMPAT
ENABLE_PLAT_COMPAT := 1
endif
# Include the platform compatibility helpers for PSCI
ifneq (${ENABLE_PLAT_COMPAT}, 0)
include plat/compat/plat_compat.mk
endif
# Include the CPU specific operations makefile. By default all CPU errata
# workarounds and CPU specific optimisations are disabled. This can be
# overridden by the platform.
@ -268,6 +283,10 @@ $(eval $(call add_define,LOG_LEVEL))
$(eval $(call assert_boolean,USE_COHERENT_MEM))
$(eval $(call add_define,USE_COHERENT_MEM))
# Process PSCI_EXTENDED_STATE_ID flag
$(eval $(call assert_boolean,PSCI_EXTENDED_STATE_ID))
$(eval $(call add_define,PSCI_EXTENDED_STATE_ID))
# Process Generate CoT flags
$(eval $(call assert_boolean,GENERATE_COT))
$(eval $(call assert_boolean,CREATE_KEYS))
@ -281,6 +300,14 @@ $(eval $(call add_define,TRUSTED_BOARD_BOOT))
$(eval $(call assert_boolean,PROGRAMMABLE_RESET_ADDRESS))
$(eval $(call add_define,PROGRAMMABLE_RESET_ADDRESS))
# Process ENABLE_PLAT_COMPAT flag
$(eval $(call assert_boolean,ENABLE_PLAT_COMPAT))
$(eval $(call add_define,ENABLE_PLAT_COMPAT))
# Process WARN_DEPRECATED flag
$(eval $(call assert_boolean,WARN_DEPRECATED))
$(eval $(call add_define,WARN_DEPRECATED))
ASFLAGS += -nostdinc -ffreestanding -Wa,--fatal-warnings \
-Werror -Wmissing-include-dirs \
-mgeneral-regs-only -D__ASSEMBLY__ \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -105,8 +105,7 @@ func bl2_entrypoint
* primary cpu is running at the moment.
* --------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
bl plat_set_my_stack
/* ---------------------------------------------
* Perform early platform setup & platform


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,7 +32,6 @@
#include <cpu_data.h>
.globl init_cpu_data_ptr
.globl _cpu_data_by_mpidr
.globl _cpu_data_by_index
/* -----------------------------------------------------------------
@ -41,37 +40,19 @@
* Initialise the TPIDR_EL3 register to refer to the cpu_data_t
* for the calling CPU. This must be called before cm_get_cpu_data()
*
* This can be called without a valid stack.
* clobbers: x0, x1, x9, x10
* This can be called without a valid stack. It assumes that
* plat_my_core_pos() does not clobber register x10.
* clobbers: x0, x1, x10
* -----------------------------------------------------------------
*/
func init_cpu_data_ptr
mov x10, x30
mrs x0, mpidr_el1
bl _cpu_data_by_mpidr
bl plat_my_core_pos
bl _cpu_data_by_index
msr tpidr_el3, x0
ret x10
endfunc init_cpu_data_ptr
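On the C side, the counterpart of this routine can be sketched as follows. This is a hedged illustration rather than the project's actual `cpu_data.h`; the helper name `my_cpu_data()` is invented:

    #include <stdint.h>

    typedef struct cpu_data cpu_data_t;  /* opaque here; see cpu_data.h */

    /* Recover the per-CPU data pointer that init_cpu_data_ptr stored in
     * TPIDR_EL3, avoiding a core-index recomputation on every access. */
    static inline cpu_data_t *my_cpu_data(void)
    {
        uint64_t ptr;

        __asm__ volatile("mrs %0, tpidr_el3" : "=r"(ptr));
        return (cpu_data_t *)ptr;
    }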
/* -----------------------------------------------------------------
* cpu_data_t *_cpu_data_by_mpidr(uint64_t mpidr)
*
* Return the cpu_data structure for the CPU with given MPIDR
*
* This can be called without a valid stack. It assumes that
* platform_get_core_pos() does not clobber register x9.
* clobbers: x0, x1, x9
* -----------------------------------------------------------------
*/
func _cpu_data_by_mpidr
mov x9, x30
bl platform_get_core_pos
mov x30, x9
b _cpu_data_by_index
endfunc _cpu_data_by_mpidr
/* -----------------------------------------------------------------
* cpu_data_t *_cpu_data_by_index(uint32_t cpu_index)
*


@ -1,5 +1,5 @@
#
# Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
# Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@ -42,9 +42,9 @@ BL31_SOURCES += bl31/bl31_main.c \
lib/cpus/aarch64/cpu_helpers.S \
lib/locks/exclusive/spinlock.S \
services/std_svc/std_svc_setup.c \
services/std_svc/psci/psci_afflvl_off.c \
services/std_svc/psci/psci_afflvl_on.c \
services/std_svc/psci/psci_afflvl_suspend.c \
services/std_svc/psci/psci_off.c \
services/std_svc/psci/psci_on.c \
services/std_svc/psci/psci_suspend.c \
services/std_svc/psci/psci_common.c \
services/std_svc/psci/psci_entry.S \
services/std_svc/psci/psci_helpers.S \


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -155,7 +155,7 @@ void bl31_prepare_next_image_entry(void)
INFO("BL3-1: Next image address = 0x%llx\n",
(unsigned long long) next_image_info->pc);
INFO("BL3-1: Next image spsr = 0x%x\n", next_image_info->spsr);
cm_init_context(read_mpidr_el1(), next_image_info);
cm_init_my_context(next_image_info);
cm_prepare_el3_exit(image_type);
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -64,6 +64,32 @@ void cm_init(void)
*/
}
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by `cpu_idx` that was set as the context for the
* specified security state. NULL is returned if no such structure has been
* specified.
******************************************************************************/
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_index(cpu_idx, cpu_context[security_state]);
}
/*******************************************************************************
* This function sets the pointer to the current 'cpu_context' structure for the
* specified security state for the CPU identified by CPU index.
******************************************************************************/
void cm_set_context_by_index(unsigned int cpu_idx, void *context,
unsigned int security_state)
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_index(cpu_idx, cpu_context[security_state], context);
}
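As a hedged usage sketch of these new by-index accessors (the function and variable names below are invented; `assert()` and `NON_SECURE` are assumed to come from the firmware's headers), a caller that already holds a validated core index can manage that core's context without constructing an MPIDR:

    /* Install and verify a non-secure context pointer for a core
     * identified by its linear index, e.g. from plat_core_pos_by_mpidr(). */
    static void example_install_ns_context(unsigned int target_idx, void *ctx)
    {
        cm_set_context_by_index(target_idx, ctx, NON_SECURE);
        assert(cm_get_context_by_index(target_idx, NON_SECURE) == ctx);
    }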
/*******************************************************************************
* This function returns a pointer to the most recent 'cpu_context' structure
* for the CPU identified by MPIDR that was set as the context for the specified
@ -73,7 +99,7 @@ void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state)
{
assert(sec_state_is_valid(security_state));
return get_cpu_data_by_mpidr(mpidr, cpu_context[security_state]);
return cm_get_context_by_index(platform_get_core_pos(mpidr), security_state);
}
/*******************************************************************************
@ -84,7 +110,8 @@ void cm_set_context_by_mpidr(uint64_t mpidr, void *context, uint32_t security_st
{
assert(sec_state_is_valid(security_state));
set_cpu_data_by_mpidr(mpidr, cpu_context[security_state], context);
cm_set_context_by_index(platform_get_core_pos(mpidr),
context, security_state);
}
/*******************************************************************************
@ -114,7 +141,7 @@ static inline void cm_set_next_context(void *context)
}
/*******************************************************************************
* The following function initializes a cpu_context for the current CPU for
* The following function initializes the cpu_context 'ctx' for
* first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
*
@ -123,25 +150,24 @@ static inline void cm_set_next_context(void *context)
* context and sets this as the next context to return to.
*
* The EE and ST attributes are used to configure the endianess and secure
* timer availability for the new excution context.
* timer availability for the new execution context.
*
* To prepare the register state for entry call cm_prepare_el3_exit() and
* el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
* cm_e1_sysreg_context_restore().
******************************************************************************/
void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
uint32_t security_state;
cpu_context_t *ctx;
unsigned int security_state;
uint32_t scr_el3;
el3_state_t *state;
gp_regs_t *gp_regs;
unsigned long sctlr_elx;
security_state = GET_SECURITY_STATE(ep->h.attr);
ctx = cm_get_context_by_mpidr(mpidr, security_state);
assert(ctx);
security_state = GET_SECURITY_STATE(ep->h.attr);
/* Clear any residual register values from the context */
memset(ctx, 0, sizeof(*ctx));
@ -209,6 +235,45 @@ void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
memcpy(gp_regs, (void *)&ep->args, sizeof(aapcs64_params_t));
}
/*******************************************************************************
* The following function initializes the cpu_context for a CPU specified by
* its `cpu_idx` for first use, and sets the initial entrypoint state as
* specified by the entry_point_info structure.
******************************************************************************/
void cm_init_context_by_index(unsigned int cpu_idx,
const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context_by_index(cpu_idx, GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
}
/*******************************************************************************
* The following function initializes the cpu_context for the current CPU
* for first use, and sets the initial entrypoint state as specified by the
* entry_point_info structure.
******************************************************************************/
void cm_init_my_context(const entry_point_info_t *ep)
{
cpu_context_t *ctx;
ctx = cm_get_context(GET_SECURITY_STATE(ep->h.attr));
cm_init_context_common(ctx, ep);
}
/*******************************************************************************
* The following function provides a compatibility function for SPDs using the
* existing cm library routines. This function is expected to be invoked for
* initializing the cpu_context for the CPU specified by MPIDR for first use.
******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
if ((mpidr & MPIDR_AFFINITY_MASK) ==
(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
cm_init_my_context(ep);
else
cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
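For an SPD, the migration this wrapper permits can be sketched as below; `spd_cpu_on_handler` and `target_idx` are invented names, and only the `cm_*` calls come from this file:

    /* Sketch of an SPD moving off the deprecated MPIDR-based call. */
    static void spd_cpu_on_handler(u_register_t target_mpidr,
                                   entry_point_info_t *ep)
    {
        int target_idx = plat_core_pos_by_mpidr(target_mpidr);

        if (target_idx < 0)
            return; /* invalid MPIDR */

        /* Previously: cm_init_context(target_mpidr, ep);
         * Now: initialise the target core's context by index, or call
         * cm_init_my_context(ep) when the target is the calling core. */
        cm_init_context_by_index((unsigned int)target_idx, ep);
    }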
/*******************************************************************************
* Prepare the CPU system registers for first entry into secure or normal world
*


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -122,8 +122,7 @@ func tsp_entrypoint
* primary cpu is running at the moment.
* --------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
bl plat_set_my_stack
/* ---------------------------------------------
* Perform early platform setup & platform
@ -248,8 +247,7 @@ func tsp_cpu_on_entry
* enabled.
* --------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
bl plat_set_my_stack
/* --------------------------------------------
* Enable the MMU with the DCache disabled. It


@ -49,8 +49,7 @@
******************************************************************************/
void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_stats[linear_id].sync_fiq_count++;
if (type == TSP_HANDLE_FIQ_AND_RETURN)
@ -59,9 +58,9 @@ void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
spin_lock(&console_lock);
VERBOSE("TSP: cpu 0x%lx sync fiq request from 0x%lx\n",
mpidr, elr_el3);
read_mpidr(), elr_el3);
VERBOSE("TSP: cpu 0x%lx: %d sync fiq requests, %d sync fiq returns\n",
mpidr,
read_mpidr(),
tsp_stats[linear_id].sync_fiq_count,
tsp_stats[linear_id].sync_fiq_ret_count);
spin_unlock(&console_lock);
@ -77,8 +76,7 @@ void tsp_update_sync_fiq_stats(uint32_t type, uint64_t elr_el3)
******************************************************************************/
int32_t tsp_fiq_handler(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), id;
uint32_t linear_id = plat_my_core_pos(), id;
/*
* Get the highest priority pending interrupt id and see if it is the
@ -105,9 +103,9 @@ int32_t tsp_fiq_handler(void)
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
spin_lock(&console_lock);
VERBOSE("TSP: cpu 0x%lx handled fiq %d\n",
mpidr, id);
read_mpidr(), id);
VERBOSE("TSP: cpu 0x%lx: %d fiq requests\n",
mpidr, tsp_stats[linear_id].fiq_count);
read_mpidr(), tsp_stats[linear_id].fiq_count);
spin_unlock(&console_lock);
#endif
return 0;
@ -115,15 +113,14 @@ int32_t tsp_fiq_handler(void)
int32_t tsp_irq_received(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_stats[linear_id].irq_count++;
#if LOG_LEVEL >= LOG_LEVEL_VERBOSE
spin_lock(&console_lock);
VERBOSE("TSP: cpu 0x%lx received irq\n", mpidr);
VERBOSE("TSP: cpu 0x%lx received irq\n", read_mpidr());
VERBOSE("TSP: cpu 0x%lx: %d irq requests\n",
mpidr, tsp_stats[linear_id].irq_count);
read_mpidr(), tsp_stats[linear_id].irq_count);
spin_unlock(&console_lock);
#endif
return TSP_PREEMPTED;


@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -72,7 +72,6 @@ static tsp_args_t *set_smc_args(uint64_t arg0,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id;
tsp_args_t *pcpu_smc_args;
@ -80,7 +79,7 @@ static tsp_args_t *set_smc_args(uint64_t arg0,
* Return to Secure Monitor by raising an SMC. The results of the
* service are passed as an arguments to the SMC
*/
linear_id = platform_get_core_pos(mpidr);
linear_id = plat_my_core_pos();
pcpu_smc_args = &tsp_smc_args[linear_id];
write_sp_arg(pcpu_smc_args, TSP_ARG0, arg0);
write_sp_arg(pcpu_smc_args, TSP_ARG1, arg1);
@ -107,8 +106,7 @@ uint64_t tsp_main(void)
INFO("TSP: Total memory size : 0x%lx bytes\n",
BL32_TOTAL_LIMIT - BL32_TOTAL_BASE);
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Initialize the platform */
tsp_platform_setup();
@ -123,7 +121,8 @@ uint64_t tsp_main(void)
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n", mpidr,
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_on_count);
@ -139,8 +138,7 @@ uint64_t tsp_main(void)
******************************************************************************/
tsp_args_t *tsp_cpu_on_main(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Initialize secure/applications state here */
tsp_generic_timer_start();
@ -152,8 +150,9 @@ tsp_args_t *tsp_cpu_on_main(void)
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx turned on\n", mpidr);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n", mpidr,
INFO("TSP: cpu 0x%lx turned on\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu on requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_on_count);
@ -176,8 +175,7 @@ tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/*
* This cpu is being turned off, so disable the timer to prevent the
@ -193,8 +191,9 @@ tsp_args_t *tsp_cpu_off_main(uint64_t arg0,
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx off request\n", mpidr);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n", mpidr,
INFO("TSP: cpu 0x%lx off request\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu off requests\n",
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_off_count);
@ -219,8 +218,7 @@ tsp_args_t *tsp_cpu_suspend_main(uint64_t arg0,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/*
* Save the timer context and disable it to prevent the secure timer
@ -237,7 +235,7 @@ tsp_args_t *tsp_cpu_suspend_main(uint64_t arg0,
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
mpidr,
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_suspend_count);
@ -262,8 +260,7 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Restore the generic timer context */
tsp_generic_timer_restore();
@ -276,9 +273,9 @@ tsp_args_t *tsp_cpu_resume_main(uint64_t suspend_level,
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx resumed. suspend level %ld\n",
mpidr, suspend_level);
read_mpidr(), suspend_level);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets %d cpu suspend requests\n",
mpidr,
read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count,
tsp_stats[linear_id].cpu_suspend_count);
@ -301,8 +298,7 @@ tsp_args_t *tsp_system_off_main(uint64_t arg0,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
@ -310,8 +306,8 @@ tsp_args_t *tsp_system_off_main(uint64_t arg0,
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", mpidr);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", mpidr,
INFO("TSP: cpu 0x%lx SYSTEM_OFF request\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);
spin_unlock(&console_lock);
@ -334,8 +330,7 @@ tsp_args_t *tsp_system_reset_main(uint64_t arg0,
uint64_t arg6,
uint64_t arg7)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
@ -343,8 +338,8 @@ tsp_args_t *tsp_system_reset_main(uint64_t arg0,
#if LOG_LEVEL >= LOG_LEVEL_INFO
spin_lock(&console_lock);
INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", mpidr);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", mpidr,
INFO("TSP: cpu 0x%lx SYSTEM_RESET request\n", read_mpidr());
INFO("TSP: cpu 0x%lx: %d smcs, %d erets requests\n", read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);
spin_unlock(&console_lock);
@ -371,17 +366,16 @@ tsp_args_t *tsp_smc_handler(uint64_t func,
{
uint64_t results[2];
uint64_t service_args[2];
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
/* Update this cpu's statistics */
tsp_stats[linear_id].smc_count++;
tsp_stats[linear_id].eret_count++;
INFO("TSP: cpu 0x%lx received %s smc 0x%lx\n", mpidr,
INFO("TSP: cpu 0x%lx received %s smc 0x%lx\n", read_mpidr(),
((func >> 31) & 1) == 1 ? "fast" : "standard",
func);
INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", mpidr,
INFO("TSP: cpu 0x%lx: %d smcs, %d erets\n", read_mpidr(),
tsp_stats[linear_id].smc_count,
tsp_stats[linear_id].eret_count);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -92,7 +92,7 @@ void tsp_generic_timer_stop(void)
******************************************************************************/
void tsp_generic_timer_save(void)
{
uint32_t linear_id = platform_get_core_pos(read_mpidr());
uint32_t linear_id = plat_my_core_pos();
pcpu_timer_context[linear_id].cval = read_cntps_cval_el1();
pcpu_timer_context[linear_id].ctl = read_cntps_ctl_el1();
@ -105,7 +105,7 @@ void tsp_generic_timer_save(void)
******************************************************************************/
void tsp_generic_timer_restore(void)
{
uint32_t linear_id = platform_get_core_pos(read_mpidr());
uint32_t linear_id = plat_my_core_pos();
write_cntps_cval_el1(pcpu_timer_context[linear_id].cval);
write_cntps_ctl_el1(pcpu_timer_context[linear_id].ctl);

Binary file not shown (image added; 418 KiB)


@ -0,0 +1,574 @@
Guide to migrating to the new platform porting interface
=========================================================
Contents
--------
1. [Introduction](#1--introduction)
2. [Platform API modification due to PSCI framework changes](#2--platform-api-modification-due-to-psci-framework-changes)
* [Power domain topology framework platform API modifications](#21-power-domain-topology-framework-platform-api-modifications)
* [Composite power state framework platform API modifications](#22-composite-power-state-framework-platform-api-modifications)
* [Miscellaneous modifications](#23-miscellaneous-modifications)
3. [Compatibility layer](#3--compatibility-layer)
4. [Deprecated Platform API](#4--deprecated-platform-api)
- - - - - - - - - - - - - - - - - -
1. Introduction
----------------
The PSCI implementation in Trusted Firmware has undergone a redesign because of
three requirements introduced by the PSCI 1.0 specification:
* Removing the framework's assumption about the structure of the MPIDR and
its relation to the power topology, which enables support for deeper and
more complex hierarchies.
* Reworking the power state coordination implementation in the framework
to support the more detailed PSCI 1.0 requirements and reduce platform
port complexity.
* Enabling the use of the extended power_state parameter and the larger
StateID field.
The PSCI 1.0 implementation introduces new frameworks to fulfill the above
requirements. These framework changes mean that the platform porting API must
also be modified. This document is a guide to assist migration of the existing
platform ports to the new platform API.
This document describes the new platform API and compares it with the
deprecated API. It also describes the compatibility layer that enables the
existing platform ports to work with the PSCI 1.0 implementation. The
deprecated platform API is documented for reference.
2. Platform API modification due to PSCI framework changes
-----------------------------------------------------------
This section describes changes to the platform APIs.
2.1 Power domain topology framework platform API modifications
--------------------------------------------------------------
The new topology framework removes the assumption in the PSCI implementation
that MPIDR based affinity instances map directly to power domains. A power
domain, as described in section 4.2 of [PSCI], could contain a core or a
logical group of cores (a cluster) which share some state on which power
management operations can be performed. The existing affinity instance based
APIs `plat_get_aff_count()` and `plat_get_aff_state()` are deprecated. The new
platform interfaces that are introduced for this framework are:
* `plat_core_pos_by_mpidr()`
* `plat_my_core_pos()`
* `plat_get_power_domain_tree_desc()`
`plat_my_core_pos()` and `plat_core_pos_by_mpidr()` are mandatory
and are meant to replace the existing `platform_get_core_pos()` API.
The description of these APIs can be found in the [Porting Guide][my_core_pos].
These are used by the power domain topology framework such that:
1. The generic PSCI code does not generate MPIDRs or use them to query the
platform about the number of power domains at a particular power level. The
`plat_get_power_domain_tree_desc()` provides a description of the power
domain tree on the SoC through a pointer to the byte array containing the
power domain topology tree description data structure.
2. The linear indices returned by `plat_core_pos_by_mpidr()` and
`plat_my_core_pos()` are used to retrieve core power domain nodes from
the power domain tree. Each core index is unique to a core and is a
number between `0` and `PLATFORM_CORE_COUNT - 1`. The platform can choose
to implement a static mapping between `MPIDR` and core index or implement
a dynamic mapping, choosing to skip the unavailable/unused cores to compact
the core indices.
In addition, the platforms must define the macros `PLAT_NUM_PWR_DOMAINS` and
`PLAT_MAX_PWR_LVL` which replace the macros `PLATFORM_NUM_AFFS` and
`PLATFORM_MAX_AFFLVL` respectively. On platforms where the affinity instances
correspond to power domains, the values of the new macros remain the same as
those of the old ones.
More details on the power domain topology description and its platform
interface can be found in [psci pd tree].
2.2 Composite power state framework platform API modifications
--------------------------------------------------------------
The state-ID field in the power-state parameter of a CPU_SUSPEND call can be
used to describe composite power states specific to a platform. The existing
PSCI state coordination had the limitation that it operated on a run/off
granularity of power states and did not interpret the state-ID field. This
was acceptable under the PSCI 0.2 specification. With that approach,
coordination only requires maintaining a reference count of the number of
cores that have requested that the cluster remain powered.
Under the PSCI 1.0 specification, this approach is suboptimal. If composite
power states are used, the PSCI implementation cannot make the required
global state coordination decisions because it does not understand the
platform specific states.
The PSCI 1.0 implementation now defines a generic representation of the
power-state parameter:
typedef struct psci_power_state {
plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
} psci_power_state_t;
`pwr_domain_state` is an array where each index corresponds to a power level.
Each entry in the array contains the local power state the power domain at
that power level could enter. The meaning of the local power state value is
platform defined, and can vary between levels in a single platform. The PSCI
implementation constrains the values only so that it can classify the state
as RUN, RETENTION or OFF as required by the specification:
1. Zero means RUN
2. All OFF state values at all levels must be higher than all
RETENTION state values at all levels
The platform is required to define the macros `PLAT_MAX_RET_STATE` and
`PLAT_MAX_OFF_STATE` to the framework. The requirement for these macros can
be found in the [Porting Guide].
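To make these constraints concrete, the following is a hedged sketch of how a simple two-level platform might define the macros and convert an incoming power-state parameter into this representation. The bit-field encoding of `power_state` is invented for illustration; `PSCI_E_SUCCESS` and `PSCI_E_INVALID_PARAMS` are the framework's return codes:

    /* Sketch: one retention (1) and one off (2) state at each of the two
     * levels of a single-cluster platform (PLAT_MAX_PWR_LVL == 1). These
     * macros would normally live in platform_def.h. */
    #define PLAT_MAX_RET_STATE  1
    #define PLAT_MAX_OFF_STATE  2

    static int example_validate_power_state(unsigned int power_state,
                                            psci_power_state_t *req_state)
    {
        /* Invented encoding: bits[3:0] = core state, bits[7:4] = cluster. */
        unsigned int core_state = power_state & 0xf;
        unsigned int cluster_state = (power_state >> 4) & 0xf;

        if (core_state > PLAT_MAX_OFF_STATE ||
            cluster_state > PLAT_MAX_OFF_STATE)
            return PSCI_E_INVALID_PARAMS;

        req_state->pwr_domain_state[0] = core_state;    /* power level 0 */
        req_state->pwr_domain_state[1] = cluster_state; /* power level 1 */
        return PSCI_E_SUCCESS;
    }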
The PSCI 1.0 implementation adds support to involve the platform in state
coordination. This enables the platform to decide the final target state.
During a request to place a power domain in a low power state, the platform
is passed an array of requested `plat_local_state_t` for that power domain by
each core within it through the `plat_get_target_pwr_state()` API. This API
coordinates amongst these requested states to determine a target
`plat_local_state_t` for that power domain. A default weak implementation of
this API is provided in the platform layer, which returns the minimum of the
requested local states to the PSCI state coordination logic. More details
of the `plat_get_target_pwr_state()` API can be found in the
[Porting Guide][get_target_pwr_state].
The PSCI Generic implementation expects platform ports to populate the handlers
for the `plat_psci_ops` structure which is declared as:
typedef struct plat_psci_ops {
void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(u_register_t mpidr);
void (*pwr_domain_off)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_finish)(
const psci_power_state_t *target_state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
} plat_psci_ops_t;
The description of these handlers can be found in the [Porting Guide][psci_ops].
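A platform port typically populates only the handlers it supports and registers the structure during PSCI setup. The following is a hedged sketch: every `example_*` symbol is invented, and the registration signature shown for `plat_setup_psci_ops()` (described under Miscellaneous modifications below) is the one used by the ARM reference ports:

    /* Handler prototypes matching the plat_psci_ops_t fields; bodies are
     * elided in this sketch, and unsupported operations are left NULL. */
    static int example_pwr_domain_on(u_register_t mpidr);
    static void example_pwr_domain_off(const psci_power_state_t *target_state);
    static void example_pwr_domain_suspend(const psci_power_state_t *target_state);
    static void example_pwr_domain_on_finish(const psci_power_state_t *target_state);
    static void example_pwr_domain_suspend_finish(const psci_power_state_t *target_state);
    static int example_validate_power_state(unsigned int power_state,
                                            psci_power_state_t *req_state);

    static const plat_psci_ops_t example_plat_psci_ops = {
        .pwr_domain_on             = example_pwr_domain_on,
        .pwr_domain_off            = example_pwr_domain_off,
        .pwr_domain_suspend        = example_pwr_domain_suspend,
        .pwr_domain_on_finish      = example_pwr_domain_on_finish,
        .pwr_domain_suspend_finish = example_pwr_domain_suspend_finish,
        .validate_power_state      = example_validate_power_state,
    };

    int plat_setup_psci_ops(uintptr_t sec_entrypoint,
                            const plat_psci_ops_t **psci_ops)
    {
        /* Program the warm reset entrypoint once, e.g. into a mailbox;
         * example_program_mailbox() is an invented helper. */
        example_program_mailbox(sec_entrypoint);
        *psci_ops = &example_plat_psci_ops;
        return 0;
    }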
The previous `plat_pm_ops` structure is deprecated. Compared with the previous
handlers, the major differences are:
* Difference in parameters
The PSCI 1.0 implementation depends on the `validate_power_state` handler to
convert the power-state parameter (possibly encoding a composite power state)
passed in a PSCI `CPU_SUSPEND` call to the `psci_power_state` format.
The `plat_psci_ops` handlers, `pwr_domain_off` and `pwr_domain_suspend`, are
passed the target local state for each affected power domain. The platform
must execute operations specific to these target states. Similarly,
`pwr_domain_on_finish` and `pwr_domain_suspend_finish` are passed the local
states of the affected power domains before wakeup. The platform
must execute actions to restore these power domains from these specific
local states.
* Difference in invocation
Whereas the power management handlers in `plat_pm_ops` used to be invoked
for each affinity level up to the target affinity level, the new handlers
are invoked only once. The `target_state` encodes the target low power
state or the low power state woken up from for each affected power domain.
* Difference in semantics
Although the previous `suspend` handlers could be used for power down as well
as retention at different affinity levels, the new handlers make this support
explicit. The `pwr_domain_suspend` handler can be used to specify powerdown and
retention at various power domain levels, subject to the conditions mentioned
in section 4.2.1 of [PSCI].
Unlike the previous `standby` handler, the `cpu_standby()` handler is only used
as a fast path for placing a core power domain into a standby or retention
state.
The diagram below shows the sequence of a PSCI SUSPEND call and its interaction
with the platform layer, depicting the exchange of data between the generic
PSCI layer and the platform layer.
![Image 1](diagrams/psci-suspend-sequence.png?raw=true)
Refer to [plat/arm/board/fvp/fvp_pm.c] for the implementation details of
these handlers for the FVP. The commit b6df6ccbc88cc14592f5e603ef580d3cbf4733c3
demonstrates the migration of ARM reference platforms to the new platform API.
2.3 Miscellaneous modifications
-------------------------------
In addition to the framework changes, unification of warm reset entry points on
wakeup from low power modes has led to a change in the platform API. In the
earlier implementation, the warm reset entry used to be programmed into the
mailboxes by the 'ON' and 'SUSPEND' power management hooks. In the PSCI 1.0
implementation, this information is not required, because the framework can
determine from the affinity info state whether to execute the
`suspend_finisher` or the `on_finisher`.
As a result, the warm reset entry point must be programmed only once. The
`plat_setup_psci_ops()` API takes the secure entry point as an
additional parameter to enable the platforms to configure their mailbox. The
plat_psci_ops handlers `pwr_domain_on` and `pwr_domain_suspend` no longer take
the warm reset entry point as a parameter.
Also, some platform APIs which took an `MPIDR` as an argument were only ever
invoked to perform actions specific to the calling core, which makes the
argument redundant. Therefore the platform APIs `plat_get_my_entrypoint()`,
`plat_is_my_cpu_primary()`, `plat_set_my_stack()` and `plat_get_my_stack()`
have been defined. They are meant to be invoked only for operations on the
calling core, and they replace `platform_get_entrypoint()`,
`platform_is_primary_cpu()`, `platform_set_stack()` and `platform_get_stack()`
respectively.
3. Compatibility layer
----------------------
To ease the migration of the platform ports to the new porting interface,
a compatibility layer is introduced that essentially implements a glue layer
between the old platform API and the new API. The build flag
`ENABLE_PLAT_COMPAT` (enabled by default) specifies whether to enable this
layer or not. A platform port which has migrated to the new API can disable
this flag within the platform specific makefile.
The compatibility layer works on the assumption that the onus of
state coordination, in case multiple low power states are supported,
is with the platform. The generic PSCI implementation only takes into
account whether the suspend request is a power down request or not. This
corresponds with the behavior of the PSCI implementation before the
introduction of the new frameworks. Also, it assumes that the affinity levels
of the platform
correspond directly to the power domain levels.
The compatibility layer dynamically constructs the new topology
description array by querying the platform using the `plat_get_aff_count()`
and `plat_get_aff_state()` APIs. The linear index returned by
`platform_get_core_pos()` is used as the core index for the cores. The
higher level (non-core) power domain nodes must know which cores are
contained within their domain. Each such node therefore stores the core
index of the first core within it and the number of cores that follow.
This means that the core indices returned by `platform_get_core_pos()`
for cores within a particular power domain must be consecutive. We expect
this to be the case for most platform ports, including the ARM reference
platforms.
The old PSCI helpers like `psci_get_suspend_powerstate()`,
`psci_get_suspend_stateid()`, `psci_get_suspend_stateid_by_mpidr()`,
`psci_get_max_phys_off_afflvl()` and `psci_get_suspend_afflvl()` are also
implemented for the compatibility layer. This allows the existing
platform ports to work with the new PSCI frameworks without significant
rework.
4. Deprecated Platform API
---------------------------
This section documents the deprecated platform porting API.
## Common mandatory modifications
The following are the mandatory macros to be defined by the platform port in
`platform_def.h`:
* **#define : PLATFORM_NUM_AFFS**
Defines the total number of nodes in the affinity hierarchy at all affinity
levels used by the platform.
* **#define : PLATFORM_MAX_AFFLVL**
Defines the maximum affinity level that the power management operations
should apply to. ARMv8-A has support for four affinity levels. It is likely
that hardware will implement fewer affinity levels. This macro allows the
PSCI implementation to consider only those affinity levels in the system
that the platform implements. For example, the Base AEM FVP implements two
clusters with a configurable number of cores. It reports the maximum
affinity level as 1, resulting in PSCI power control up to the cluster
level.
The following functions must be implemented by the platform port to enable
the reset vector code to perform the required tasks.
### Function : platform_get_entrypoint() [mandatory]
Argument : unsigned long
Return : unsigned long
This function is called with the `SCTLR.M` and `SCTLR.C` bits disabled. The core
is identified by its `MPIDR`, which is passed as the argument. The function is
responsible for distinguishing between a warm and cold reset using platform-
specific means. If it is a warm reset, it returns the entrypoint into the
BL3-1 image that the core must jump to. If it is a cold reset, this function
must return zero.
This function is also responsible for implementing a platform-specific mechanism
to handle the condition where the core has been warm reset but there is no
entrypoint to jump to.
This function does not follow the Procedure Call Standard used by the
Application Binary Interface for the ARM 64-bit architecture. The caller should
not assume that callee saved registers are preserved across a call to this
function.
### Function : platform_is_primary_cpu() [mandatory]
Argument : unsigned long
Return : unsigned int
This function identifies a core by its `MPIDR`, which is passed as the argument,
to determine whether this core is the primary core or a secondary core. A return
value of zero indicates that the core is not the primary core, while a non-zero
return value indicates that the core is the primary core.
## Common optional modifications
### Function : platform_get_core_pos()
Argument : unsigned long
Return : int
A platform may need to convert the `MPIDR` of a core to an absolute number, which
can be used as a core-specific linear index into blocks of memory (for example
while allocating per-core stacks). This routine contains a simple mechanism
to perform this conversion, using the assumption that each cluster contains a
maximum of four cores:
linear index = cpu_id + (cluster_id * 4)
cpu_id = 8-bit value in MPIDR at affinity level 0
cluster_id = 8-bit value in MPIDR at affinity level 1
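Written out as code, the conversion is the following sketch of the stated formula (the in-tree helper may equally be implemented in assembly):

    /* Default linear-index conversion, assuming at most four cores per
     * cluster and the standard MPIDR layout (Aff0 in bits[7:0], Aff1 in
     * bits[15:8]). */
    int platform_get_core_pos(unsigned long mpidr)
    {
        unsigned int cpu_id = mpidr & 0xff;            /* affinity level 0 */
        unsigned int cluster_id = (mpidr >> 8) & 0xff; /* affinity level 1 */

        return cpu_id + (cluster_id * 4);
    }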
### Function : platform_set_stack()
Argument : unsigned long
Return : void
This function sets the current stack pointer to the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
`PLATFORM_STACK_SIZE`.
Common implementations of this function for the UP and MP BL images are
provided in [plat/common/aarch64/platform_up_stack.S] and
[plat/common/aarch64/platform_mp_stack.S]
### Function : platform_get_stack()
Argument : unsigned long
Return : unsigned long
This function returns the base address of the normal memory stack that
has been allocated for the core specified by MPIDR. For BL images that only
require a stack for the primary core the parameter is ignored. The size of
the stack allocated to each core is specified by the platform defined constant
`PLATFORM_STACK_SIZE`.
Common implementations of this function for the UP and MP BL images are
provided in [plat/common/aarch64/platform_up_stack.S] and
[plat/common/aarch64/platform_mp_stack.S]
## Modifications for Power State Coordination Interface (in BL3-1)
The following functions must be implemented to initialize PSCI functionality in
the ARM Trusted Firmware.
### Function : plat_get_aff_count() [mandatory]
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary core.
This function is called by the PSCI initialization code to detect the system
topology. Its purpose is to return the number of affinity instances implemented
at a given `affinity level` (specified by the first argument) and a given
`MPIDR` (specified by the second argument). For example, on a dual-cluster
system where the first cluster implements two cores and the second cluster
implements four cores, a call to this function with an `MPIDR` corresponding
to the first cluster (`0x0`) and affinity level 0 would return 2. A call
to this function with an `MPIDR` corresponding to the second cluster (`0x100`)
and affinity level 0 would return 4.
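A hedged sketch of this handler for the dual-cluster example above; the affinity level 1 behaviour (returning the number of clusters) is an assumption extrapolated from the same pattern:

    /* Sketch: two clusters; cluster 0 has two cores, cluster 1 has four. */
    unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
    {
        if (aff_lvl == 1)
            return 2; /* number of clusters in the system */

        /* aff_lvl == 0: the core count depends on the cluster queried. */
        return (((mpidr >> 8) & 0xff) == 0) ? 2 : 4;
    }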
### Function : plat_get_aff_state() [mandatory]
Argument : unsigned int, unsigned long
Return : unsigned int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary core.
This function is called by the PSCI initialization code. Its purpose is to
return the state of an affinity instance. The affinity instance is determined by
the affinity ID at a given `affinity level` (specified by the first argument)
and an `MPIDR` (specified by the second argument). The state can be one of
`PSCI_AFF_PRESENT` or `PSCI_AFF_ABSENT`. The latter state is used to cater for
system topologies where certain affinity instances are unimplemented. For
example, consider a platform that implements a single cluster with four cores and
another core implemented directly on the interconnect with the cluster. The
`MPIDR`s of the cluster would range from `0x0-0x3`. The `MPIDR` of the single
core is 0x100 to indicate that it does not belong to cluster 0. Cluster 1
is missing but needs to be accounted for to reach this single core in the
topology tree. Therefore it is marked as `PSCI_AFF_ABSENT`.
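A hedged sketch of this handler for the topology just described; `PSCI_AFF_PRESENT` and `PSCI_AFF_ABSENT` are the framework constants, while the field masking is illustrative:

    /* Cluster 0 holds the four real cores; cluster 1 exists in the
     * topology tree only to reach the interconnect-attached core at
     * MPIDR 0x100, so the cluster-level instance is reported absent. */
    unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
    {
        unsigned int cluster_id = (mpidr >> 8) & 0xff;

        if (aff_lvl == 1 && cluster_id == 1)
            return PSCI_AFF_ABSENT;

        return PSCI_AFF_PRESENT;
    }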
### Function : platform_setup_pm() [mandatory]
Argument : const plat_pm_ops **
Return : int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary core.
This function is called by the PSCI initialization code. Its purpose is to export
handler routines for platform-specific power management actions by populating
the passed pointer with a pointer to the private `plat_pm_ops` structure of
BL3-1.
A description of each member of this structure is given below. A platform port
is expected to implement these handlers if the corresponding PSCI operation
is to be supported and these handlers are expected to succeed if the return
type is `void`.
#### plat_pm_ops.affinst_standby()
Perform the platform-specific setup to enter the standby state indicated by the
passed argument. The generic code expects the handler to succeed.
#### plat_pm_ops.affinst_on()
Perform the platform specific setup to power on an affinity instance, specified
by the `MPIDR` (first argument) and `affinity level` (third argument). The
`state` (fourth argument) contains the current state of that affinity instance
(ON or OFF). This is useful to determine whether any action must be taken. For
example, while powering on a core, the cluster that contains this core might
already be in the ON state. The platform decides what actions must be taken to
transition from the current state to the target state (indicated by the power
management operation). The generic code expects the platform to return
E_SUCCESS on success or E_INTERN_FAIL for any failure.
#### plat_pm_ops.affinst_off()
Perform the platform specific setup to power off an affinity instance of the
calling core. It is called by the PSCI `CPU_OFF` API implementation.
The `affinity level` (first argument) and `state` (second argument) have
a similar meaning as described in the `affinst_on()` operation. They
identify the affinity instance on which the call is made and its
current state. This gives the platform port an indication of the
state transition it must make to perform the requested action. For example, if
the calling core is the last powered on core in the cluster, after powering down
affinity level 0 (the core), the platform port should power down affinity
level 1 (the cluster) as well. The generic code expects the handler to succeed.
#### plat_pm_ops.affinst_suspend()
Perform the platform specific setup to suspend an affinity instance of the
calling core. It is called by the PSCI `CPU_SUSPEND` and `SYSTEM_SUSPEND`
API implementations.
The `affinity level` (second argument) and `state` (third argument) have a
similar meaning as described in the `affinst_on()` operation. They are used to
identify the affinity instance on which the call is made and its current state.
This gives the platform port an indication of the state transition it must
make to perform the requested action. For example, if the calling core is the
last powered on core in the cluster, after powering down affinity level 0
(the core), the platform port should power down affinity level 1 (the cluster)
as well.
The difference between turning an affinity instance off and suspending it
is that in the former case, the affinity instance is expected to re-initialize
its state when it is next powered on (see `affinst_on_finish()`). In the latter
case, the affinity instance is expected to save enough state so that it can
resume execution by restoring this state when it is powered on (see
`affinst_suspend_finish()`). The generic code expects the handler to succeed.
#### plat_pm_ops.affinst_on_finish()
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an earlier PSCI `CPU_ON` call.
It performs the platform-specific setup required to initialize enough state for
this core to enter the Normal world and also provide secure runtime firmware
services.
The `affinity level` (first argument) and `state` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
#### plat_pm_ops.affinst_suspend_finish()
This function is called by the PSCI implementation after the calling core is
powered on and released from reset in response to an asynchronous wakeup
event, for example a timer interrupt that was programmed by the core during the
`CPU_SUSPEND` call or `SYSTEM_SUSPEND` call. It performs the platform-specific
setup required to restore the saved state for this core to resume execution
in the Normal world and also provide secure runtime firmware services.
The `affinity level` (first argument) and `state` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
#### plat_pm_ops.validate_power_state()
This function is called by the PSCI implementation during the `CPU_SUSPEND`
call to validate the `power_state` parameter of the PSCI API. If the
`power_state` is known to be invalid, the platform must return
PSCI_E_INVALID_PARAMS as an error, which is propagated back to the Normal
world PSCI client.
#### plat_pm_ops.validate_ns_entrypoint()
This function is called by the PSCI implementation during the `CPU_SUSPEND`,
`SYSTEM_SUSPEND` and `CPU_ON` calls to validate the Non-secure `entry_point`
parameter passed by the Normal world. If the `entry_point` is known to be
invalid, the platform must return PSCI_E_INVALID_PARAMS as an error, which is
propagated back to the Normal world PSCI client.
#### plat_pm_ops.get_sys_suspend_power_state()
This function is called by the PSCI implementation during the `SYSTEM_SUSPEND`
call to return the `power_state` parameter. This allows the platform to encode
the appropriate State-ID field within the `power_state` parameter which can be
utilized in `affinst_suspend()` to suspend to the system affinity level. The
`power_state` parameter should be in the same format as specified by the
PSCI specification for the CPU_SUSPEND API.
- - - - - - - - - - - - - - - - - - - - - - - - - -
_Copyright (c) 2015, ARM Limited and Contributors. All rights reserved._
[Porting Guide]: porting-guide.md
[Power Domain Topology Design]: psci-pd-tree.md
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
[psci pd tree]: psci-pd-tree.md
[my_core_pos]: porting-guide.md#function--plat_my_core_pos
[get_target_pwr_state]: porting-guide.md#function--plat_get_target_pwr_state-optional
[psci_ops]: porting-guide.md#function--plat_setup_psci_ops-mandatory
[plat/arm/board/fvp/fvp_pm.c]: ../plat/arm/board/fvp/fvp_pm.c
[plat/common/aarch64/platform_mp_stack.S]: ../plat/common/aarch64/platform_mp_stack.S
[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S


@ -8,7 +8,8 @@ Contents
2. [Common Modifications](#2--common-modifications)
* [Common mandatory modifications](#21-common-mandatory-modifications)
* [Handling reset](#22-handling-reset)
* [Common optional modifications](#23-common-optional-modifications)
* [Common mandatory modifications](#23-common-mandatory-modifications)
* [Common optional modifications](#24-common-optional-modifications)
3. [Boot Loader stage specific modifications](#3--modifications-specific-to-a-boot-loader-stage)
* [Boot Loader stage 1 (BL1)](#31-boot-loader-stage-1-bl1)
* [Boot Loader stage 2 (BL2)](#32-boot-loader-stage-2-bl2)
@ -25,6 +26,10 @@ Contents
1. Introduction
----------------
Please note that this document has been updated for the new platform API
as required by the PSCI v1.0 implementation. Please refer to the
[Migration Guide] for the previous platform API.
Porting the ARM Trusted Firmware to a new platform involves making some
mandatory and optional modifications for both the cold and warm boot paths.
Modifications consist of:
@ -139,21 +144,39 @@ platform port to define additional platform porting constants in
Defines the total number of CPUs implemented by the platform across all
clusters in the system.
* **#define : PLATFORM_NUM_AFFS**
* **#define : PLAT_NUM_PWR_DOMAINS**
Defines the total number of nodes in the affinity heirarchy at all affinity
levels used by the platform.
Defines the total number of nodes in the power domain topology
tree at all the power domain levels used by the platform.
This macro is used by the PSCI implementation to allocate
data structures to represent power domain topology.
* **#define : PLATFORM_MAX_AFFLVL**
* **#define : PLAT_MAX_PWR_LVL**
Defines the maximum affinity level that the power management operations
should apply to. ARMv8-A has support for 4 affinity levels. It is likely
that hardware will implement fewer affinity levels. This macro allows the
PSCI implementation to consider only those affinity levels in the system
that the platform implements. For example, the Base AEM FVP implements two
clusters with a configurable number of CPUs. It reports the maximum
affinity level as 1, resulting in PSCI power control up to the cluster
level.
Defines the maximum power domain level that the power management operations
should apply to. Often, but not always, a power domain level
corresponds to an affinity level. This macro allows the PSCI implementation
to know the highest power domain level that it should consider for power
management operations in the system that the platform implements. For
example, the Base AEM FVP implements two clusters with a configurable
number of CPUs and it reports the maximum power domain level as 1.
* **#define : PLAT_MAX_OFF_STATE**
Defines the local power state corresponding to the deepest power down
possible at every power domain level in the platform. The local power
states for each level may be sparsely allocated between 0 and this value
with 0 being reserved for the RUN state. The PSCI implementation uses this
value to initialize the local power states of the power domain nodes and
to specify the requested power state for a PSCI_CPU_OFF call.
* **#define : PLAT_MAX_RET_STATE**
Defines the local power state corresponding to the deepest retention state
possible at every power domain level in the platform. This macro should be
a value less than PLAT_MAX_OFF_STATE and greater than 0. It is used by the
PSCI implementation to distinguish between retention and power down local
power states within a PSCI_CPU_SUSPEND call.
* **#define : BL1_RO_BASE**
@ -408,21 +431,17 @@ The following functions need to be implemented by the platform port to enable
reset vector code to perform the above tasks.
### Function : platform_get_entrypoint() [mandatory]
### Function : plat_get_my_entrypoint() [mandatory when PROGRAMMABLE_RESET_ADDRESS == 0]
Argument : unsigned long
Return : unsigned int
Argument : void
Return : unsigned long
This function is called with the `SCTLR.M` and `SCTLR.C` bits disabled. The CPU
is identified by its `MPIDR`, which is passed as the argument. The function is
responsible for distinguishing between a warm and cold reset using platform-
specific means. If it's a warm reset then it returns the entrypoint into the
BL3-1 image that the CPU must jump to. If it's a cold reset then this function
must return zero.
This function is also responsible for implementing a platform-specific mechanism
to handle the condition where the CPU has been warm reset but there is no
entrypoint to jump to.
This function is called with the MMU and caches disabled
(`SCTLR_EL3.M` = 0 and `SCTLR_EL3.C` = 0). The function is responsible for
distinguishing between a warm and cold reset for the current CPU using
platform-specific means. If it's a warm reset, then it returns the warm
reset entrypoint provided to `plat_setup_psci_ops()` during
BL3-1 initialization. If it's a cold reset then this function must return zero.
This function does not follow the Procedure Call Standard used by the
Application Binary Interface for the ARM 64-bit architecture. The caller should
@ -431,11 +450,16 @@ function.
This function fulfills requirement 1 and 3 listed above.
Note that for platforms that support programming the reset address, it is
expected that a CPU will start executing code directly at the right address,
both on a cold and warm reset. In this case, there is no need to identify the
type of reset nor to query the warm reset entrypoint. Therefore, implementing
this function is not required on such platforms.
### Function : plat_secondary_cold_boot_setup() [mandatory]
Argument : void
Return : void
This function is called with the MMU and data caches disabled. It is responsible
for placing the executing secondary CPU in a platform-specific state until the
@ -449,15 +473,15 @@ requires them.
This function fulfills requirement 2 above.
### Function : platform_is_primary_cpu() [mandatory]
### Function : plat_is_my_cpu_primary() [mandatory]
Argument : unsigned long
Argument : void
Return : unsigned int
This function identifies a CPU by its `MPIDR`, which is passed as the argument,
to determine whether this CPU is the primary CPU or a secondary CPU. A return
value of zero indicates that the CPU is not the primary CPU, while a non-zero
return value indicates that the CPU is the primary CPU.
This function identifies whether the current CPU is the primary CPU or a
secondary CPU. A return value of zero indicates that the CPU is not the
primary CPU, while a non-zero return value indicates that the CPU is the
primary CPU.
### Function : platform_mem_init() [mandatory]
@ -468,9 +492,6 @@ return value indicates that the CPU is the primary CPU.
This function is called before any access to data is made by the firmware, in
order to carry out any essential memory initialization.
The ARM FVP port uses this function to initialize the mailbox memory used for
providing the warm-boot entry-point addresses.
### Function: plat_get_rotpk_info()
@ -504,58 +525,75 @@ retrieved from the platform. The function also reports extra information related
to the ROTPK in the flags parameter.
2.3 Common mandatory modifications
---------------------------------
2.3 Common optional modifications
The following functions are mandatory and must be implemented
by the platform port.
### Function : plat_my_core_pos()
Argument : void
Return : unsigned int
This function returns the index of the calling CPU, which is used as a
CPU-specific linear index into blocks of memory (for example, while allocating
per-CPU stacks). This function will be invoked very early in the
initialization sequence, which mandates that this function should be
implemented in assembly and should not rely on the availability of a C
runtime environment.
This function plays a crucial role in the power domain topology framework in
PSCI and details of this can be found in [Power Domain Topology Design].
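For clarity, the C equivalent of a typical implementation is sketched below
for a platform with at most 4 CPUs per cluster; an actual port must code this
in assembly for the reason given above:

```
#include <arch.h>
#include <arch_helpers.h>
#include <types.h>

unsigned int plat_my_core_pos(void)
{
	u_register_t mpidr = read_mpidr_el1();
	unsigned int cpu_id = mpidr & MPIDR_AFFLVL_MASK;
	unsigned int cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) &
						MPIDR_AFFLVL_MASK;

	/* Assumes a maximum of 4 CPUs per cluster */
	return cpu_id + (cluster_id * 4);
}
```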
### Function : plat_core_pos_by_mpidr()
Argument : u_register_t
Return : int
This function validates the `MPIDR` of a CPU and converts it to an index,
which can be used as a CPU-specific linear index into blocks of memory. In
case the `MPIDR` is invalid, this function returns -1. This function will only
be invoked by BL3-1 after the power domain topology is initialized and can
utilize the C runtime environment. For further details about how ARM Trusted
Firmware represents the power domain topology and how this relates to the
linear CPU index, please refer to [Power Domain Topology Design].
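A minimal sketch for a hypothetical two-cluster platform with 4 CPUs per
cluster follows; the validation rules are entirely platform-specific:

```
#include <arch.h>
#include <arch_helpers.h>
#include <types.h>

int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	unsigned int cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) &
						MPIDR_AFFLVL_MASK;
	unsigned int cpu_id = mpidr & MPIDR_AFFLVL_MASK;

	/* Reject MPIDRs outside the 2 clusters x 4 CPUs topology */
	if (cluster_id >= 2 || cpu_id >= 4)
		return -1;

	return cpu_id + (cluster_id * 4);
}
```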
2.4 Common optional modifications
---------------------------------
The following are helper functions implemented by the firmware that perform
common platform-specific tasks. A platform may choose to override these
definitions.
### Function : plat_set_my_stack()
### Function : platform_get_core_pos()
Argument : unsigned long
Return : int
A platform may need to convert the `MPIDR` of a CPU to an absolute number, which
can be used as a CPU-specific linear index into blocks of memory (for example
while allocating per-CPU stacks). This routine contains a simple mechanism
to perform this conversion, using the assumption that each cluster contains a
maximum of 4 CPUs:
linear index = cpu_id + (cluster_id * 4)
cpu_id = 8-bit value in MPIDR at affinity level 0
cluster_id = 8-bit value in MPIDR at affinity level 1
### Function : platform_set_stack()
Argument : unsigned long
Argument : void
Return : void
This function sets the current stack pointer to the normal memory stack that
has been allocated for the CPU specified by MPIDR. For BL images that only
require a stack for the primary CPU the parameter is ignored. The size of
the stack allocated to each CPU is specified by the platform defined constant
`PLATFORM_STACK_SIZE`.
has been allocated for the current CPU. For BL images that only require a
stack for the primary CPU, the UP version of the function is used. The size
of the stack allocated to each CPU is specified by the platform defined
constant `PLATFORM_STACK_SIZE`.
Common implementations of this function for the UP and MP BL images are
provided in [plat/common/aarch64/platform_up_stack.S] and
[plat/common/aarch64/platform_mp_stack.S]
### Function : platform_get_stack()
### Function : plat_get_my_stack()
Argument : unsigned long
Argument : void
Return : unsigned long
This function returns the base address of the normal memory stack that
has been allocated for the CPU specified by MPIDR. For BL images that only
require a stack for the primary CPU the parameter is ignored. The size of
the stack allocated to each CPU is specified by the platform defined constant
`PLATFORM_STACK_SIZE`.
has been allocated for the current CPU. For BL images that only require a
stack for the primary CPU, the UP version of the function is used. The size
of the stack allocated to each CPU is specified by the platform defined
constant `PLATFORM_STACK_SIZE`.
Common implementations of this function for the UP and MP BL images are
provided in [plat/common/aarch64/platform_up_stack.S] and
@ -1116,147 +1154,159 @@ modes table.
------------------------------------------------
The ARM Trusted Firmware's implementation of the PSCI API is based around the
concept of an _affinity instance_. Each _affinity instance_ can be uniquely
identified in a system by a CPU ID (the processor `MPIDR` is used in the PSCI
interface) and an _affinity level_. A processing element (for example, a
CPU) is at level 0. If the CPUs in the system are described in a tree where the
node above a CPU is a logical grouping of CPUs that share some state, then
affinity level 1 is that group of CPUs (for example, a cluster), and affinity
level 2 is a group of clusters (for example, the system). The implementation
assumes that the affinity level 1 ID can be computed from the affinity level 0
ID (for example, a unique cluster ID can be computed from the CPU ID). The
current implementation computes this on the basis of the recommended use of
`MPIDR` affinity fields in the ARM Architecture Reference Manual.
concept of a _power domain_. A _power domain_ is a CPU or a logical group of
CPUs which share some state on which power management operations can be
performed as specified by [PSCI]. Each CPU in the system is assigned a cpu
index which is a unique number between `0` and `PLATFORM_CORE_COUNT - 1`.
The _power domains_ are arranged in a hierarchical tree structure and
each _power domain_ can be identified in a system by the cpu index of any CPU
that is part of that domain and a _power domain level_. A processing element
(for example, a CPU) is at level 0. If the _power domain_ node above a CPU is
a logical grouping of CPUs that share some state, then level 1 is that group
of CPUs (for example, a cluster), and level 2 is a group of clusters
(for example, the system). More details on the power domain topology and its
organization can be found in [Power Domain Topology Design].
BL3-1's platform initialization code exports a pointer to the platform-specific
power management operations required for the PSCI implementation to function
correctly. This information is populated in the `plat_pm_ops` structure. The
PSCI implementation calls members of the `plat_pm_ops` structure for performing
power management operations for each affinity instance. For example, the target
CPU is specified by its `MPIDR` in a PSCI `CPU_ON` call. The `affinst_on()`
handler (if present) is called for each affinity instance as the PSCI
implementation powers up each affinity level implemented in the `MPIDR` (for
example, CPU, cluster and system).
correctly. This information is populated in the `plat_psci_ops` structure. The
PSCI implementation calls members of the `plat_psci_ops` structure for performing
power management operations on the power domains. For example, the target
CPU is specified by its `MPIDR` in a PSCI `CPU_ON` call. The `pwr_domain_on()`
handler (if present) is called for the CPU power domain.
The `power-state` parameter of a PSCI `CPU_SUSPEND` call can be used to
describe composite power states specific to a platform. The PSCI implementation
defines a generic representation of the power-state parameter, viz
`psci_power_state_t`, which is an array of local power states where each index
corresponds to a power domain level. Each entry contains the local power state
the power domain at that power level could enter. It relies on the
`validate_power_state()` handler to convert the power-state parameter (possibly
encoding a composite power state) passed in a PSCI `CPU_SUSPEND` call to this
representation.
The following functions must be implemented to initialize PSCI functionality in
the ARM Trusted Firmware.
### Function : plat_get_aff_count() [mandatory]
### Function : plat_get_target_pwr_state() [optional]
Argument : unsigned int, unsigned long
Return : unsigned int
Argument : unsigned int, const plat_local_state_t *, unsigned int
Return : plat_local_state_t
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary CPU.
The PSCI generic code uses this function to let the platform participate in
state coordination during a power management operation. The function is passed
a pointer to an array of platform-specific local power states `states` (second
argument), which contains the requested power state for each CPU at a particular
power domain level `lvl` (first argument) within the power domain. The function
is expected to traverse this array of up to `ncpus` (third argument) entries and
return a coordinated target power state by comparing all the requested power
states. The target power state should not be deeper than any of the requested
power states.
This function is called by the PSCI initialization code to detect the system
topology. Its purpose is to return the number of affinity instances implemented
at a given `affinity level` (specified by the first argument) and a given
`MPIDR` (specified by the second argument). For example, on a dual-cluster
system where first cluster implements 2 CPUs and the second cluster implements 4
CPUs, a call to this function with an `MPIDR` corresponding to the first cluster
(`0x0`) and affinity level 0, would return 2. A call to this function with an
`MPIDR` corresponding to the second cluster (`0x100`) and affinity level 0,
would return 4.
A weak definition of this API is provided by default wherein it assumes
that the platform assigns a local state value in order of increasing depth
of the power state i.e. for two power states X & Y, if X < Y
then X represents a shallower power state than Y. As a result, the
coordinated target local power state for a power domain will be the minimum
of the requested local power state values.
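A sketch equivalent to this default behaviour is shown below, assuming the
increasing-depth encoding described above:

```
#include <platform_def.h>	/* PLAT_MAX_OFF_STATE */
#include <psci.h>

plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					const plat_local_state_t *states,
					unsigned int ncpus)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE;
	unsigned int i;

	/* This simple minimum policy needs only the requested states */
	(void) lvl;

	for (i = 0; i < ncpus; i++) {
		if (states[i] < target)
			target = states[i];
	}

	return target;
}
```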
### Function : plat_get_aff_state() [mandatory]
### Function : plat_get_power_domain_tree_desc() [mandatory]
Argument : unsigned int, unsigned long
Return : unsigned int
Argument : void
Return : const unsigned char *
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary CPU.
This function is called by the PSCI initialization code. Its purpose is to
return the state of an affinity instance. The affinity instance is determined by
the affinity ID at a given `affinity level` (specified by the first argument)
and an `MPIDR` (specified by the second argument). The state can be one of
`PSCI_AFF_PRESENT` or `PSCI_AFF_ABSENT`. The latter state is used to cater for
system topologies where certain affinity instances are unimplemented. For
example, consider a platform that implements a single cluster with 4 CPUs and
another CPU implemented directly on the interconnect with the cluster. The
`MPIDR`s of the cluster would range from `0x0-0x3`. The `MPIDR` of the single
CPU would be 0x100 to indicate that it does not belong to cluster 0. Cluster 1
is missing but needs to be accounted for to reach this single CPU in the
topology tree. Hence it is marked as `PSCI_AFF_ABSENT`.
This function returns a pointer to the byte array containing the power domain
topology tree description. The format and method to construct this array are
described in [Power Domain Topology Design]. The BL3-1 PSCI initialization code
requires this array to be described by the platform, either statically or
dynamically, to initialize the power domain topology tree. In case the array
is populated dynamically, then plat_core_pos_by_mpidr() and
plat_my_core_pos() should also be implemented suitably so that the topology
tree description matches the CPU indices returned by these APIs. These APIs
together form the platform interface for the PSCI topology framework.
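For example, a hypothetical platform with two clusters of four CPUs each, and
no single system-level root, might describe its topology statically as
follows:

```
/* 2 cluster power domains at the highest level, each with 4 CPUs */
static const unsigned char plat_pd_tree_desc[] = {2, 4, 4};

const unsigned char *plat_get_power_domain_tree_desc(void)
{
	return plat_pd_tree_desc;
}
```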
### Function : platform_setup_pm() [mandatory]
### Function : plat_setup_psci_ops() [mandatory]
Argument : const plat_pm_ops **
Argument : uintptr_t, const plat_psci_ops **
Return : int
This function may execute with the MMU and data caches enabled if the platform
port does the necessary initializations in `bl31_plat_arch_setup()`. It is only
called by the primary CPU.
This function is called by PSCI initialization code. Its purpose is to export
handler routines for platform-specific power management actions by populating
the passed pointer with a pointer to BL3-1's private `plat_pm_ops` structure.
This function is called by PSCI initialization code. Its purpose is to let
the platform layer know about the warm boot entrypoint through the
`sec_entrypoint` (first argument) and to export handler routines for
platform-specific PSCI power management actions by populating the passed
pointer with a pointer to BL3-1's private `plat_psci_ops` structure.
A description of each member of this structure is given below. Please refer to
the ARM FVP specific implementation of these handlers in
[plat/arm/board/fvp/fvp_pm.c] as an example. A platform port is expected to
implement these handlers if the corresponding PSCI operation is to be supported
and these handlers are expected to succeed if the return type is `void`.
[plat/arm/board/fvp/fvp_pm.c] as an example. For each PSCI function that the
platform wants to support, the associated operation or operations in this
structure must be provided and implemented (refer to section 4 of
[Firmware Design] for the PSCI APIs supported in Trusted Firmware). To disable
a PSCI function in a platform port, the operation should be removed from this
structure instead of providing an empty implementation.
#### plat_pm_ops.affinst_standby()
#### plat_psci_ops.cpu_standby()
Perform the platform-specific setup to enter the standby state indicated by the
passed argument. The generic code expects the handler to succeed.
Perform the platform-specific actions to enter the standby state for a CPU
indicated by the passed argument. This provides a fast path for CPU standby,
wherein the overheads of PSCI state management and lock acquisition are avoided.
For this handler to be invoked by the PSCI `CPU_SUSPEND` API implementation,
the suspend state type specified in the `power-state` parameter should be
STANDBY and the target power domain level specified should be the CPU. The
handler should put the CPU into a low power retention state (usually by
issuing a wfi instruction) and ensure that it can be woken up from that
state by a normal interrupt. The generic code expects the handler to succeed.
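A minimal sketch of this handler, entering retention via `wfi`, could be:

```
#include <arch_helpers.h>	/* dsb(), wfi() */
#include <psci.h>

static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	/* cpu_state encodes the requested retention level; unused here */
	(void) cpu_state;

	/* Complete outstanding memory accesses, then wait for an interrupt */
	dsb();
	wfi();
}
```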
#### plat_pm_ops.affinst_on()
#### plat_psci_ops.pwr_domain_on()
Perform the platform specific setup to power on an affinity instance, specified
by the `MPIDR` (first argument) and `affinity level` (third argument). The
`state` (fourth argument) contains the current state of that affinity instance
(ON or OFF). This is useful to determine whether any action must be taken. For
example, while powering on a CPU, the cluster that contains this CPU might
already be in the ON state. The platform decides what actions must be taken to
transition from the current state to the target state (indicated by the power
management operation). The generic code expects the platform to return
E_SUCCESS on success or E_INTERN_FAIL for any failure.
Perform the platform specific actions to power on a CPU, specified
by the `MPIDR` (first argument). The generic code expects the platform to
return PSCI_E_SUCCESS on success or PSCI_E_INTERN_FAIL for any failure.
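A hedged sketch is shown below, where `plat_pwrc_write_pponr()` stands in for
whatever power-on request interface the platform's power controller actually
provides:

```
#include <psci.h>

static int plat_pwr_domain_on(u_register_t mpidr)
{
	/* Hypothetical: ask the power controller to release the CPU
	   from reset */
	plat_pwrc_write_pponr(mpidr);

	return PSCI_E_SUCCESS;
}
```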
#### plat_pm_ops.affinst_off()
#### plat_psci_ops.pwr_domain_off()
Perform the platform specific setup to power off an affinity instance of the
calling CPU. It is called by the PSCI `CPU_OFF` API implementation.
Perform the platform specific actions to prepare to power off the calling CPU
and its higher parent power domain levels as indicated by the `target_state`
(first argument). It is called by the PSCI `CPU_OFF` API implementation.
The `affinity level` (first argument) and `state` (second argument) have
a similar meaning as described in the `affinst_on()` operation. They are
used to identify the affinity instance on which the call is made and its
current state. This gives the platform port an indication of the
state transition it must make to perform the requested action. For example, if
the calling CPU is the last powered on CPU in the cluster, after powering down
affinity level 0 (CPU), the platform port should power down affinity level 1
(the cluster) as well. The generic code expects the handler to succeed.
The `target_state` encodes the platform coordinated target local power states
for the CPU power domain and its parent power domain levels. The handler
needs to perform the power management operations corresponding to the local
state at each power level.
#### plat_pm_ops.affinst_suspend()
For this handler, the local power state for the CPU power domain will be a
power down state, whereas it could be either a power down, retention or run
state for the higher power domain levels depending on the result of state
coordination. The generic code expects the handler to succeed.
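As a sketch, a two-level ARM platform might act on the coordinated states as
follows; `plat_gic_cpuif_disable()` and `plat_cluster_off()` are placeholders
for the platform's real operations, and `ARM_PWR_LVL0/1` are the ARM platform
macros introduced by this patch:

```
#include <assert.h>
#include <psci.h>

static void plat_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* The CPU local state is guaranteed to be a power down state */
	assert(is_local_state_off(
			target_state->pwr_domain_state[ARM_PWR_LVL0]));

	/* Prevent interrupts from spuriously waking up this CPU */
	plat_gic_cpuif_disable();	/* hypothetical */

	/* Power the cluster down too if its coordinated state says so */
	if (is_local_state_off(
			target_state->pwr_domain_state[ARM_PWR_LVL1]))
		plat_cluster_off();	/* hypothetical */
}
```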
Perform the platform specific setup to power off an affinity instance of the
calling CPU. It is called by the PSCI `CPU_SUSPEND` API and `SYSTEM_SUSPEND`
API implementations.
#### plat_psci_ops.pwr_domain_suspend()
The `affinity level` (second argument) and `state` (third argument) have a
similar meaning as described in the `affinst_on()` operation. They are used to
identify the affinity instance on which the call is made and its current state.
This gives the platform port an indication of the state transition it must
make to perform the requested action. For example, if the calling CPU is the
last powered on CPU in the cluster, after powering down affinity level 0 (CPU),
the platform port should power down affinity level 1 (the cluster) as well.
Perform the platform specific actions to prepare to suspend the calling
CPU and its higher parent power domain levels as indicated by the
`target_state` (first argument). It is called by the PSCI `CPU_SUSPEND`
API implementation.
The difference between turning an affinity instance off versus suspending it
is that in the former case, the affinity instance is expected to re-initialize
its state when its next powered on (see `affinst_on_finish()`). In the latter
case, the affinity instance is expected to save enough state so that it can
The `target_state` has a similar meaning as described in
the `pwr_domain_off()` operation. It encodes the platform coordinated
target local power states for the CPU power domain and its parent
power domain levels. The handler needs to perform the power management
operations corresponding to the local state at each power level. The generic
code expects the handler to succeed.
The difference between turning a power domain off versus suspending it
is that in the former case, the power domain is expected to re-initialize
its state when it is next powered on (see `pwr_domain_on_finish()`). In the
latter case, the power domain is expected to save enough state so that it can
resume execution by restoring this state when it is powered on (see
`affinst_suspend_finish()`). The generic code expects the handler to succeed.
`pwr_domain_suspend_finish()`).
#### plat_pm_ops.affinst_on_finish()
#### plat_psci_ops.pwr_domain_on_finish()
This function is called by the PSCI implementation after the calling CPU is
powered on and released from reset in response to an earlier PSCI `CPU_ON` call.
@ -1264,11 +1314,12 @@ It performs the platform-specific setup required to initialize enough state for
this CPU to enter the normal world and also provide secure runtime firmware
services.
The `affinity level` (first argument) and `state` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the handler to succeed.
The `target_state` (first argument) is the prior state of the power domains
immediately before the CPU was turned on. It indicates which power domains
above the CPU might require initialization due to having previously been in
low power states. The generic code expects the handler to succeed.
#### plat_pm_ops.affinst_suspend_finish()
#### plat_psci_ops.pwr_domain_suspend_finish()
This function is called by the PSCI implementation after the calling CPU is
powered on and released from reset in response to an asynchronous wakeup
@ -1277,40 +1328,36 @@ event, for example a timer interrupt that was programmed by the CPU during the
setup required to restore the saved state for this CPU to resume execution
in the normal world and also provide secure runtime firmware services.
The `affinity level` (first argument) and `state` (second argument) have a
similar meaning as described in the previous operations. The generic code
expects the platform to succeed.
The `target_state` (first argument) has a similar meaning as described in
the `pwr_domain_on_finish()` operation. The generic code expects the platform
to succeed.
#### plat_pm_ops.validate_power_state()
#### plat_psci_ops.validate_power_state()
This function is called by the PSCI implementation during the `CPU_SUSPEND`
call to validate the `power_state` parameter of the PSCI API. If the
`power_state` is known to be invalid, the platform must return
PSCI_E_INVALID_PARAMS as error, which is propagated back to the normal
world PSCI client.
call to validate the `power_state` parameter of the PSCI API and, if valid,
populate it in the `req_state` (second argument) array as power domain level
specific local states. If the `power_state` is invalid, the platform must
return PSCI_E_INVALID_PARAMS as error, which is propagated back to the
normal world PSCI client.
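A sketch of this handler for the original power-state format, assuming a
platform with `PLAT_MAX_PWR_LVL` of 1 and the `ARM_LOCAL_STATE_*` encoding
introduced by this patch, could look like:

```
#include <psci.h>

static int plat_validate_power_state(unsigned int power_state,
				     psci_power_state_t *req_state)
{
	int pstate = psci_get_pstate_type(power_state);
	int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
	int i;

	if (pwr_lvl > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	if (pstate == PSTATE_TYPE_STANDBY) {
		/* Retention is only supported at the CPU power level here */
		if (pwr_lvl != ARM_PWR_LVL0)
			return PSCI_E_INVALID_PARAMS;
		req_state->pwr_domain_state[ARM_PWR_LVL0] =
						ARM_LOCAL_STATE_RET;
	} else {
		/* Request power down for every level up to the target one */
		for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
			req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
	}

	return PSCI_E_SUCCESS;
}
```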
#### plat_pm_ops.validate_ns_entrypoint()
#### plat_psci_ops.validate_ns_entrypoint()
This function is called by the PSCI implementation during the `CPU_SUSPEND`,
`SYSTEM_SUSPEND` and `CPU_ON` calls to validate the non-secure `entry_point`
parameter passed by the normal world. If the `entry_point` is known to be
invalid, the platform must return PSCI_E_INVALID_PARAMS as error, which is
parameter passed by the normal world. If the `entry_point` is invalid,
the platform must return PSCI_E_INVALID_ADDRESS as error, which is
propagated back to the normal world PSCI client.
#### plat_pm_ops.get_sys_suspend_power_state()
#### plat_psci_ops.get_sys_suspend_power_state()
This function is called by the PSCI implementation during the `SYSTEM_SUSPEND`
call to return the `power_state` parameter. This allows the platform to encode
the appropriate State-ID field within the `power_state` parameter which can be
utilized in `affinst_suspend()` to suspend to system affinity level. The
`power_state` parameter should be in the same format as specified by the
PSCI specification for the CPU_SUSPEND API.
call to get the `req_state` parameter from the platform, which encodes the
power domain level specific local states to suspend to the system affinity
level. The `req_state` will be utilized to do the PSCI state coordination, and
`pwr_domain_suspend()` will be invoked with the coordinated target state to
enter system suspend.
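A sketch that requests the deepest power down state at every level for
`SYSTEM_SUSPEND`, using the ARM platform state macros from this patch:

```
#include <psci.h>

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}
```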
BL3-1 platform initialization code must also detect the system topology and
the state of each affinity instance in the topology. This information is
critical for the PSCI runtime service to function correctly. More details are
provided in the description of the `plat_get_aff_count()` and
`plat_get_aff_state()` functions above.
3.4 Interrupt Management framework (in BL3-1)
----------------------------------------------
@ -1478,6 +1525,12 @@ register x0.
4. Build flags
---------------
* **ENABLE_PLAT_COMPAT**
All the platform ports conforming to this API specification should define
the build flag `ENABLE_PLAT_COMPAT` to 0, as the compatibility layer should
be disabled. For more details on the compatibility layer, refer to the
[Migration Guide].
There are some build flags which can be defined by the platform to control
inclusion or exclusion of certain BL stages from the FIP image. These flags
need to be defined in the platform makefile which will get included by the
@ -1592,6 +1645,9 @@ _Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved._
[User Guide]: user-guide.md
[FreeBSD]: http://www.freebsd.org
[Firmware Design]: firmware-design.md
[Power Domain Topology Design]: psci-pd-tree.md
[PSCI]: http://infocenter.arm.com/help/topic/com.arm.doc.den0022c/DEN0022C_Power_State_Coordination_Interface.pdf
[Migration Guide]: platform-migration-guide.md
[plat/common/aarch64/platform_mp_stack.S]: ../plat/common/aarch64/platform_mp_stack.S
[plat/common/aarch64/platform_up_stack.S]: ../plat/common/aarch64/platform_up_stack.S
docs/psci-pd-tree.md Normal file
View file
@ -0,0 +1,295 @@
------------
Requirements
------------
1. A platform must export the `plat_get_aff_count()` and
`plat_get_aff_state()` APIs to enable the generic PSCI code to
populate a tree that describes the hierarchy of power domains in the
system. This approach is inflexible because a change to the topology
requires a change in the code.
It would be much simpler for the platform to describe its power domain tree
in a data structure.
2. The generic PSCI code generates MPIDRs in order to populate the power domain
tree. It also uses an MPIDR to find a node in the tree. The assumption that
a platform will use exactly the same MPIDRs as generated by the generic PSCI
code is not scalable. The use of an MPIDR also restricts the number of
levels in the power domain tree to four.
Therefore, there is a need to decouple allocation of MPIDRs from the
mechanism used to populate the power domain topology tree.
3. The current arrangement of the power domain tree requires a binary search
over the sibling nodes at a particular level to find a specified power
domain node. During a power management operation, the tree is traversed from
a 'start' to an 'end' power level. The binary search is required to find the
node at each level. The natural way to perform this traversal is to
start from a leaf node and follow the parent node pointer to reach the end
level.
Therefore, there is a need to define data structures that implement the tree in
a way which facilitates such a traversal.
4. The attributes of a core power domain differ from the attributes of power
domains at higher levels. For example, only a core power domain can be identified
using an MPIDR. There is no requirement to perform state coordination while
performing a power management operation on the core power domain.
Therefore, there is a need to implement the tree in a way which facilitates this
distinction between a leaf and non-leaf node and any associated
optimizations.
------
Design
------
### Describing a power domain tree
To fulfill requirement 1., the existing platform APIs
`plat_get_aff_count()` and `plat_get_aff_state()` have been
removed. A platform must define an array of unsigned chars such that:
1. The first entry in the array specifies the number of power domains at the
highest power level implemented in the platform. This caters for platforms
where the power domain tree does not have a single root node, for example,
the FVP has two cluster power domains at the highest level (1).
2. Each subsequent entry corresponds to a power domain and contains the number
of power domains that are its direct children.
3. The size of the array minus the first entry will be equal to the number of
non-leaf power domains.
4. The value in each entry in the array is used to find the number of entries
to consider at the next level. The sum of the values (number of children) of
all the entries at a level specifies the number of entries in the array for
the next level.
The following example power domain topology tree will be used to describe the
above text further. The leaf and non-leaf nodes in this tree have been numbered
separately.
```
+-+
|0|
+-+
/ \
/ \
/ \
/ \
/ \
/ \
/ \
/ \
/ \
/ \
+-+ +-+
|1| |2|
+-+ +-+
/ \ / \
/ \ / \
/ \ / \
/ \ / \
+-+ +-+ +-+ +-+
|3| |4| |5| |6|
+-+ +-+ +-+ +-+
+---+-----+ +----+----+ +----+----+ +----+-----+-----+
| | | | | | | | | | | | |
| | | | | | | | | | | | |
v v v v v v v v v v v v v
+-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +--+ +--+ +--+
|0| |1| |2| |3| |4| |5| |6| |7| |8| |9| |10| |11| |12|
+-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +-+ +--+ +--+ +--+
```
This tree is defined by the platform as the array described above as follows:
```
#define PLAT_NUM_POWER_DOMAINS 20
#define PLATFORM_CORE_COUNT 13
#define PSCI_NUM_NON_CPU_PWR_DOMAINS \
(PLAT_NUM_POWER_DOMAINS - PLATFORM_CORE_COUNT)
unsigned char plat_power_domain_tree_desc[] = { 1, 2, 2, 2, 3, 3, 3, 4};
```
### Removing assumptions about MPIDRs used in a platform
To fulfill requirement 2., it is assumed that the platform assigns a
unique number (core index) between `0` and `PLATFORM_CORE_COUNT - 1` to each core
power domain. MPIDRs could be allocated in any manner and will not be used to
populate the tree.
`plat_core_pos_by_mpidr(mpidr)` will return the core index for the core
corresponding to the MPIDR. It will return an error (-1) if an MPIDR is passed
which is not allocated or corresponds to an absent core. The semantics of this
platform API have changed since it is required to validate the passed MPIDR. It
has been made a mandatory API as a result.
Another mandatory API, `plat_my_core_pos()` has been added to return the core
index for the calling core. This API provides a more lightweight mechanism to get
the index since there is no need to validate the MPIDR of the calling core.
The platform should assign the core indices (as illustrated in the diagram above)
such that, if the core nodes are numbered from left to right, then the index
for a core domain will be the same as the index returned by
`plat_core_pos_by_mpidr()` or `plat_my_core_pos()` for that core. This
relationship allows the core nodes to be allocated in a separate array
(requirement 4.) during `psci_setup()` in such an order that the index of the
core in the array is the same as the return value from these APIs.
#### Dealing with holes in MPIDR allocation
For platforms where the number of allocated MPIDRs is equal to the number of
core power domains, for example, Juno and FVPs, the logic to convert an MPIDR to
a core index should remain unchanged. Both Juno and FVP use a simple collision
proof hash function to do this.
It is possible that on some platforms, the allocation of MPIDRs is not
contiguous or certain cores have been disabled. This essentially means that the
MPIDRs have been sparsely allocated, that is, the size of the range of MPIDRs
used by the platform is not equal to the number of core power domains.
The platform could adopt one of the following approaches to deal with this
scenario:
1. Implement more complex logic to convert a valid MPIDR to a core index while
maintaining the relationship described earlier. This means that the power
domain tree descriptor will not describe any core power domains which are
disabled or absent. Entries will not be allocated in the tree for these
domains.
2. Treat unallocated MPIDRs and disabled cores as absent but still describe them
in the power domain descriptor, that is, the number of core nodes described
is equal to the size of the range of MPIDRs allocated. This approach will
lead to memory wastage since entries will be allocated in the tree but will
allow use of a simpler logic to convert an MPIDR to a core index.
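As an illustration of the first approach, a hypothetical platform with 4 CPUs
per cluster, where cluster 1 is absent, could convert MPIDRs without leaving a
hole in the core indices:

```
#include <arch.h>
#include <arch_helpers.h>
#include <types.h>

int plat_core_pos_by_mpidr(u_register_t mpidr)
{
	unsigned int cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) &
						MPIDR_AFFLVL_MASK;
	unsigned int cpu_id = mpidr & MPIDR_AFFLVL_MASK;

	if (cpu_id >= 4)
		return -1;

	switch (cluster_id) {
	case 0:
		return cpu_id;		/* indices 0 - 3 */
	case 2:
		return 4 + cpu_id;	/* indices 4 - 7: no gap for cluster 1 */
	default:
		return -1;		/* absent cluster or unallocated MPIDR */
	}
}
```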
### Traversing through and distinguishing between core and non-core power domains
To fulfill requirement 3 and 4, separate data structures have been defined
to represent leaf and non-leaf power domain nodes in the tree.
```
/*******************************************************************************
* The following two data structures implement the power domain tree. The tree
* is used to track the state of all the nodes i.e. power domain instances
* described by the platform. The tree consists of nodes that describe CPU power
* domains i.e. leaf nodes and all other power domains which are parents of a
* CPU power domain i.e. non-leaf nodes.
******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
/*
* Index of the first CPU power domain node level 0 which has this node
* as its parent.
*/
unsigned int cpu_start_idx;
/*
* Number of CPU power domains which are siblings of the domain indexed
* by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
* -> cpu_start_idx + ncpus' have this node as their parent.
*/
unsigned int ncpus;
/* Index of the parent power domain node */
unsigned int parent_node;
-----
} non_cpu_pd_node_t;
typedef struct cpu_pwr_domain_node {
unsigned long mpidr;
/* Index of the parent power domain node */
unsigned int parent_node;
-----
} cpu_pd_node_t;
```
The power domain tree is implemented as a combination of the following data
structures.
```
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
```
### Populating the power domain tree
The `populate_power_domain_tree()` function in `psci_setup.c` implements the
algorithm to parse the power domain descriptor exported by the platform to
populate the two arrays. It is essentially a breadth-first-search. The nodes for
each level starting from the root are laid out one after another in the
`psci_non_cpu_pd_nodes` and `psci_cpu_pd_nodes` arrays as follows:
```
psci_non_cpu_pd_nodes -> [[Level 3 nodes][Level 2 nodes][Level 1 nodes]]
psci_cpu_pd_nodes -> [Level 0 nodes]
```
For the example power domain tree illustrated above, the `psci_cpu_pd_nodes`
will be populated as follows. The value in each entry is the index of the parent
node. Other fields have been ignored for simplicity.
```
+-------------+ ^
CPU0 | 3 | |
+-------------+ |
CPU1 | 3 | |
+-------------+ |
CPU2 | 3 | |
+-------------+ |
CPU3 | 4 | |
+-------------+ |
CPU4 | 4 | |
+-------------+ |
CPU5 | 4 | | PLATFORM_CORE_COUNT
+-------------+ |
CPU6 | 5 | |
+-------------+ |
CPU7 | 5 | |
+-------------+ |
CPU8 | 5 | |
+-------------+ |
CPU9 | 6 | |
+-------------+ |
CPU10 | 6 | |
+-------------+ |
CPU11 | 6 | |
+-------------+ |
CPU12 | 6 | v
+-------------+
```
The `psci_non_cpu_pd_nodes` array will be populated as follows. The value in
each entry is the index of the parent node.
```
+-------------+ ^
PD0 | -1 | |
+-------------+ |
PD1 | 0 | |
+-------------+ |
PD2 | 0 | |
+-------------+ |
PD3 | 1 | | PLAT_NUM_POWER_DOMAINS -
+-------------+ | PLATFORM_CORE_COUNT
PD4 | 1 | |
+-------------+ |
PD5 | 2 | |
+-------------+ |
PD6 | 2 | |
+-------------+ v
```
Each core can find its node in the `psci_cpu_pd_nodes` array using the
`plat_my_core_pos()` function. When a core is turned on, the normal world
provides an MPIDR. The `plat_core_pos_by_mpidr()` function is used to validate
the MPIDR before using it to find the corresponding core node. The non-core power
domain nodes do not need to be identified.
View file
@ -351,7 +351,26 @@ performed.
* `PROGRAMMABLE_RESET_ADDRESS`: This option indicates whether the reset
vector address can be programmed or is fixed on the platform. It can take
either 0 (fixed) or 1 (programmable). Default is 0.
either 0 (fixed) or 1 (programmable). Default is 0. If the platform has a
programmable reset address, it is expected that a CPU will start executing
code directly at the right address, both on a cold and warm reset. In this
case, there is no need to identify the entrypoint on boot and this has
implications for the `plat_get_my_entrypoint()` platform porting interface
(see the [Porting Guide] for details).
* `PSCI_EXTENDED_STATE_ID`: As per the PSCI 1.0 specification, there are two
possible formats for the PSCI power-state parameter, viz the original and
the extended State-ID formats. If set to 1, this flag configures the generic
PSCI layer to use the extended format. The default value of this flag is 0,
which means that by default the original power-state format is used by the
PSCI implementation. This flag should be specified by the platform makefile
and it governs the return value of the PSCI_FEATURES API for the CPU_SUSPEND
SMC function ID.
* `WARN_DEPRECATED`: This option decides whether to warn about the usage of
deprecated platform APIs and context management helpers within Trusted
Firmware. It can take the value 1 (warn about the use of deprecated APIs) or
0. The default is 0.
#### ARM development platform specific build options
@ -380,6 +399,14 @@ map is explained in the [Firmware Design].
this option, `arm_rotprivk_rsa.pem` must be specified as `ROT_KEY` when
creating the certificates.
* `ARM_RECOM_STATE_ID_ENC`: The PSCI1.0 specification recommends an encoding
for the construction of composite state-ID in the power-state parameter.
The existing PSCI clients currently do not support this encoding of
State-ID yet. Hence this flag is used to configure whether to use the
recommended State-ID encoding or not. The default value of this flag is 0,
in which case the platform is configured to expect NULL in the State-ID
field of power-state parameter.
#### ARM CSS platform specific build options
* `CSS_DETECT_PRE_1_7_0_SCP`: Boolean flag to detect SCP version
@ -1070,4 +1097,5 @@ _Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved._
[Juno Software Guide]: http://community.arm.com/docs/DOC-8396
[DS-5]: http://www.arm.com/products/tools/software-tools/ds-5/index.php
[mbedTLS Repository]: https://github.com/ARMmbed/mbedtls.git
[Porting Guide]: ./porting-guide.md
[Trusted Board Boot]: trusted-board-boot.md
View file
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,6 +31,7 @@
#ifndef __CM_H__
#define __CM_H__
#include <common_def.h>
#include <cpu_data.h>
#include <stdint.h>
@ -43,13 +44,23 @@ struct entry_point_info;
* Function & variable prototypes
******************************************************************************/
void cm_init(void);
void *cm_get_context_by_mpidr(uint64_t mpidr, uint32_t security_state);
void *cm_get_context_by_mpidr(uint64_t mpidr,
uint32_t security_state) __warn_deprecated;
static inline void *cm_get_context(uint32_t security_state);
void cm_set_context_by_mpidr(uint64_t mpidr,
void *context,
uint32_t security_state);
uint32_t security_state) __warn_deprecated;
void *cm_get_context_by_index(unsigned int cpu_idx,
unsigned int security_state);
void cm_set_context_by_index(unsigned int cpu_idx,
void *context,
unsigned int security_state);
static inline void cm_set_context(void *context, uint32_t security_state);
void cm_init_context(uint64_t mpidr, const struct entry_point_info *ep);
void cm_init_context(uint64_t mpidr,
const struct entry_point_info *ep) __warn_deprecated;
void cm_init_my_context(const struct entry_point_info *ep);
void cm_init_context_by_index(unsigned int cpu_idx,
const struct entry_point_info *ep);
void cm_prepare_el3_exit(uint32_t security_state);
void cm_el1_sysregs_context_save(uint32_t security_state);
void cm_el1_sysregs_context_restore(uint32_t security_state);
View file
@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -103,7 +103,6 @@ CASSERT(CPU_DATA_CPU_OPS_PTR == __builtin_offsetof
assert_cpu_data_cpu_ops_ptr_offset_mismatch);
struct cpu_data *_cpu_data_by_index(uint32_t cpu_index);
struct cpu_data *_cpu_data_by_mpidr(uint64_t mpidr);
/* Return the cpu_data structure for the current CPU. */
static inline struct cpu_data *_cpu_data(void)
@ -123,12 +122,13 @@ void init_cpu_ops(void);
#define set_cpu_data(_m, _v) _cpu_data()->_m = _v
#define get_cpu_data_by_index(_ix, _m) _cpu_data_by_index(_ix)->_m
#define set_cpu_data_by_index(_ix, _m, _v) _cpu_data_by_index(_ix)->_m = _v
#define get_cpu_data_by_mpidr(_id, _m) _cpu_data_by_mpidr(_id)->_m
#define set_cpu_data_by_mpidr(_id, _m, _v) _cpu_data_by_mpidr(_id)->_m = _v
#define flush_cpu_data(_m) flush_dcache_range((uint64_t) \
&(_cpu_data()->_m), \
sizeof(_cpu_data()->_m))
#define inv_cpu_data(_m) inv_dcache_range((uint64_t) \
&(_cpu_data()->_m), \
sizeof(_cpu_data()->_m))
#define flush_cpu_data_by_index(_ix, _m) \
flush_dcache_range((uint64_t) \
&(_cpu_data_by_index(_ix)->_m), \
View file
@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -32,17 +32,33 @@
#define __PSCI_H__
#include <bakery_lock.h>
#include <platform_def.h> /* for PLATFORM_NUM_AFFS */
#include <platform_def.h> /* for PLAT_NUM_PWR_DOMAINS */
#if ENABLE_PLAT_COMPAT
#include <psci_compat.h>
#endif
/*******************************************************************************
* Number of affinity instances whose state this psci imp. can track
* Number of power domains whose state this psci imp. can track
******************************************************************************/
#ifdef PLATFORM_NUM_AFFS
#define PSCI_NUM_AFFS PLATFORM_NUM_AFFS
#ifdef PLAT_NUM_PWR_DOMAINS
#define PSCI_NUM_PWR_DOMAINS PLAT_NUM_PWR_DOMAINS
#else
#define PSCI_NUM_AFFS (2 * PLATFORM_CORE_COUNT)
#define PSCI_NUM_PWR_DOMAINS (2 * PLATFORM_CORE_COUNT)
#endif
#define PSCI_NUM_NON_CPU_PWR_DOMAINS (PSCI_NUM_PWR_DOMAINS - \
PLATFORM_CORE_COUNT)
/* This is the power level corresponding to a CPU */
#define PSCI_CPU_PWR_LVL 0
/*
* The maximum power level supported by PSCI. Since PSCI CPU_SUSPEND
* uses the old power_state parameter format which has 2 bits to specify the
* power level, this constant is defined to be 3.
*/
#define PSCI_MAX_PWR_LVL 3
/*******************************************************************************
* Defines for runtime services func ids
******************************************************************************/
@ -84,27 +100,35 @@
* PSCI CPU_SUSPEND 'power_state' parameter specific defines
******************************************************************************/
#define PSTATE_ID_SHIFT 0
#define PSTATE_TYPE_SHIFT 16
#define PSTATE_AFF_LVL_SHIFT 24
#if PSCI_EXTENDED_STATE_ID
#define PSTATE_VALID_MASK 0xB0000000
#define PSTATE_TYPE_SHIFT 30
#define PSTATE_ID_MASK 0xfffffff
#else
#define PSTATE_VALID_MASK 0xFCFE0000
#define PSTATE_TYPE_SHIFT 16
#define PSTATE_PWR_LVL_SHIFT 24
#define PSTATE_ID_MASK 0xffff
#define PSTATE_TYPE_MASK 0x1
#define PSTATE_AFF_LVL_MASK 0x3
#define PSTATE_VALID_MASK 0xFCFE0000
#define PSTATE_PWR_LVL_MASK 0x3
#define psci_get_pstate_pwrlvl(pstate) (((pstate) >> PSTATE_PWR_LVL_SHIFT) & \
PSTATE_PWR_LVL_MASK)
#define psci_make_powerstate(state_id, type, pwrlvl) \
(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
(((pwrlvl) & PSTATE_PWR_LVL_MASK) << PSTATE_PWR_LVL_SHIFT)
#endif /* PSCI_EXTENDED_STATE_ID */
#define PSTATE_TYPE_STANDBY 0x0
#define PSTATE_TYPE_POWERDOWN 0x1
#define PSTATE_TYPE_MASK 0x1
#define psci_get_pstate_id(pstate) (((pstate) >> PSTATE_ID_SHIFT) & \
PSTATE_ID_MASK)
#define psci_get_pstate_type(pstate) (((pstate) >> PSTATE_TYPE_SHIFT) & \
PSTATE_TYPE_MASK)
#define psci_get_pstate_afflvl(pstate) (((pstate) >> PSTATE_AFF_LVL_SHIFT) & \
PSTATE_AFF_LVL_MASK)
#define psci_make_powerstate(state_id, type, afflvl) \
(((state_id) & PSTATE_ID_MASK) << PSTATE_ID_SHIFT) |\
(((type) & PSTATE_TYPE_MASK) << PSTATE_TYPE_SHIFT) |\
(((afflvl) & PSTATE_AFF_LVL_MASK) << PSTATE_AFF_LVL_SHIFT)
#define psci_check_power_state(pstate) ((pstate) & PSTATE_VALID_MASK)
/*******************************************************************************
* PSCI CPU_FEATURES feature flag specific defines
@ -113,6 +137,11 @@
#define FF_PSTATE_SHIFT 1
#define FF_PSTATE_ORIG 0
#define FF_PSTATE_EXTENDED 1
#if PSCI_EXTENDED_STATE_ID
#define FF_PSTATE FF_PSTATE_EXTENDED
#else
#define FF_PSTATE FF_PSTATE_ORIG
#endif
/* Features flags for CPU SUSPEND OS Initiated mode support. Bits [0:0] */
#define FF_MODE_SUPPORT_SHIFT 0
@ -136,33 +165,74 @@
#define PSCI_E_INTERN_FAIL -6
#define PSCI_E_NOT_PRESENT -7
#define PSCI_E_DISABLED -8
#define PSCI_E_INVALID_ADDRESS -9
/*******************************************************************************
* PSCI affinity state related constants. An affinity instance could be present
* or absent physically to cater for asymmetric topologies. If present then it
* could in one of the 4 further defined states.
******************************************************************************/
#define PSCI_STATE_SHIFT 1
#define PSCI_STATE_MASK 0xff
#define PSCI_AFF_ABSENT 0x0
#define PSCI_AFF_PRESENT 0x1
#define PSCI_STATE_ON 0x0
#define PSCI_STATE_OFF 0x1
#define PSCI_STATE_ON_PENDING 0x2
#define PSCI_STATE_SUSPEND 0x3
#define PSCI_INVALID_DATA -1
#define get_phys_state(x) (x != PSCI_STATE_ON ? \
PSCI_STATE_OFF : PSCI_STATE_ON)
#define psci_validate_power_state(pstate) (pstate & PSTATE_VALID_MASK)
#define PSCI_INVALID_MPIDR ~((u_register_t)0)
#ifndef __ASSEMBLY__
#include <stdint.h>
#include <types.h>
/*
* These are the states reported by the PSCI_AFFINITY_INFO API for the specified
* CPU. The definitions of these states can be found in Section 5.7.1 in the
* PSCI specification (ARM DEN 0022C).
*/
typedef enum {
AFF_STATE_ON = 0,
AFF_STATE_OFF = 1,
AFF_STATE_ON_PENDING = 2
} aff_info_state_t;
/*
* Macro to represent an invalid power level within PSCI.
*/
#define PSCI_INVALID_PWR_LVL (PLAT_MAX_PWR_LVL + 1)
/*
* Type for representing the local power state at a particular level.
*/
typedef uint8_t plat_local_state_t;
/* The local state macro used to represent RUN state. */
#define PSCI_LOCAL_STATE_RUN 0
/*
* Macro to test whether the plat_local_state is RUN state
*/
#define is_local_state_run(plat_local_state) \
((plat_local_state) == PSCI_LOCAL_STATE_RUN)
/*
* Macro to test whether the plat_local_state is RETENTION state
*/
#define is_local_state_retn(plat_local_state) \
(((plat_local_state) > PSCI_LOCAL_STATE_RUN) && \
((plat_local_state) <= PLAT_MAX_RET_STATE))
/*
* Macro to test whether the plat_local_state is OFF state
*/
#define is_local_state_off(plat_local_state) \
(((plat_local_state) > PLAT_MAX_RET_STATE) && \
((plat_local_state) <= PLAT_MAX_OFF_STATE))
/*****************************************************************************
* This data structure defines the representation of the power state parameter
* for its exchange between the generic PSCI code and the platform port. For
* example, it is used by the platform port to specify the requested power
* states during a power management operation. It is used by the generic code to
* inform the platform about the target power states that each level should
* enter.
****************************************************************************/
typedef struct psci_power_state {
/*
* The pwr_domain_state[] stores the local power state at each level
* for the CPU.
*/
plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1];
} psci_power_state_t;
/*******************************************************************************
* Structure used to store per-cpu information relevant to the PSCI service.
@ -170,11 +240,19 @@
* this information will not reside on a cache line shared with another cpu.
******************************************************************************/
typedef struct psci_cpu_data {
uint32_t power_state;
uint32_t max_phys_off_afflvl; /* Highest affinity level in physically
powered off state */
/* State as seen by PSCI Affinity Info API */
aff_info_state_t aff_info_state;
/*
* Highest power level which takes part in a power management
* operation.
*/
unsigned char target_pwrlvl;
/* The local power state of this CPU */
plat_local_state_t local_state;
#if !USE_COHERENT_MEM
bakery_info_t pcpu_bakery_info[PSCI_NUM_AFFS];
bakery_info_t pcpu_bakery_info[PSCI_NUM_NON_CPU_PWR_DOMAINS];
#endif
} psci_cpu_data_t;
@ -182,25 +260,22 @@ typedef struct psci_cpu_data {
* Structure populated by platform specific code to export routines which
* perform common low level pm functions
******************************************************************************/
typedef struct plat_pm_ops {
void (*affinst_standby)(unsigned int power_state);
int (*affinst_on)(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_off)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend)(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend_finish)(unsigned int afflvl,
unsigned int state);
typedef struct plat_psci_ops {
void (*cpu_standby)(plat_local_state_t cpu_state);
int (*pwr_domain_on)(u_register_t mpidr);
void (*pwr_domain_off)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend)(const psci_power_state_t *target_state);
void (*pwr_domain_on_finish)(const psci_power_state_t *target_state);
void (*pwr_domain_suspend_finish)(
const psci_power_state_t *target_state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
unsigned int (*get_sys_suspend_power_state)(void);
} plat_pm_ops_t;
int (*validate_power_state)(unsigned int power_state,
psci_power_state_t *req_state);
int (*validate_ns_entrypoint)(uintptr_t ns_entrypoint);
void (*get_sys_suspend_power_state)(
psci_power_state_t *req_state);
} plat_psci_ops_t;
/*******************************************************************************
* Optional structure populated by the Secure Payload Dispatcher to be given a
@ -224,22 +299,23 @@ typedef struct spd_pm_ops {
* Function & Data prototypes
******************************************************************************/
unsigned int psci_version(void);
int psci_affinity_info(unsigned long, unsigned int);
int psci_migrate(unsigned long);
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint,
u_register_t context_id);
int psci_cpu_suspend(unsigned int power_state,
uintptr_t entrypoint,
u_register_t context_id);
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id);
int psci_cpu_off(void);
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level);
int psci_migrate(u_register_t target_cpu);
int psci_migrate_info_type(void);
long psci_migrate_info_up_cpu(void);
int psci_cpu_on(unsigned long,
unsigned long,
unsigned long);
int psci_features(unsigned int psci_fid);
void __dead2 psci_power_down_wfi(void);
void psci_aff_on_finish_entry(void);
void psci_aff_suspend_finish_entry(void);
void psci_entrypoint(void);
void psci_register_spd_pm_hook(const spd_pm_ops_t *);
int psci_get_suspend_stateid_by_mpidr(unsigned long);
int psci_get_suspend_stateid(void);
int psci_get_suspend_afflvl(void);
uint32_t psci_get_max_phys_off_afflvl(void);
uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t x1,
uint64_t x2,
@ -250,10 +326,8 @@ uint64_t psci_smc_handler(uint32_t smc_fid,
uint64_t flags);
/* PSCI setup function */
int32_t psci_setup(void);
int psci_setup(void);
#endif /*__ASSEMBLY__*/
#endif /* __PSCI_H__ */
View file
@ -0,0 +1,116 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __PSCI_COMPAT_H__
#define __PSCI_COMPAT_H__
#include <arch.h>
#include <platform_def.h>
#ifndef __ASSEMBLY__
/*
* The below declarations are to enable compatibility for the platform ports
* using the old platform interface and psci helpers.
*/
#define PLAT_MAX_PWR_LVL PLATFORM_MAX_AFFLVL
#define PLAT_NUM_PWR_DOMAINS PLATFORM_NUM_AFFS
/*******************************************************************************
* PSCI affinity related constants. An affinity instance could
* be present or absent physically to cater for asymmetric topologies.
******************************************************************************/
#define PSCI_AFF_ABSENT 0x0
#define PSCI_AFF_PRESENT 0x1
#define PSCI_STATE_ON 0x0
#define PSCI_STATE_OFF 0x1
#define PSCI_STATE_ON_PENDING 0x2
#define PSCI_STATE_SUSPEND 0x3
/*
* Using the compatibility platform interfaces means that the local states
* used in psci_power_state_t need only convey whether it is a power down
* or standby state. The onus is on the platform port to do the right thing
* including the state coordination in case multiple power down states are
* involved. Hence if we assume 3 generic states viz, run, standby and
* power down, we can assign 1 and 2 to standby and power down respectively.
*/
#define PLAT_MAX_RET_STATE 1
#define PLAT_MAX_OFF_STATE 2
/*
* Macro to represent invalid affinity level within PSCI.
*/
#define PSCI_INVALID_DATA -1
#define psci_get_pstate_afflvl(pstate) psci_get_pstate_pwrlvl(pstate)
/*
* This array stores the 'power_state' requests of each CPU during
* CPU_SUSPEND and SYSTEM_SUSPEND which will be populated by the
* compatibility layer when appropriate platform hooks are invoked.
*/
extern unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
/*******************************************************************************
* Structure populated by platform specific code to export routines which
* perform common low level pm functions
******************************************************************************/
typedef struct plat_pm_ops {
void (*affinst_standby)(unsigned int power_state);
int (*affinst_on)(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_off)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend)(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state);
void (*affinst_on_finish)(unsigned int afflvl, unsigned int state);
void (*affinst_suspend_finish)(unsigned int afflvl,
unsigned int state);
void (*system_off)(void) __dead2;
void (*system_reset)(void) __dead2;
int (*validate_power_state)(unsigned int power_state);
int (*validate_ns_entrypoint)(unsigned long ns_entrypoint);
unsigned int (*get_sys_suspend_power_state)(void);
} plat_pm_ops_t;
/*******************************************************************************
* Function & Data prototypes to enable compatibility for older platform ports
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long);
int psci_get_suspend_stateid(void);
int psci_get_suspend_powerstate(void);
unsigned int psci_get_max_phys_off_afflvl(void);
int psci_get_suspend_afflvl(void);
#endif /* __ASSEMBLY__ */
#endif /* __PSCI_COMPAT_H__ */
View file
@ -99,6 +99,29 @@
.size \_name, . - \_name
.endm
/*
* These macros are used to create function labels for deprecated
* APIs. If WARN_DEPRECATED is non-zero, the callers of these APIs
* will fail to link, causing a build failure.
*/
#if WARN_DEPRECATED
.macro func_deprecated _name
func deprecated\_name
.endm
.macro endfunc_deprecated _name
endfunc deprecated\_name
.endm
#else
.macro func_deprecated _name
func \_name
.endm
.macro endfunc_deprecated _name
endfunc \_name
.endm
#endif
/*
* This macro declares an array of 1 or more stacks, properly
* aligned and in the requested section
@ -115,6 +138,7 @@
.space ((\_count) * (\_size)), 0
.endm
#if ENABLE_PLAT_COMPAT
/*
* This macro calculates the base address of an MP stack using the
* platform_get_core_pos() index, the name of the stack storage and
@ -129,6 +153,21 @@
mov x1, #\_size
madd x0, x0, x1, x2
.endm
#endif
/*
* This macro calculates the base address of the current CPU's MP stack
* using the plat_my_core_pos() index, the name of the stack storage
* and the size of each stack
* Out: X0 = physical address of stack base
* Clobber: X30, X1, X2
*/
.macro get_my_mp_stack _name, _size
bl plat_my_core_pos
ldr x2, =(\_name + \_size)
mov x1, #\_size
madd x0, x0, x1, x2
.endm
/*
* This macro calculates the base address of a UP stack using the

View file

@ -164,8 +164,7 @@
* then it means it is a warm boot so jump to this address.
* -------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_get_entrypoint
bl plat_get_my_entrypoint
cbz x0, do_cold_boot
br x0
@ -181,9 +180,8 @@
* of that state and allows entry into the OS.
* -------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_is_primary_cpu
cbnz x0, do_primary_cold_boot
bl plat_is_my_cpu_primary
cbnz w0, do_primary_cold_boot
/* This is a cold boot on a secondary CPU */
bl plat_secondary_cold_boot_setup
@ -249,8 +247,7 @@
* moment.
* ---------------------------------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_set_stack
bl plat_set_my_stack
.endm
#endif /* __EL3_COMMON_MACROS_S__ */

View file

@ -30,6 +30,7 @@
#ifndef __ARM_DEF_H__
#define __ARM_DEF_H__
#include <arch.h>
#include <common_def.h>
#include <platform_def.h>
#include <tbbr_img_def.h>
@ -47,6 +48,25 @@
#define ARM_CACHE_WRITEBACK_SHIFT 6
/*
* Macros mapping the MPIDR Affinity levels to ARM Platform Power levels. The
* power levels have a 1:1 mapping with the MPIDR affinity levels.
*/
#define ARM_PWR_LVL0 MPIDR_AFFLVL0
#define ARM_PWR_LVL1 MPIDR_AFFLVL1
/*
* Macros for local power states in ARM platforms encoded by State-ID field
* within the power-state parameter.
*/
/* Local power state for power domains in Run state. */
#define ARM_LOCAL_STATE_RUN 0
/* Local power state for retention. Valid only for CPU power domains */
#define ARM_LOCAL_STATE_RET 1
/*
 * Local power state for OFF/power-down. Valid for CPU and cluster power
 * domains.
 */
#define ARM_LOCAL_STATE_OFF 2
/* Memory location options for TSP */
#define ARM_TRUSTED_SRAM_ID 0
#define ARM_TRUSTED_DRAM_ID 1
@ -163,9 +183,22 @@
#define ADDR_SPACE_SIZE (1ull << 32)
#define PLATFORM_NUM_AFFS (ARM_CLUSTER_COUNT + \
#define PLAT_NUM_PWR_DOMAINS (ARM_CLUSTER_COUNT + \
PLATFORM_CORE_COUNT)
#define PLATFORM_MAX_AFFLVL MPIDR_AFFLVL1
#define PLAT_MAX_PWR_LVL ARM_PWR_LVL1
/*
 * This macro defines the deepest retention state possible. A higher state
 * ID will represent an invalid or a power-down state.
 */
#define PLAT_MAX_RET_STATE ARM_LOCAL_STATE_RET
/*
 * This macro defines the deepest power-down state possible. Any state ID
 * higher than this is invalid.
 */
#define PLAT_MAX_OFF_STATE ARM_LOCAL_STATE_OFF
#define PLATFORM_CORE_COUNT (PLAT_ARM_CLUSTER0_CORE_COUNT + \
PLAT_ARM_CLUSTER1_CORE_COUNT)

View file

@ -148,6 +148,35 @@ CASSERT(PLAT_PCPU_DATA_SIZE == sizeof(arm_cpu_data_t),
#endif /* IMAGE_BL31 */
#if ARM_RECOM_STATE_ID_ENC
/*
* Macros used to parse state information from State-ID if it is using the
* recommended encoding for State-ID.
*/
#define ARM_LOCAL_PSTATE_WIDTH 4
#define ARM_LOCAL_PSTATE_MASK ((1 << ARM_LOCAL_PSTATE_WIDTH) - 1)
/* Macros to construct the composite power state */
/* Make composite power state parameter up to power level 0 */
#if PSCI_EXTENDED_STATE_ID
#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | ((type) << PSTATE_TYPE_SHIFT))
#else
#define arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type) \
(((lvl0_state) << PSTATE_ID_SHIFT) | \
((pwr_lvl) << PSTATE_PWR_LVL_SHIFT) | \
((type) << PSTATE_TYPE_SHIFT))
#endif /* PSCI_EXTENDED_STATE_ID */
/* Make composite power state parameter up to power level 1 */
#define arm_make_pwrstate_lvl1(lvl1_state, lvl0_state, pwr_lvl, type) \
(((lvl1_state) << ARM_LOCAL_PSTATE_WIDTH) | \
arm_make_pwrstate_lvl0(lvl0_state, pwr_lvl, type))
#endif /* ARM_RECOM_STATE_ID_ENC */
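As a worked example, assuming the original power-state layout (State-ID in bits [15:0], state type in bit 16, power level in bits [25:24]) and the local state values defined for ARM platforms:
/*
 * arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
 *                        ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN)
 *   = (2 << 4) | (2 << 0) | (1 << 24) | (1 << 16)
 *   = 0x01010022
 * i.e. State-ID 0x22 (CPU and cluster both OFF), power level 1,
 * power-down type.
 */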
/* CCI utility functions */
void arm_cci_init(void);
@ -159,8 +188,12 @@ void arm_io_setup(void);
void arm_tzc_setup(void);
/* PM utility functions */
int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state);
int arm_validate_power_state(unsigned int power_state);
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state);
int arm_validate_ns_entrypoint(uintptr_t entrypoint);
/* Topology utility function */
int arm_check_mpidr(u_register_t mpidr);
/* BL1 utility functions */
void arm_bl1_early_platform_setup(void);
@ -199,7 +232,7 @@ int plat_arm_get_alt_image_source(
unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
void plat_arm_topology_setup(void);
unsigned int plat_arm_calc_core_pos(u_register_t mpidr);
#endif /* __PLAT_ARM_H__ */

View file

@ -39,8 +39,7 @@
*************************************************************************/
#define MHU_PAYLOAD_CACHED 0
#define TRUSTED_MAILBOXES_BASE ARM_TRUSTED_SRAM_BASE
#define TRUSTED_MAILBOX_SHIFT 4
#define TRUSTED_MAILBOX_BASE ARM_TRUSTED_SRAM_BASE
#define NSROM_BASE 0x1f000000
#define NSROM_SIZE 0x00001000

View file

@ -67,6 +67,17 @@
#define MAKE_ULL(x) x
#endif
/*
 * Macros to wrap declarations of deprecated APIs within Trusted Firmware.
 * The callers of these APIs will continue to compile as long as the build
 * flag WARN_DEPRECATED is zero. Otherwise, the compiler will emit a warning
 * when the callers of these APIs are compiled.
 */
#if WARN_DEPRECATED
#define __warn_deprecated __attribute__ ((deprecated))
#else
#define __warn_deprecated
#endif
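For instance, the compatibility declaration later in this patch applies the macro like this; building with WARN_DEPRECATED=1 then flags every remaining caller at compile time:
unsigned int platform_get_core_pos(unsigned long mpidr) __warn_deprecated;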
#endif /* __COMMON_DEF_H__ */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -31,13 +31,14 @@
#ifndef __PLATFORM_H__
#define __PLATFORM_H__
#include <psci.h>
#include <stdint.h>
#include <types.h>
/*******************************************************************************
* Forward declarations
******************************************************************************/
struct plat_pm_ops;
struct meminfo;
struct image_info;
struct entry_point_info;
@ -59,6 +60,8 @@ int plat_get_image_source(unsigned int image_id,
uintptr_t *dev_handle,
uintptr_t *image_spec);
unsigned long plat_get_ns_image_entrypoint(void);
unsigned int plat_my_core_pos(void);
int plat_core_pos_by_mpidr(u_register_t mpidr);
/*******************************************************************************
* Mandatory interrupt management functions
@ -74,8 +77,7 @@ uint32_t plat_interrupt_type_to_line(uint32_t type,
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
unsigned long platform_get_stack(unsigned long mpidr);
unsigned long plat_get_my_stack(void);
void plat_report_exception(unsigned long);
int plat_crash_console_init(void);
int plat_crash_console_putc(int c);
@ -181,9 +183,16 @@ struct entry_point_info *bl31_plat_get_next_image_ep_info(uint32_t type);
/*******************************************************************************
* Mandatory PSCI functions (BL3-1)
******************************************************************************/
int platform_setup_pm(const struct plat_pm_ops **);
unsigned int plat_get_aff_count(unsigned int, unsigned long);
unsigned int plat_get_aff_state(unsigned int, unsigned long);
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const struct plat_psci_ops **);
const unsigned char *plat_get_power_domain_tree_desc(void);
/*******************************************************************************
* Optional PSCI functions (BL3-1).
******************************************************************************/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu);
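A sketch of one plausible default coordination policy for this optional hook, assuming the convention that a numerically smaller local state is shallower (this illustrates what a weak generic implementation could do, not necessarily the library's exact code):
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
					     const plat_local_state_t *states,
					     unsigned int ncpu)
{
	plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;

	assert(ncpu);

	/* Choose the shallowest (numerically smallest) requested state,
	 * so no CPU is taken into a deeper state than it asked for. */
	do {
		temp = *states++;
		if (temp < target)
			target = temp;
	} while (--ncpu);

	return target;
}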
/*******************************************************************************
* Optional BL3-1 functions (may be overridden)
@ -201,4 +210,31 @@ void bl32_plat_enable_mmu(uint32_t flags);
int plat_get_rotpk_info(void *cookie, void **key_ptr, unsigned int *key_len,
unsigned int *flags);
#if ENABLE_PLAT_COMPAT
/*
 * The declarations below enable compatibility for platform ports using
 * the old platform interface.
 */
/*******************************************************************************
* Optional common functions (may be overridden)
******************************************************************************/
unsigned int platform_get_core_pos(unsigned long mpidr);
/*******************************************************************************
* Mandatory PSCI Compatibility functions (BL3-1)
******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **);
unsigned int plat_get_aff_count(unsigned int, unsigned long);
unsigned int plat_get_aff_state(unsigned int, unsigned long);
#else
/*
 * The function below enables Trusted Firmware components, such as SPDs,
 * which haven't migrated to the new platform API to compile on platforms
 * which have the compatibility layer disabled.
 */
unsigned int platform_get_core_pos(unsigned long mpidr) __warn_deprecated;
#endif /* ENABLE_PLAT_COMPAT */
#endif /* __PLATFORM_H__ */

View file

@ -128,7 +128,7 @@ void bakery_lock_get(bakery_lock_t *bakery)
unsigned int my_ticket, my_prio, their_ticket;
unsigned int their_bakery_data;
me = platform_get_core_pos(read_mpidr_el1());
me = plat_my_core_pos();
assert_bakery_entry_valid(me, bakery);
@ -174,7 +174,7 @@ void bakery_lock_get(bakery_lock_t *bakery)
/* Release the lock and signal contenders */
void bakery_lock_release(bakery_lock_t *bakery)
{
unsigned int me = platform_get_core_pos(read_mpidr_el1());
unsigned int me = plat_my_core_pos();
assert_bakery_entry_valid(me, bakery);
assert(bakery_ticket_number(bakery->lock_data[me]));

View file

@ -148,7 +148,7 @@ void bakery_lock_get(unsigned int id, unsigned int offset)
bakery_info_t *their_bakery_info;
unsigned int their_bakery_data;
me = platform_get_core_pos(read_mpidr_el1());
me = plat_my_core_pos();
is_cached = read_sctlr_el3() & SCTLR_C_BIT;

View file

@ -37,9 +37,8 @@
#include "../fvp_def.h"
.globl plat_secondary_cold_boot_setup
.globl platform_get_entrypoint
.globl platform_mem_init
.globl platform_is_primary_cpu
.globl plat_get_my_entrypoint
.globl plat_is_my_cpu_primary
.macro fvp_choose_gicmmap param1, param2, x_tmp, w_tmp, res
ldr \x_tmp, =V2M_SYSREGS_BASE + V2M_SYS_ID
@ -96,30 +95,31 @@ cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
* Main job of this routine is to distinguish between a cold and warm
* boot. On FVP, this information can be queried from the power
* controller. The Power Control SYS Status Register (PSYSR) indicates
* the wake-up reason for the CPU.
*
* For a cold boot, return 0.
* For a warm boot, read the mailbox and return the address it contains.
*
* TODO: Not a good idea to save lr in a temp reg
* TODO: PSYSR is a common register and should be
* accessed using locks. Since it's not possible
* to use locks immediately after a cold reset
* we are relying on the fact that after a cold
* reset all cpus will read the same WK field
* -----------------------------------------------------
* ---------------------------------------------------------------------
*/
func platform_get_entrypoint
mov x9, x30 // lr
mov x2, x0
func plat_get_my_entrypoint
/* ---------------------------------------------------------------------
* When bit PSYSR.WK indicates either "Wake by PPONR" or "Wake by GIC
* WakeRequest signal" then it is a warm boot.
* ---------------------------------------------------------------------
*/
mrs x2, mpidr_el1
ldr x1, =PWRC_BASE
str w2, [x1, #PSYSR_OFF]
ldr w2, [x1, #PSYSR_OFF]
@ -128,53 +128,43 @@ func platform_get_entrypoint
beq warm_reset
cmp w2, #WKUP_GICREQ
beq warm_reset
mov x0, #0
b exit
warm_reset:
/* ---------------------------------------------
* A per-cpu mailbox is maintained in the
* trusted DRAM. It's flushed out of the caches
* after every update using normal memory so
* it's safe to read it here with SO attributes
* ---------------------------------------------
*/
ldr x10, =MBOX_BASE
bl platform_get_core_pos
lsl x0, x0, #ARM_CACHE_WRITEBACK_SHIFT
ldr x0, [x10, x0]
cbz x0, _panic
exit:
ret x9
_panic: b _panic
endfunc platform_get_entrypoint
/* Cold reset */
mov x0, #0
ret
warm_reset:
/* ---------------------------------------------------------------------
* A mailbox is maintained in the trusted SRAM. It is flushed out of the
* caches after every update using normal memory so it is safe to read
* it here with SO attributes.
* ---------------------------------------------------------------------
*/
mov_imm x0, MBOX_BASE
ldr x0, [x0]
cbz x0, _panic
ret
/* ---------------------------------------------------------------------
* The power controller indicates this is a warm reset but the mailbox
* is empty. This should never happen!
* ---------------------------------------------------------------------
*/
_panic:
b _panic
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* void platform_mem_init (void);
* unsigned int plat_is_my_cpu_primary (void);
*
* Zero out the mailbox registers in the shared memory.
* The mmu is turned off right now and only the primary can
* ever execute this code. Secondaries will read the
* mailboxes using SO accesses. In short, BL31 will
* update the mailboxes after mapping the tzdram as
* normal memory. It will flush its copy after update.
* BL1 will always read the mailboxes with the MMU off
* Find out whether the current cpu is the primary
* cpu.
* -----------------------------------------------------
*/
func platform_mem_init
ldr x0, =MBOX_BASE
mov w1, #PLATFORM_CORE_COUNT
loop:
str xzr, [x0], #CACHE_WRITEBACK_GRANULE
subs w1, w1, #1
b.gt loop
ret
endfunc platform_mem_init
func platform_is_primary_cpu
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
and x0, x0, #(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK)
cmp x0, #FVP_PRIMARY_CPU
cset x0, eq
cset w0, eq
ret
endfunc platform_is_primary_cpu
endfunc plat_is_my_cpu_primary

View file

@ -139,7 +139,6 @@
/* Entrypoint mailboxes */
#define MBOX_BASE ARM_SHARED_RAM_BASE
#define MBOX_SIZE 0x200
#endif /* __FVP_DEF_H__ */

View file

@ -44,24 +44,36 @@
#include "fvp_private.h"
typedef volatile struct mailbox {
unsigned long value __aligned(CACHE_WRITEBACK_GRANULE);
} mailbox_t;
#if ARM_RECOM_STATE_ID_ENC
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
const unsigned int arm_pm_idle_states[] = {
/* State-id - 0x01 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
0,
};
#endif
/*******************************************************************************
* Private FVP function to program the mailbox for a cpu before it is released
* from reset.
******************************************************************************/
static void fvp_program_mailbox(uint64_t mpidr, uint64_t address)
static void fvp_program_mailbox(uintptr_t address)
{
uint64_t linear_id;
mailbox_t *fvp_mboxes;
linear_id = platform_get_core_pos(mpidr);
fvp_mboxes = (mailbox_t *)MBOX_BASE;
fvp_mboxes[linear_id].value = address;
flush_dcache_range((unsigned long) &fvp_mboxes[linear_id],
sizeof(unsigned long));
uintptr_t *mailbox = (void *) MBOX_BASE;
*mailbox = address;
flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
}
/*******************************************************************************
@ -93,10 +105,13 @@ static void fvp_cluster_pwrdwn_common(void)
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to enter standby.
* FVP handler called when a CPU is about to enter standby.
******************************************************************************/
void fvp_affinst_standby(unsigned int power_state)
void fvp_cpu_standby(plat_local_state_t cpu_state)
{
assert(cpu_state == ARM_LOCAL_STATE_RET);
/*
* Enter standby state
* dsb is good practice before using wfi to enter low power states
@ -106,24 +121,14 @@ void fvp_affinst_standby(unsigned int power_state)
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned on. The
* level and mpidr determine the affinity instance.
* FVP handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
int fvp_affinst_on(unsigned long mpidr,
unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state)
int fvp_pwr_domain_on(u_register_t mpidr)
{
int rc = PSCI_E_SUCCESS;
unsigned int psysr;
/*
* It's possible to turn on only affinity level 0 i.e. a cpu
* on the FVP. Ignore any other affinity level.
*/
if (afflvl != MPIDR_AFFLVL0)
return rc;
/*
* Ensure that we do not cancel an inflight power off request
* for the target cpu. That would leave it in a zombie wfi.
@ -135,69 +140,54 @@ int fvp_affinst_on(unsigned long mpidr,
psysr = fvp_pwrc_read_psysr(mpidr);
} while (psysr & PSYSR_AFF_L0);
fvp_program_mailbox(mpidr, sec_entrypoint);
fvp_pwrc_write_pponr(mpidr);
return rc;
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take apt
* actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It would be wise to flush a write to the
* global to prevent unpredictable results.
* FVP handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void fvp_affinst_off(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_off(const psci_power_state_t *target_state)
{
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/*
* If execution reaches this stage then this affinity level will be
* suspended. Perform at least the cpu specific actions followed the
* cluster specific operations if applicable.
* If execution reaches this stage then this power domain will be
* suspended. Perform at least the cpu specific actions followed
* by the cluster specific operations if applicable.
*/
fvp_cpu_pwrdwn_common();
if (afflvl != MPIDR_AFFLVL0)
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
* FVP handler called when an affinity instance is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take apt
* actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It would be wise to flush a write to the
* global to prevent unpredictable results.
* FVP handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
void fvp_affinst_suspend(unsigned long sec_entrypoint,
unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_suspend(const psci_power_state_t *target_state)
{
unsigned long mpidr;
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
/*
* FVP has retention only at cpu level. Just return
* as nothing is to be done for retention.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* Program the jump address for this cpu */
fvp_program_mailbox(mpidr, sec_entrypoint);
/* Program the power controller to enable wakeup interrupts. */
fvp_pwrc_set_wen(mpidr);
@ -205,31 +195,29 @@ void fvp_affinst_suspend(unsigned long sec_entrypoint,
fvp_cpu_pwrdwn_common();
/* Perform the common cluster specific operations */
if (afflvl != MPIDR_AFFLVL0)
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF)
fvp_cluster_pwrdwn_common();
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity
* instance. The 'state' arg. allows the platform to decide whether the cluster
* was turned off prior to wakeup and do what's necessary to setup it up
* correctly.
* FVP handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void fvp_affinst_on_finish(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned long mpidr;
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
/* Perform the common cluster specific operations */
if (afflvl != MPIDR_AFFLVL0) {
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF) {
/*
* This CPU might have woken up whilst the cluster was
* attempting to power down. In this case the FVP power
@ -251,9 +239,6 @@ void fvp_affinst_on_finish(unsigned int afflvl,
*/
fvp_pwrc_clr_wen(mpidr);
/* Zero the jump address in the mailbox for this cpu */
fvp_program_mailbox(mpidr, 0);
/* Enable the gic cpu interface */
arm_gic_cpuif_setup();
@ -262,16 +247,22 @@ void fvp_affinst_on_finish(unsigned int afflvl,
}
/*******************************************************************************
* FVP handler called when an affinity instance has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity
* instance.
* FVP handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
void fvp_affinst_suspend_finish(unsigned int afflvl,
unsigned int state)
void fvp_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
fvp_affinst_on_finish(afflvl, state);
/*
 * Nothing to be done on waking up from retention at the CPU level.
 */
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
fvp_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
@ -304,23 +295,28 @@ static void __dead2 fvp_system_reset(void)
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
static const plat_pm_ops_t fvp_plat_pm_ops = {
.affinst_standby = fvp_affinst_standby,
.affinst_on = fvp_affinst_on,
.affinst_off = fvp_affinst_off,
.affinst_suspend = fvp_affinst_suspend,
.affinst_on_finish = fvp_affinst_on_finish,
.affinst_suspend_finish = fvp_affinst_suspend_finish,
static const plat_psci_ops_t fvp_plat_psci_ops = {
.cpu_standby = fvp_cpu_standby,
.pwr_domain_on = fvp_pwr_domain_on,
.pwr_domain_off = fvp_pwr_domain_off,
.pwr_domain_suspend = fvp_pwr_domain_suspend,
.pwr_domain_on_finish = fvp_pwr_domain_on_finish,
.pwr_domain_suspend_finish = fvp_pwr_domain_suspend_finish,
.system_off = fvp_system_off,
.system_reset = fvp_system_reset,
.validate_power_state = arm_validate_power_state
.validate_power_state = arm_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint
};
/*******************************************************************************
* Export the platform specific power ops & initialize the fvp power controller
* Export the platform specific psci ops & initialize the fvp power controller
******************************************************************************/
int platform_setup_pm(const plat_pm_ops_t **plat_ops)
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*plat_ops = &fvp_plat_pm_ops;
*psci_ops = &fvp_plat_psci_ops;
/* Program the jump address */
fvp_program_mailbox(sec_entrypoint);
return 0;
}

View file

@ -29,204 +29,41 @@
*/
#include <arch.h>
#include <assert.h>
#include <plat_arm.h>
#include <platform_def.h>
/* TODO: Reusing psci error codes & state information. Get our own! */
#include <psci.h>
#include "drivers/pwrc/fvp_pwrc.h"
#include "fvp_def.h"
/* We treat '255' as an invalid affinity instance */
#define AFFINST_INVAL 0xff
/*
* The FVP power domain tree does not have a single system level power domain
* i.e. a single root node. The first entry in the power domain descriptor
* specifies the number of power domains at the highest power level. For the FVP
* this is 2 i.e. the number of cluster power domains.
*/
#define FVP_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
/*******************************************************************************
* We support 3 flavours of the FVP: Foundation, Base AEM & Base Cortex. Each
* flavour has a different topology. The common bit is that there can be a max.
* of 2 clusters (affinity 1) and 4 cpus (affinity 0) per cluster. So we define
* a tree like data structure which caters to these maximum bounds. It simply
* marks the absent affinity level instances as PSCI_AFF_ABSENT e.g. there is no
* cluster 1 on the Foundation FVP. The 'data' field is currently unused.
******************************************************************************/
typedef struct affinity_info {
unsigned char sibling;
unsigned char child;
unsigned char state;
unsigned int data;
} affinity_info_t;
/*******************************************************************************
* The following two data structures store the topology tree for the fvp. There
* is a separate array for each affinity level i.e. cpus and clusters. The child
* and sibling references allow traversal inside and in between the two arrays.
******************************************************************************/
static affinity_info_t fvp_aff1_topology_map[ARM_CLUSTER_COUNT];
static affinity_info_t fvp_aff0_topology_map[PLATFORM_CORE_COUNT];
/* Simple global variable to safeguard us from stupidity */
static unsigned int topology_setup_done;
/* The FVP power domain tree descriptor */
const unsigned char arm_power_domain_tree_desc[] = {
/* No of root nodes */
FVP_PWR_DOMAINS_AT_MAX_PWR_LVL,
/* No of children for the first node */
PLAT_ARM_CLUSTER0_CORE_COUNT,
/* No of children for the second node */
PLAT_ARM_CLUSTER1_CORE_COUNT
};
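Read concretely, on a Base FVP with two 4-CPU clusters (the default core counts) the descriptor above evaluates to:
/*
 * { 2, 4, 4 }:
 *   2 -> two cluster power domains at the highest power level,
 *   4 -> cluster 0 has four CPU children,
 *   4 -> cluster 1 has four CPU children.
 */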
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the platform
* topology. psci queries the platform to determine how many affinity instances
* are present at a particular level for a given mpidr e.g. consider a dual
* cluster platform where each cluster has 4 cpus. A call to this function with
* (0, 0x100) will return the number of cpus implemented under cluster 1 i.e. 4.
* Similarly a call with (1, 0x100) will return 2 i.e. the number of clusters.
* This is because we are effectively asking how many affinity level 1 instances
* are implemented under affinity level 2 instance 0.
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is returned
* in case the MPIDR is invalid.
******************************************************************************/
unsigned int plat_get_aff_count(unsigned int aff_lvl,
unsigned long mpidr)
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned int aff_count = 1, ctr;
unsigned char parent_aff_id;
if (arm_check_mpidr(mpidr) == -1)
return -1;
assert(topology_setup_done == 1);
if (fvp_pwrc_read_psysr(mpidr) == PSYSR_INVALID)
return -1;
switch (aff_lvl) {
case 3:
case 2:
/*
* Assert if the parent affinity instance is not 0.
* This also takes care of level 3 in an obfuscated way
*/
parent_aff_id = (mpidr >> MPIDR_AFF3_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/*
* Report that we implement a single instance of
* affinity levels 2 & 3 which are AFF_ABSENT
*/
break;
case 1:
/* Assert if the parent affinity instance is not 0. */
parent_aff_id = (mpidr >> MPIDR_AFF2_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id == 0);
/* Fetch the starting index in the aff1 array */
for (ctr = 0;
fvp_aff1_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff1_topology_map[ctr].sibling) {
aff_count++;
}
break;
case 0:
/* Assert if the cluster id is anything apart from 0 or 1 */
parent_aff_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(parent_aff_id < ARM_CLUSTER_COUNT);
/* Fetch the starting index in the aff0 array */
for (ctr = fvp_aff1_topology_map[parent_aff_id].child;
fvp_aff0_topology_map[ctr].sibling != AFFINST_INVAL;
ctr = fvp_aff0_topology_map[ctr].sibling) {
aff_count++;
}
break;
default:
assert(0);
}
return aff_count;
}
/*******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform to allow the former to detect the state of a
* affinity instance in the platform topology. psci queries the platform to
* determine whether an affinity instance is present or absent. This caters for
* topologies where an intermediate affinity level instance is missing e.g.
* consider a platform which implements a single cluster with 4 cpus and there
* is another cpu sitting directly on the interconnect along with the cluster.
* The mpidrs of the cluster would range from 0x0-0x3. The mpidr of the single
* cpu would be 0x100 to highlight that it does not belong to cluster 0. Cluster
* 1 is however missing but needs to be accounted to reach this single cpu in
* the topology tree. Hence it will be marked as PSCI_AFF_ABSENT. This is not
* applicable to the FVP but depicted as an example.
******************************************************************************/
unsigned int plat_get_aff_state(unsigned int aff_lvl,
unsigned long mpidr)
{
unsigned int aff_state = PSCI_AFF_ABSENT, idx;
idx = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
assert(topology_setup_done == 1);
switch (aff_lvl) {
case 3:
case 2:
/* Report affinity levels 2 & 3 as absent */
break;
case 1:
aff_state = fvp_aff1_topology_map[idx].state;
break;
case 0:
/*
* First get start index of the aff0 in its array & then add
* to it the affinity id that we want the state of
*/
idx = fvp_aff1_topology_map[idx].child;
idx += (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
aff_state = fvp_aff0_topology_map[idx].state;
break;
default:
assert(0);
}
return aff_state;
}
/*******************************************************************************
* This function populates the FVP specific topology information depending upon
* the FVP flavour it's running on. We construct all the mpidrs we can handle
* and rely on the PWRC.PSYSR to flag absent cpus when their status is queried.
******************************************************************************/
void plat_arm_topology_setup(void)
{
unsigned char aff0, aff1, aff_state, aff0_offset = 0;
unsigned long mpidr;
topology_setup_done = 0;
for (aff1 = 0; aff1 < ARM_CLUSTER_COUNT; aff1++) {
fvp_aff1_topology_map[aff1].child = aff0_offset;
fvp_aff1_topology_map[aff1].sibling = aff1 + 1;
for (aff0 = 0; aff0 < FVP_MAX_CPUS_PER_CLUSTER; aff0++) {
mpidr = aff1 << MPIDR_AFF1_SHIFT;
mpidr |= aff0 << MPIDR_AFF0_SHIFT;
if (fvp_pwrc_read_psysr(mpidr) != PSYSR_INVALID) {
/*
* Presence of even a single aff0 indicates
* presence of parent aff1 on the FVP.
*/
aff_state = PSCI_AFF_PRESENT;
fvp_aff1_topology_map[aff1].state =
PSCI_AFF_PRESENT;
} else {
aff_state = PSCI_AFF_ABSENT;
}
fvp_aff0_topology_map[aff0_offset].child = AFFINST_INVAL;
fvp_aff0_topology_map[aff0_offset].state = aff_state;
fvp_aff0_topology_map[aff0_offset].sibling =
aff0_offset + 1;
/* Increment the absolute number of aff0s traversed */
aff0_offset++;
}
/* Tie-off the last aff0 sibling to -1 to avoid overflow */
fvp_aff0_topology_map[aff0_offset - 1].sibling = AFFINST_INVAL;
}
/* Tie-off the last aff1 sibling to AFFINST_INVAL to avoid overflow */
fvp_aff1_topology_map[aff1 - 1].sibling = AFFINST_INVAL;
topology_setup_done = 1;
return plat_arm_calc_core_pos(mpidr);
}

View file

@ -28,7 +28,6 @@
# POSSIBILITY OF SUCH DAMAGE.
#
PLAT_INCLUDES := -Iplat/arm/board/fvp/include
@ -63,5 +62,8 @@ BL31_SOURCES += lib/cpus/aarch64/aem_generic.S \
plat/arm/board/fvp/aarch64/fvp_helpers.S \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
include plat/arm/board/common/board_common.mk
include plat/arm/common/arm_common.mk

View file

@ -29,6 +29,8 @@
#
# TSP source files specific to FVP platform
BL32_SOURCES += plat/arm/board/fvp/tsp/fvp_tsp_setup.c
BL32_SOURCES += plat/arm/board/fvp/fvp_topology.c \
plat/arm/board/fvp/drivers/pwrc/fvp_pwrc.c \
plat/arm/board/fvp/tsp/fvp_tsp_setup.c
include plat/arm/common/tsp/arm_tsp.mk

View file

@ -48,6 +48,9 @@ ERRATA_A57_813420 := 1
# power down sequence
SKIP_A57_L1_FLUSH_PWR_DWN := 1
# Disable the PSCI platform compatibility layer
ENABLE_PLAT_COMPAT := 0
include plat/arm/board/common/board_css.mk
include plat/arm/common/arm_common.mk
include plat/arm/soc/common/soc_css.mk

View file

@ -28,4 +28,6 @@
# POSSIBILITY OF SUCH DAMAGE.
#
BL32_SOURCES += plat/arm/css/common/css_topology.c
include plat/arm/common/tsp/arm_tsp.mk

View file

@ -30,11 +30,38 @@
#include <asm_macros.S>
#include <platform_def.h>
.weak plat_arm_calc_core_pos
.weak plat_my_core_pos
.globl plat_crash_console_init
.globl plat_crash_console_putc
.globl platform_mem_init
/* -----------------------------------------------------
* unsigned int plat_my_core_pos(void)
* This function uses the plat_arm_calc_core_pos()
* definition to get the index of the calling CPU.
* -----------------------------------------------------
*/
func plat_my_core_pos
mrs x0, mpidr_el1
b plat_arm_calc_core_pos
endfunc plat_my_core_pos
/* -----------------------------------------------------
* unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* Helper function to calculate the core position.
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func plat_arm_calc_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc plat_arm_calc_core_pos
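In C terms, the computation above amounts to the following sketch (the function name is illustrative; Aff1 sits at bit 8, so shifting the masked cluster field right by 6 multiplies the cluster ID by 4):
unsigned int calc_core_pos(unsigned long mpidr)
{
	unsigned int core_id = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster_id = (mpidr & MPIDR_CLUSTER_MASK) >> 6;

	/* CorePos = (ClusterId * 4) + CoreId */
	return cluster_id + core_id;
}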
/* ---------------------------------------------
* int plat_crash_console_init(void)
* Function to initialize the crash console
@ -60,3 +87,12 @@ func plat_crash_console_putc
mov_imm x1, PLAT_ARM_CRASH_UART_BASE
b console_core_putc
endfunc plat_crash_console_putc
/* ---------------------------------------------------------------------
* We don't need to carry out any memory initialization on ARM
* platforms. The Secure RAM is accessible straight away.
* ---------------------------------------------------------------------
*/
func platform_mem_init
ret
endfunc platform_mem_init

View file

@ -226,9 +226,6 @@ void arm_bl31_platform_setup(void)
/* Initialize power controller before setting up topology */
plat_arm_pwrc_setup();
/* Topologies are best known to the platform. */
plat_arm_topology_setup();
}
void bl31_platform_setup(void)

View file

@ -46,6 +46,23 @@ endif
# Process flags
$(eval $(call add_define,ARM_TSP_RAM_LOCATION_ID))
# For the original power-state parameter format, the State-ID can be encoded
# according to the recommended encoding, or it can be zero. This flag
# determines which State-ID encoding is parsed.
ARM_RECOM_STATE_ID_ENC := 0
# If PSCI_EXTENDED_STATE_ID is set, then the recommended State-ID encoding
# needs to be used. Otherwise, throw a build error.
ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
ifeq (${ARM_RECOM_STATE_ID_ENC}, 0)
$(error "Incompatible STATE_ID build option specified")
endif
endif
# Process ARM_RECOM_STATE_ID_ENC flag
$(eval $(call assert_boolean,ARM_RECOM_STATE_ID_ENC))
$(eval $(call add_define,ARM_RECOM_STATE_ID_ENC))
PLAT_INCLUDES += -Iinclude/common/tbbr \
-Iinclude/plat/arm/common \
-Iinclude/plat/arm/common/aarch64
@ -83,7 +100,8 @@ BL31_SOURCES += drivers/arm/cci/cci.c \
plat/arm/common/arm_security.c \
plat/arm/common/arm_topology.c \
plat/common/plat_gic.c \
plat/common/aarch64/platform_mp_stack.S
plat/common/aarch64/platform_mp_stack.S \
plat/common/aarch64/plat_psci_common.c
ifneq (${TRUSTED_BOARD_BOOT},0)

View file

@ -29,53 +29,49 @@
*/
#include <arch_helpers.h>
#include <arm_def.h>
#include <assert.h>
#include <errno.h>
#include <plat_arm.h>
#include <psci.h>
#if ARM_RECOM_STATE_ID_ENC
extern unsigned int arm_pm_idle_states[];
#endif /* ARM_RECOM_STATE_ID_ENC */
/*******************************************************************************
* ARM standard platform utility function which is used to determine if any
* platform actions should be performed for the specified affinity instance
* given its state. Nothing needs to be done if the 'state' is not off or if
* this is not the highest affinity level which will enter the 'state'.
******************************************************************************/
int32_t arm_do_affinst_actions(unsigned int afflvl, unsigned int state)
{
unsigned int max_phys_off_afflvl;
assert(afflvl <= MPIDR_AFFLVL1);
if (state != PSCI_STATE_OFF)
return -EAGAIN;
/*
* Find the highest affinity level which will be suspended and postpone
* all the platform specific actions until that level is hit.
*/
max_phys_off_afflvl = psci_get_max_phys_off_afflvl();
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
if (afflvl != max_phys_off_afflvl)
return -EAGAIN;
return 0;
}
#if !ARM_RECOM_STATE_ID_ENC
/*******************************************************************************
* ARM standard platform handler called to check the validity of the power state
* parameter.
******************************************************************************/
int arm_validate_power_state(unsigned int power_state)
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int pstate = psci_get_pstate_type(power_state);
int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
int i;
assert(req_state);
if (pwr_lvl > PLAT_MAX_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) {
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* It's possible to enter standby only on affinity level 0
* (i.e. a CPU on ARM standard platforms).
* Ignore any other affinity level.
* It's possible to enter standby only on power level 0
* Ignore any other power level.
*/
if (psci_get_pstate_afflvl(power_state) != MPIDR_AFFLVL0)
if (pwr_lvl != ARM_PWR_LVL0)
return PSCI_E_INVALID_PARAMS;
req_state->pwr_domain_state[ARM_PWR_LVL0] =
ARM_LOCAL_STATE_RET;
} else {
for (i = ARM_PWR_LVL0; i <= pwr_lvl; i++)
req_state->pwr_domain_state[i] =
ARM_LOCAL_STATE_OFF;
}
/*
@ -86,3 +82,65 @@ int arm_validate_power_state(unsigned int power_state)
return PSCI_E_SUCCESS;
}
#else
/*******************************************************************************
* ARM standard platform handler called to check the validity of the power
* state parameter. The power state parameter has to be a composite power
* state.
******************************************************************************/
int arm_validate_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
unsigned int state_id;
int i;
assert(req_state);
/*
* Currently we are using a linear search for finding the matching
* entry in the idle power state array. This can be made a binary
* search if the number of entries justifies the additional complexity.
*/
for (i = 0; !!arm_pm_idle_states[i]; i++) {
if (power_state == arm_pm_idle_states[i])
break;
}
/* Return error if entry not found in the idle state array */
if (!arm_pm_idle_states[i])
return PSCI_E_INVALID_PARAMS;
i = 0;
state_id = psci_get_pstate_id(power_state);
/* Parse the State ID and populate the state info parameter */
while (state_id) {
req_state->pwr_domain_state[i++] = state_id &
ARM_LOCAL_PSTATE_MASK;
state_id >>= ARM_LOCAL_PSTATE_WIDTH;
}
return PSCI_E_SUCCESS;
}
#endif /* ARM_RECOM_STATE_ID_ENC */
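For example, parsing State-ID 0x22 with the 4-bit-per-level fields used above fills the request as follows:
/*
 * state_id = 0x22:
 *   pass 1: pwr_domain_state[0] = 0x22 & 0xF = 2 (ARM_LOCAL_STATE_OFF)
 *           state_id >>= 4      ->  0x2
 *   pass 2: pwr_domain_state[1] = 0x2 & 0xF = 2 (ARM_LOCAL_STATE_OFF)
 * i.e. a power-down request for both the CPU and its cluster.
 */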
/*******************************************************************************
* ARM standard platform handler called to check the validity of the non secure
* entrypoint.
******************************************************************************/
int arm_validate_ns_entrypoint(uintptr_t entrypoint)
{
/*
* Check if the non secure entrypoint lies within the non
* secure DRAM.
*/
if ((entrypoint >= ARM_NS_DRAM1_BASE) && (entrypoint <
(ARM_NS_DRAM1_BASE + ARM_NS_DRAM1_SIZE)))
return PSCI_E_SUCCESS;
if ((entrypoint >= ARM_DRAM2_BASE) && (entrypoint <
(ARM_DRAM2_BASE + ARM_DRAM2_SIZE)))
return PSCI_E_SUCCESS;
return PSCI_E_INVALID_ADDRESS;
}

View file

@ -30,35 +30,49 @@
#include <arch.h>
#include <psci.h>
#include <plat_arm.h>
#include <platform_def.h>
/*
* Weak definitions use fixed topology. Strong definitions could make topology
* configurable
*/
#pragma weak plat_get_aff_count
#pragma weak plat_get_aff_state
#pragma weak plat_arm_topology_setup
#define get_arm_cluster_core_count(mpidr)\
(((mpidr) & 0x100) ? PLAT_ARM_CLUSTER1_CORE_COUNT :\
PLAT_ARM_CLUSTER0_CORE_COUNT)
/* The power domain tree descriptor which needs to be exported by ARM platforms */
extern const unsigned char arm_power_domain_tree_desc[];
unsigned int plat_get_aff_count(unsigned int aff_lvl, unsigned long mpidr)
/*******************************************************************************
* This function returns the ARM default topology tree information.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
/* Report 1 (absent) instance at levels higher than the cluster level */
if (aff_lvl > MPIDR_AFFLVL1)
return 1;
if (aff_lvl == MPIDR_AFFLVL1)
return ARM_CLUSTER_COUNT;
return mpidr & 0x100 ? PLAT_ARM_CLUSTER1_CORE_COUNT :
PLAT_ARM_CLUSTER0_CORE_COUNT;
return arm_power_domain_tree_desc;
}
unsigned int plat_get_aff_state(unsigned int aff_lvl, unsigned long mpidr)
/*******************************************************************************
* This function validates an MPIDR by checking whether it falls within the
* acceptable bounds. An error code (-1) is returned if an incorrect mpidr
* is passed.
******************************************************************************/
int arm_check_mpidr(u_register_t mpidr)
{
return aff_lvl <= MPIDR_AFFLVL1 ? PSCI_AFF_PRESENT : PSCI_AFF_ABSENT;
}
unsigned int cluster_id, cpu_id;
void plat_arm_topology_setup(void)
{
mpidr &= MPIDR_AFFINITY_MASK;
if (mpidr & ~(MPIDR_CLUSTER_MASK | MPIDR_CPU_MASK))
return -1;
cluster_id = (mpidr >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK;
cpu_id = (mpidr >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK;
if (cluster_id >= ARM_CLUSTER_COUNT)
return -1;
/*
 * Validate cpu_id by checking whether it represents a CPU in
 * one of the two clusters present on the platform.
 */
if (cpu_id >= get_arm_cluster_core_count(mpidr))
return -1;
return 0;
}

View file

@ -31,6 +31,7 @@
# TSP source files common to ARM standard platforms
BL32_SOURCES += drivers/arm/gic/arm_gic.c \
drivers/arm/gic/gic_v2.c \
plat/arm/common/arm_topology.c \
plat/arm/common/tsp/arm_tsp_setup.c \
plat/common/aarch64/platform_mp_stack.S \
plat/common/plat_gic.c

View file

@ -33,11 +33,9 @@
#include <css_def.h>
.weak plat_secondary_cold_boot_setup
.weak platform_get_entrypoint
.weak platform_mem_init
.globl platform_get_core_pos
.weak platform_is_primary_cpu
.weak plat_get_my_entrypoint
.globl plat_arm_calc_core_pos
.weak plat_is_my_cpu_primary
/* -----------------------------------------------------
* void plat_secondary_cold_boot_setup (void);
@ -54,69 +52,56 @@ cb_panic:
b cb_panic
endfunc plat_secondary_cold_boot_setup
/* -----------------------------------------------------
* void platform_get_entrypoint (unsigned int mpid);
/* ---------------------------------------------------------------------
* unsigned long plat_get_my_entrypoint (void);
*
* Main job of this routine is to distinguish between
* a cold and warm boot.
* On a cold boot the secondaries first wait for the
* platform to be initialized after which they are
* hotplugged in. The primary proceeds to perform the
* platform initialization.
* On a warm boot, each cpu jumps to the address in its
* mailbox.
* Main job of this routine is to distinguish between a cold and a warm
* boot. On CSS platforms, this distinction is based on the contents of
* the Trusted Mailbox. It is initialised to zero by the SCP before the
* AP cores are released from reset. Therefore, a zero mailbox means
* it's a cold reset.
*
* TODO: Not a good idea to save lr in a temp reg
* -----------------------------------------------------
* This function returns the contents of the mailbox, i.e.:
* - 0 for a cold boot;
* - the warm boot entrypoint for a warm boot.
* ---------------------------------------------------------------------
*/
func platform_get_entrypoint
mov x9, x30 // lr
bl platform_get_core_pos
ldr x1, =TRUSTED_MAILBOXES_BASE
lsl x0, x0, #TRUSTED_MAILBOX_SHIFT
ldr x0, [x1, x0]
ret x9
endfunc platform_get_entrypoint
func plat_get_my_entrypoint
mov_imm x0, TRUSTED_MAILBOX_BASE
ldr x0, [x0]
ret
endfunc plat_get_my_entrypoint
/*
* Override the default implementation to swap the cluster order.
* This is necessary in order to match the format of the boot
* information passed by the SCP and read in platform_is_primary_cpu
* below.
/* -----------------------------------------------------------
* unsigned int plat_arm_calc_core_pos(uint64_t mpidr)
* Function to calculate the core position by
* swapping the cluster order. This is necessary in order to
* match the format of the boot information passed by the SCP
* and read in plat_is_my_cpu_primary below.
* -----------------------------------------------------------
*/
func platform_get_core_pos
func plat_arm_calc_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
eor x0, x0, #(1 << MPIDR_AFFINITY_BITS) // swap cluster order
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
endfunc plat_arm_calc_core_pos
/* -----------------------------------------------------
* void platform_mem_init(void);
* unsigned int plat_is_my_cpu_primary (void);
*
* We don't need to carry out any memory initialization
* on CSS platforms. The Secure RAM is accessible straight away.
* -----------------------------------------------------
*/
func platform_mem_init
ret
endfunc platform_mem_init
/* -----------------------------------------------------
* unsigned int platform_is_primary_cpu (unsigned int mpid);
*
* Given the mpidr say whether this cpu is the primary
* Find out whether the current cpu is the primary
* cpu (applicable only after a cold boot)
* -----------------------------------------------------
*/
func platform_is_primary_cpu
func plat_is_my_cpu_primary
mov x9, x30
bl platform_get_core_pos
bl plat_my_core_pos
ldr x1, =SCP_BOOT_CFG_ADDR
ldr x1, [x1]
ubfx x1, x1, #PRIMARY_CPU_SHIFT, #PRIMARY_CPU_BIT_WIDTH
cmp x0, x1
cset x0, eq
cset w0, eq
ret x9
endfunc platform_is_primary_cpu
endfunc plat_is_my_cpu_primary

View file

@ -44,7 +44,8 @@ BL2_SOURCES += plat/arm/css/common/css_bl2_setup.c \
BL31_SOURCES += plat/arm/css/common/css_mhu.c \
plat/arm/css/common/css_pm.c \
plat/arm/css/common/css_scpi.c
plat/arm/css/common/css_scpi.c \
plat/arm/css/common/css_topology.c
ifneq (${RESET_TO_BL31},0)

View file

@ -41,42 +41,49 @@
#include <psci.h>
#include "css_scpi.h"
#if ARM_RECOM_STATE_ID_ENC
/*
* The table storing the valid idle power states. Ensure that the
* array entries are populated in ascending order of state-id to
* enable us to use binary search during power state validation.
* The table must be terminated by a NULL entry.
*/
const unsigned int arm_pm_idle_states[] = {
/* State-id - 0x01 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RET,
ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
/* State-id - 0x02 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
/* State-id - 0x22 */
arm_make_pwrstate_lvl1(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
0,
};
#endif
/*******************************************************************************
* Private function to program the mailbox for a cpu before it is released
* from reset.
******************************************************************************/
static void css_program_mailbox(uint64_t mpidr, uint64_t address)
static void css_program_mailbox(uintptr_t address)
{
uint64_t linear_id;
uint64_t mbox;
linear_id = platform_get_core_pos(mpidr);
mbox = TRUSTED_MAILBOXES_BASE + (linear_id << TRUSTED_MAILBOX_SHIFT);
*((uint64_t *) mbox) = address;
flush_dcache_range(mbox, sizeof(mbox));
uintptr_t *mailbox = (void *) TRUSTED_MAILBOX_BASE;
*mailbox = address;
flush_dcache_range((uintptr_t) mailbox, sizeof(*mailbox));
}
/*******************************************************************************
* Handler called when an affinity instance is about to be turned on. The
* Handler called when a power domain is about to be turned on. The
* mpidr determines the CPU to be turned on.
******************************************************************************/
int32_t css_affinst_on(uint64_t mpidr,
uint64_t sec_entrypoint,
uint32_t afflvl,
uint32_t state)
int css_pwr_domain_on(u_register_t mpidr)
{
/*
* SCP takes care of powering up higher affinity levels so we
* SCP takes care of powering up parent power domains so we
* only need to care about level 0
*/
if (afflvl != MPIDR_AFFLVL0)
return PSCI_E_SUCCESS;
/*
* Setup mailbox with address for CPU entrypoint when it next powers up
*/
css_program_mailbox(mpidr, sec_entrypoint);
scpi_set_css_power_state(mpidr, scpi_power_on, scpi_power_on,
scpi_power_on);
@ -84,47 +91,37 @@ int32_t css_affinst_on(uint64_t mpidr,
}
/*******************************************************************************
* Handler called when an affinity instance has just been powered on after
* being turned off earlier. The level and mpidr determine the affinity
* instance. The 'state' arg. allows the platform to decide whether the cluster
* was turned off prior to wakeup and do what's necessary to setup it up
* correctly.
* Handler called when a power domain has just been powered on after
* being turned off earlier. The target_state encodes the low power state that
* each level has woken up from.
******************************************************************************/
void css_affinst_on_finish(uint32_t afflvl, uint32_t state)
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
unsigned long mpidr;
/* Determine if any platform actions need to be executed. */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/* Get the mpidr for this cpu */
mpidr = read_mpidr_el1();
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
/*
* Perform the common cluster specific operations i.e enable coherency
* if this cluster was off.
*/
if (afflvl != MPIDR_AFFLVL0)
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF)
cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
/* Enable the gic cpu interface */
arm_gic_cpuif_setup();
/* todo: Is this setup only needed after a cold boot? */
arm_gic_pcpu_distif_setup();
/* Clear the mailbox for this cpu. */
css_program_mailbox(mpidr, 0);
}
/*******************************************************************************
* Common function called while turning a cpu off or suspending it. It is called
* from css_off() or css_suspend() when these functions in turn are called for
* the highest affinity level which will be powered down. It performs the
* actions common to the OFF and SUSPEND calls.
* power domain at the highest power level which will be powered down. It
* performs the actions common to the OFF and SUSPEND calls.
******************************************************************************/
static void css_power_down_common(uint32_t afflvl)
static void css_power_down_common(const psci_power_state_t *target_state)
{
uint32_t cluster_state = scpi_power_on;
@ -132,7 +129,8 @@ static void css_power_down_common(uint32_t afflvl)
arm_gic_cpuif_deactivate();
/* Cluster is to be turned off, so disable coherency */
if (afflvl > MPIDR_AFFLVL0) {
if (target_state->pwr_domain_state[ARM_PWR_LVL1] ==
ARM_LOCAL_STATE_OFF) {
cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
cluster_state = scpi_power_off;
}
@ -148,64 +146,55 @@ static void css_power_down_common(uint32_t afflvl)
}
/*******************************************************************************
* Handler called when an affinity instance is about to be turned off. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take
* appropriate actions.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It would be wise to flush a write to the
* global to prevent unpredictable results.
* Handler called when a power domain is about to be turned off. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
static void css_affinst_off(uint32_t afflvl, uint32_t state)
static void css_pwr_domain_off(const psci_power_state_t *target_state)
{
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
css_power_down_common(afflvl);
css_power_down_common(target_state);
}
/*******************************************************************************
* Handler called when an affinity instance is about to be suspended. The
* level and mpidr determine the affinity instance. The 'state' arg. allows the
* platform to decide whether the cluster is being turned off and take apt
* actions. The 'sec_entrypoint' determines the address in BL3-1 from where
* execution should resume.
*
* CAUTION: There is no guarantee that caches will remain turned on across calls
* to this function as each affinity level is dealt with. So do not write & read
* global variables across calls. It would be wise to flush a write to a
* global variable to prevent unpredictable results.
* Handler called when a power domain is about to be suspended. The
* target_state encodes the power state that each level should transition to.
******************************************************************************/
static void css_affinst_suspend(uint64_t sec_entrypoint,
uint32_t afflvl,
uint32_t state)
static void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
/* Determine if any platform actions need to be executed */
if (arm_do_affinst_actions(afflvl, state) == -EAGAIN)
return;
/*
* Setup mailbox with address for CPU entrypoint when it next powers up.
* Juno has retention only at cpu level. Just return
* as nothing is to be done for retention.
*/
css_program_mailbox(read_mpidr_el1(), sec_entrypoint);
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
css_power_down_common(afflvl);
assert(target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_OFF);
css_power_down_common(target_state);
}
/*******************************************************************************
* Handler called when an affinity instance has just been powered on after
* having been suspended earlier. The level and mpidr determine the affinity
* instance.
* Handler called when a power domain has just been powered on after
* having been suspended earlier. The target_state encodes the low power state
* that each level has woken up from.
* TODO: At the moment we reuse the on finisher and reinitialize the secure
* context. Need to implement a separate suspend finisher.
******************************************************************************/
static void css_affinst_suspend_finish(uint32_t afflvl,
uint32_t state)
static void css_pwr_domain_suspend_finish(
const psci_power_state_t *target_state)
{
css_affinst_on_finish(afflvl, state);
/*
* Return as nothing is to be done on waking up from retention.
*/
if (target_state->pwr_domain_state[ARM_PWR_LVL0] ==
ARM_LOCAL_STATE_RET)
return;
css_pwr_domain_on_finish(target_state);
}
/*******************************************************************************
@ -244,12 +233,14 @@ static void __dead2 css_system_reset(void)
}
/*******************************************************************************
* Handler called when an affinity instance is about to enter standby.
* Handler called when the CPU power domain is about to enter standby.
******************************************************************************/
void css_affinst_standby(unsigned int power_state)
void css_cpu_standby(plat_local_state_t cpu_state)
{
unsigned int scr;
assert(cpu_state == ARM_LOCAL_STATE_RET);
scr = read_scr_el3();
/* Enable PhysicalIRQ bit for NS world to wake the CPU */
write_scr_el3(scr | SCR_IRQ_BIT);
@ -267,23 +258,28 @@ void css_affinst_standby(unsigned int power_state)
/*******************************************************************************
* Export the platform handlers to enable psci to invoke them
******************************************************************************/
static const plat_pm_ops_t css_ops = {
.affinst_on = css_affinst_on,
.affinst_on_finish = css_affinst_on_finish,
.affinst_off = css_affinst_off,
.affinst_standby = css_affinst_standby,
.affinst_suspend = css_affinst_suspend,
.affinst_suspend_finish = css_affinst_suspend_finish,
static const plat_psci_ops_t css_ops = {
.pwr_domain_on = css_pwr_domain_on,
.pwr_domain_on_finish = css_pwr_domain_on_finish,
.pwr_domain_off = css_pwr_domain_off,
.cpu_standby = css_cpu_standby,
.pwr_domain_suspend = css_pwr_domain_suspend,
.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
.system_off = css_system_off,
.system_reset = css_system_reset,
.validate_power_state = arm_validate_power_state
.validate_power_state = arm_validate_power_state,
.validate_ns_entrypoint = arm_validate_ns_entrypoint
};
/*******************************************************************************
* Export the platform specific power ops.
* Export the platform specific psci ops.
******************************************************************************/
int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops)
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
*plat_ops = &css_ops;
*psci_ops = &css_ops;
/* Setup mailbox with entry point. */
css_program_mailbox(sec_entrypoint);
return 0;
}

View file

@ -0,0 +1,70 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <plat_arm.h>
/*
* On ARM platforms, by default the cluster power level is treated as the
* highest. The first entry in the power domain descriptor specifies the
* number of cluster power domains i.e. 2.
*/
#define CSS_PWR_DOMAINS_AT_MAX_PWR_LVL ARM_CLUSTER_COUNT
/*
* The CSS power domain tree descriptor. The cluster power domains are
* arranged so that when the PSCI generic code creates the power domain tree,
* the indices of the CPU power domain nodes it allocates match the linear
* indices returned by plat_core_pos_by_mpidr() i.e.
* CLUSTER1 CPUs are allocated indices from 0 to 3 and the higher indices for
* CLUSTER0 CPUs.
*/
const unsigned char arm_power_domain_tree_desc[] = {
/* No of root nodes */
CSS_PWR_DOMAINS_AT_MAX_PWR_LVL,
/* No of children for the first node */
PLAT_ARM_CLUSTER1_CORE_COUNT,
/* No of children for the second node */
PLAT_ARM_CLUSTER0_CORE_COUNT
};
/******************************************************************************
* This function implements a part of the critical interface between the psci
* generic layer and the platform that allows the former to query the platform
* to convert an MPIDR to a unique linear index. An error code (-1) is
* returned in case the MPIDR is invalid.
*****************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
if (arm_check_mpidr(mpidr) == 0)
return plat_arm_calc_core_pos(mpidr);
return -1;
}
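
To make the ordering concrete, a worked sketch (the cluster/core counts here are illustrative assumptions, not a claim about any particular board):

/*
 * Illustrative only: with PLAT_ARM_CLUSTER1_CORE_COUNT = 4 and
 * PLAT_ARM_CLUSTER0_CORE_COUNT = 2, the descriptor reads { 2, 4, 2 }:
 * two root (cluster) nodes, four CPU children under the first node and
 * two under the second. The generic layer allocates CPU node indices in
 * descriptor order, so CLUSTER1 CPUs get linear indices 0-3, CLUSTER0
 * CPUs get 4-5, and plat_arm_calc_core_pos() must return the same
 * mapping for the tree to be consistent.
 */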

View file

@ -27,7 +27,8 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <platform.h>
#include <xlat_tables.h>
/*
@ -47,3 +48,18 @@ void bl32_plat_enable_mmu(uint32_t flags)
{
enable_mmu_el1(flags);
}
#if !ENABLE_PLAT_COMPAT
/*
* Helper function for platform_get_core_pos() when platform compatibility is
* disabled. This is to enable SPDs using the older platform API to continue
* to work.
*/
unsigned int platform_core_pos_helper(unsigned long mpidr)
{
int idx = plat_core_pos_by_mpidr(mpidr);
assert(idx >= 0);
return idx;
}
#endif

View file

@ -0,0 +1,63 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <assert.h>
#include <platform.h>
#include <psci.h>
/*
* The PSCI generic code uses this API to let the platform participate in state
* coordination during a power management operation. It compares the platform
* specific local power states requested by each cpu for a given power domain
* and returns the coordinated target power state that the domain should
* enter. A platform assigns a number to a local power state. This default
* implementation assumes that the platform assigns these numbers in order of
* increasing depth of the power state i.e. for two power states X & Y, if X < Y
* then X represents a shallower power state than Y. As a result, the
* coordinated target local power state for a power domain will be the minimum
* of the requested local power states.
*/
plat_local_state_t plat_get_target_pwr_state(unsigned int lvl,
const plat_local_state_t *states,
unsigned int ncpu)
{
plat_local_state_t target = PLAT_MAX_OFF_STATE, temp;
assert(ncpu);
do {
temp = *states++;
if (temp < target)
target = temp;
} while (--ncpu);
return target;
}
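
A usage sketch (the state values are hypothetical; only their relative ordering matters to this default implementation):

/* Hypothetical numbering: 0 = RUN, 1 = RETENTION, 2 = OFF. */
plat_local_state_t requested[3] = {2, 1, 2};
plat_local_state_t coordinated;

/*
 * One CPU only requested retention, so the minimum (shallowest)
 * state wins and the shared power domain enters retention rather
 * than being powered off.
 */
coordinated = plat_get_target_pwr_state(1 /* cluster level */, requested, 3);
/* coordinated == 1 */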

View file

@ -32,37 +32,38 @@
#include <asm_macros.S>
#include <platform_def.h>
.weak platform_get_core_pos
.weak platform_check_mpidr
.weak plat_report_exception
.weak plat_crash_console_init
.weak plat_crash_console_putc
.weak plat_reset_handler
.weak plat_disable_acp
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
#if !ENABLE_PLAT_COMPAT
.globl platform_get_core_pos
/* -----------------------------------------------------
* Placeholder function which should be redefined by
* each platform.
* -----------------------------------------------------
#define MPIDR_RES_BIT_MASK 0xff000000
/* ------------------------------------------------------------------
* int platform_get_core_pos(int mpidr)
* Returns the CPU index of the CPU specified by mpidr. This is
* defined when platform compatibility is disabled to enable Trusted
* Firmware components like SPDs using the old platform API to work.
* This API is deprecated and it assumes that the mpidr specified is
* that of a valid and present CPU. Instead, plat_my_core_pos()
* should be used for CPU index of the current CPU and
* plat_core_pos_by_mpidr() should be used for CPU index of a
* CPU specified by its mpidr.
* ------------------------------------------------------------------
*/
func platform_check_mpidr
mov x0, xzr
ret
endfunc platform_check_mpidr
func_deprecated platform_get_core_pos
bic x0, x0, #MPIDR_RES_BIT_MASK
mrs x1, mpidr_el1
bic x1, x1, #MPIDR_RES_BIT_MASK
cmp x0, x1
beq plat_my_core_pos
b platform_core_pos_helper
endfunc_deprecated platform_get_core_pos
#endif
/* -----------------------------------------------------
* Placeholder function which should be redefined by

View file

@ -30,13 +30,56 @@
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
.local platform_normal_stacks
.weak platform_set_stack
#if ENABLE_PLAT_COMPAT
.globl plat_get_my_stack
.globl plat_set_my_stack
.weak platform_get_stack
.weak platform_set_stack
#else
.weak plat_get_my_stack
.weak plat_set_my_stack
.globl platform_get_stack
.globl platform_set_stack
#endif /* ENABLE_PLAT_COMPAT */
#if ENABLE_PLAT_COMPAT
/* ---------------------------------------------------------------------
* When the compatibility layer is enabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() need to be
* defined using the previous APIs platform_get_stack() and
* platform_set_stack(). Also we need to provide weak definitions
* of platform_get_stack() and platform_set_stack() for the platforms
* to reuse.
* --------------------------------------------------------------------
*/
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mrs x0, mpidr_el1
b platform_get_stack
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mrs x0, mpidr_el1
b platform_set_stack
endfunc plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
@ -65,6 +108,85 @@ func platform_set_stack
ret x9
endfunc platform_set_stack
#else
/* ---------------------------------------------------------------------
* When the compatibility layer is disabled, the new platform APIs
* viz plat_get_my_stack() and plat_set_my_stack() are
* supported by the platform and the previous APIs platform_get_stack()
* and platform_set_stack() are defined in terms of new APIs making use
* of the fact that they are only ever invoked for the current CPU.
* This is to enable components of Trusted Firmware like SPDs using the
* old platform APIs to continue to work.
* --------------------------------------------------------------------
*/
/* -------------------------------------------------------
* unsigned long platform_get_stack (unsigned long mpidr)
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_get_my_stack()
* should be used instead.
* -------------------------------------------------------
*/
func_deprecated platform_get_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack (unsigned long mpidr)
*
* For the current CPU, this function sets the stack pointer
* to a stack allocated in normal memory. The
* 'mpidr' should correspond to that of the current CPU.
* This function is deprecated and plat_set_my_stack()
* should be used instead.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
#if ASM_ASSERTION
mrs x1, mpidr_el1
cmp x0, x1
ASM_ASSERT(eq)
#endif
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* -----------------------------------------------------
* unsigned long plat_get_my_stack ()
*
* For the current CPU, this function returns the stack
* pointer for a stack allocated in device memory.
* -----------------------------------------------------
*/
func plat_get_my_stack
mov x10, x30 // lr
get_my_mp_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret x10
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void plat_set_my_stack ()
*
* For the current CPU, this function sets the stack
* pointer to a stack allocated in normal memory.
* -----------------------------------------------------
*/
func plat_set_my_stack
mov x9, x30 // lr
bl plat_get_my_stack
mov sp, x0
ret x9
endfunc plat_set_my_stack
#endif /* ENABLE_PLAT_COMPAT */
/* -----------------------------------------------------
* Per-cpu stacks in normal memory. Each cpu gets a
* stack of PLATFORM_STACK_SIZE bytes.

View file

@ -34,35 +34,63 @@
.local platform_normal_stacks
.globl plat_set_my_stack
.globl plat_get_my_stack
.globl platform_set_stack
.globl platform_get_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack (unsigned long)
* unsigned long plat_get_my_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a
* stack allocated in device memory.
* -----------------------------------------------------
*/
func platform_get_stack
func plat_get_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
ret
endfunc platform_get_stack
endfunc plat_get_my_stack
/* -----------------------------------------------------
* void platform_set_stack (unsigned long)
* void plat_set_my_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack
* allocated in normal memory.
* -----------------------------------------------------
*/
func platform_set_stack
func plat_set_my_stack
get_up_stack platform_normal_stacks, PLATFORM_STACK_SIZE
mov sp, x0
ret
endfunc platform_set_stack
endfunc plat_set_my_stack
/* -----------------------------------------------------
* unsigned long platform_get_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function returns the stack pointer for a
* stack allocated in device memory. This function
* is deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_get_stack
b plat_get_my_stack
endfunc_deprecated platform_get_stack
/* -----------------------------------------------------
* void platform_set_stack ()
*
* For cold-boot BL images, only the primary CPU needs a
* stack. This function sets the stack pointer to a stack
* allocated in normal memory. This function is
* deprecated.
* -----------------------------------------------------
*/
func_deprecated platform_set_stack
b plat_set_my_stack
endfunc_deprecated platform_set_stack
/* -----------------------------------------------------
* Single cpu stack in normal memory.

View file

@ -0,0 +1,71 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
.globl plat_my_core_pos
.globl plat_is_my_cpu_primary
.globl plat_get_my_entrypoint
.weak platform_get_core_pos
/* -----------------------------------------------------
* Compatibility wrappers for new platform APIs.
* -----------------------------------------------------
*/
func plat_my_core_pos
mrs x0, mpidr_el1
b platform_get_core_pos
endfunc plat_my_core_pos
func plat_is_my_cpu_primary
mrs x0, mpidr_el1
b platform_is_primary_cpu
endfunc plat_is_my_cpu_primary
func plat_get_my_entrypoint
mrs x0, mpidr_el1
b platform_get_entrypoint
endfunc plat_get_my_entrypoint
/* -----------------------------------------------------
* int platform_get_core_pos(int mpidr);
* With this function: CorePos = (ClusterId * 4) +
* CoreId
* -----------------------------------------------------
*/
func platform_get_core_pos
and x1, x0, #MPIDR_CPU_MASK
and x0, x0, #MPIDR_CLUSTER_MASK
add x0, x1, x0, LSR #6
ret
endfunc platform_get_core_pos
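
For reference, a C sketch of the same computation (assuming the usual AArch64 MPIDR layout with the CPU ID in bits [7:0] and the cluster ID in bits [15:8]):

/*
 * Equivalent of the assembly above: masking the cluster field and
 * shifting it right by 6 yields ClusterId * 4, to which CoreId is
 * added.
 */
static inline unsigned int core_pos_sketch(unsigned long mpidr)
{
	unsigned long core_id = mpidr & 0xff;	/* MPIDR_CPU_MASK */
	unsigned long cluster = mpidr & 0xff00;	/* MPIDR_CLUSTER_MASK */

	return (unsigned int)(core_id + (cluster >> 6));
}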

View file

@ -0,0 +1,41 @@
#
# Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of ARM nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
ifeq (${PSCI_EXTENDED_STATE_ID}, 1)
$(error "PSCI Compatibility mode can be enabled only if \
PSCI_EXTENDED_STATE_ID is not set")
endif
PLAT_BL_COMMON_SOURCES += plat/compat/aarch64/plat_helpers_compat.S
BL31_SOURCES += plat/common/aarch64/plat_psci_common.c \
plat/compat/plat_pm_compat.c \
plat/compat/plat_topology_compat.c

View file

@ -0,0 +1,337 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <errno.h>
#include <platform.h>
#include <psci.h>
/*
* The platform hooks exported by the platform using the earlier version of
* platform interface
*/
const plat_pm_ops_t *pm_ops;
/*
* The hooks exported by the compatibility layer
*/
static plat_psci_ops_t compat_psci_ops;
/*
* The secure entry point to be used on warm reset.
*/
static unsigned long secure_entrypoint;
/*
* This array stores the 'power_state' requests of each CPU during
* CPU_SUSPEND and SYSTEM_SUSPEND to support querying of state-ID
* by the platform.
*/
unsigned int psci_power_state_compat[PLATFORM_CORE_COUNT];
/*******************************************************************************
* The PSCI compatibility helper to parse the power state and populate the
* 'pwr_domain_state' for each power level. It is assumed that, when in
* compatibility mode, the PSCI generic layer needs to know only whether the
* affinity level will be OFF or in RETENTION; if the platform supports
* multiple power-down and retention states, these are handled within
* the platform layer.
******************************************************************************/
static int parse_power_state(unsigned int power_state,
psci_power_state_t *req_state)
{
int i;
int pstate = psci_get_pstate_type(power_state);
int aff_lvl = psci_get_pstate_pwrlvl(power_state);
if (aff_lvl > PLATFORM_MAX_AFFLVL)
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
if (pstate == PSTATE_TYPE_STANDBY) {
/*
* Set the CPU local state as retention and ignore the higher
* levels. This allows the generic PSCI layer to invoke
* plat_psci_ops 'cpu_standby' hook and the compatibility
* layer invokes the 'affinst_standby' handler with the
* correct power_state parameter thus preserving the correct
* behavior.
*/
req_state->pwr_domain_state[0] =
PLAT_MAX_RET_STATE;
} else {
for (i = 0; i <= aff_lvl; i++)
req_state->pwr_domain_state[i] =
PLAT_MAX_OFF_STATE;
}
return PSCI_E_SUCCESS;
}
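
A worked example (this assumes the original PSCI power_state layout, where bit[16] holds the state type and bits[25:24] the affinity level):

/*
 * power_state = (1 << 16) | (1 << 24) requests a POWERDOWN at
 * affinity level 1, so parse_power_state() sets both
 * req_state->pwr_domain_state[0] and [1] to PLAT_MAX_OFF_STATE.
 * With bit[16] clear (STANDBY), only level 0 is populated, with
 * PLAT_MAX_RET_STATE, and the higher levels are ignored.
 */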
/*******************************************************************************
* The PSCI compatibility helper to set the 'power_state' in
* psci_power_state_compat[] at index corresponding to the current core.
******************************************************************************/
static void set_psci_power_state_compat(unsigned int power_state)
{
unsigned int my_core_pos = plat_my_core_pos();
psci_power_state_compat[my_core_pos] = power_state;
flush_dcache_range((uintptr_t) &psci_power_state_compat[my_core_pos],
sizeof(psci_power_state_compat[my_core_pos]));
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'validate_power_state'
* hook.
******************************************************************************/
static int validate_power_state_compat(unsigned int power_state,
psci_power_state_t *req_state)
{
int rc;
assert(req_state);
if (pm_ops->validate_power_state) {
rc = pm_ops->validate_power_state(power_state);
if (rc != PSCI_E_SUCCESS)
return rc;
}
/* Store the 'power_state' parameter for the current CPU. */
set_psci_power_state_compat(power_state);
return parse_power_state(power_state, req_state);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t
* 'get_sys_suspend_power_state' hook.
******************************************************************************/
void get_sys_suspend_power_state_compat(psci_power_state_t *req_state)
{
unsigned int power_state;
assert(req_state);
power_state = pm_ops->get_sys_suspend_power_state();
/* Store the 'power_state' parameter for the current CPU. */
set_psci_power_state_compat(power_state);
if (parse_power_state(power_state, req_state) != PSCI_E_SUCCESS)
assert(0);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'validate_ns_entrypoint'
* hook.
******************************************************************************/
static int validate_ns_entrypoint_compat(uintptr_t ns_entrypoint)
{
return pm_ops->validate_ns_entrypoint(ns_entrypoint);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_standby' hook.
******************************************************************************/
static void cpu_standby_compat(plat_local_state_t cpu_state)
{
unsigned int powerstate = psci_get_suspend_powerstate();
assert(powerstate != PSCI_INVALID_DATA);
pm_ops->affinst_standby(powerstate);
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_on' hook.
******************************************************************************/
static int pwr_domain_on_compat(u_register_t mpidr)
{
int level, rc;
/*
* The new PSCI framework does not hold the locks for higher level
* power domain nodes when this hook is invoked. Hence figuring out the
* target state of the parent power domains does not make much sense.
* Hence we hard-code the state as PSCI_STATE_OFF for all the levels.
* We expect the platform to perform the necessary CPU_ON operations
* only when 'affinst_on' is invoked for level 0.
*/
for (level = PLATFORM_MAX_AFFLVL; level >= 0; level--) {
rc = pm_ops->affinst_on((unsigned long)mpidr, secure_entrypoint,
level, PSCI_STATE_OFF);
if (rc != PSCI_E_SUCCESS)
break;
}
return rc;
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_off' hook.
******************************************************************************/
static void pwr_domain_off_compat(const psci_power_state_t *target_state)
{
int level;
unsigned int plat_state;
for (level = 0; level <= PLATFORM_MAX_AFFLVL; level++) {
plat_state = (is_local_state_run(
target_state->pwr_domain_state[level]) ?
PSCI_STATE_ON : PSCI_STATE_OFF);
pm_ops->affinst_off(level, plat_state);
}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_suspend' hook.
******************************************************************************/
static void pwr_domain_suspend_compat(const psci_power_state_t *target_state)
{
int level;
unsigned int plat_state;
for (level = 0; level <= psci_get_suspend_afflvl(); level++) {
plat_state = (is_local_state_run(
target_state->pwr_domain_state[level]) ?
PSCI_STATE_ON : PSCI_STATE_OFF);
pm_ops->affinst_suspend(secure_entrypoint, level, plat_state);
}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'affinst_on_finish'
* hook.
******************************************************************************/
static void pwr_domain_on_finish_compat(const psci_power_state_t *target_state)
{
int level;
unsigned int plat_state;
for (level = PLATFORM_MAX_AFFLVL; level >= 0; level--) {
plat_state = (is_local_state_run(
target_state->pwr_domain_state[level]) ?
PSCI_STATE_ON : PSCI_STATE_OFF);
pm_ops->affinst_on_finish(level, plat_state);
}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t
* 'affinst_suspend_finish' hook.
******************************************************************************/
static void pwr_domain_suspend_finish_compat(
const psci_power_state_t *target_state)
{
int level;
unsigned int plat_state;
for (level = psci_get_suspend_afflvl(); level >= 0; level--) {
plat_state = (is_local_state_run(
target_state->pwr_domain_state[level]) ?
PSCI_STATE_ON : PSCI_STATE_OFF);
pm_ops->affinst_suspend_finish(level, plat_state);
}
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'system_off' hook.
******************************************************************************/
static void __dead2 system_off_compat(void)
{
pm_ops->system_off();
}
/*******************************************************************************
* The PSCI compatibility helper for plat_pm_ops_t 'system_reset' hook.
******************************************************************************/
static void __dead2 system_reset_compat(void)
{
pm_ops->system_reset();
}
/*******************************************************************************
* Export the compatibility compat_psci_ops. The assumption made is that the
* power domains correspond to affinity instances on the platform.
******************************************************************************/
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
const plat_psci_ops_t **psci_ops)
{
platform_setup_pm(&pm_ops);
secure_entrypoint = (unsigned long) sec_entrypoint;
/*
* It is compulsory for the platform ports using the new porting
* interface to export a hook to validate the power state parameter
*/
compat_psci_ops.validate_power_state = validate_power_state_compat;
/*
* Populate the compatibility plat_psci_ops_t hooks if available
*/
if (pm_ops->validate_ns_entrypoint)
compat_psci_ops.validate_ns_entrypoint =
validate_ns_entrypoint_compat;
if (pm_ops->affinst_standby)
compat_psci_ops.cpu_standby = cpu_standby_compat;
if (pm_ops->affinst_on)
compat_psci_ops.pwr_domain_on = pwr_domain_on_compat;
if (pm_ops->affinst_off)
compat_psci_ops.pwr_domain_off = pwr_domain_off_compat;
if (pm_ops->affinst_suspend)
compat_psci_ops.pwr_domain_suspend = pwr_domain_suspend_compat;
if (pm_ops->affinst_on_finish)
compat_psci_ops.pwr_domain_on_finish =
pwr_domain_on_finish_compat;
if (pm_ops->affinst_suspend_finish)
compat_psci_ops.pwr_domain_suspend_finish =
pwr_domain_suspend_finish_compat;
if (pm_ops->system_off)
compat_psci_ops.system_off = system_off_compat;
if (pm_ops->system_reset)
compat_psci_ops.system_reset = system_reset_compat;
if (pm_ops->get_sys_suspend_power_state)
compat_psci_ops.get_sys_suspend_power_state =
get_sys_suspend_power_state_compat;
*psci_ops = &compat_psci_ops;
return 0;
}
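
For orientation, a minimal sketch of how a legacy platform port would plug into this layer; the handler names are placeholders and their signatures are inferred from how the compat layer invokes them above:

/* Hypothetical legacy handlers. */
static int my_affinst_on(unsigned long mpidr, unsigned long sec_entrypoint,
			 unsigned int afflvl, unsigned int state);
static void my_affinst_on_finish(unsigned int afflvl, unsigned int state);
static int my_validate_power_state(unsigned int power_state);

static const plat_pm_ops_t legacy_ops = {
	.affinst_on		= my_affinst_on,
	.affinst_on_finish	= my_affinst_on_finish,
	.validate_power_state	= my_validate_power_state,
};

int32_t platform_setup_pm(const plat_pm_ops_t **plat_ops)
{
	*plat_ops = &legacy_ops;
	return 0;
}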

View file

@ -0,0 +1,218 @@
/*
* Copyright (c) 2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch_helpers.h>
#include <assert.h>
#include <platform.h>
#include <platform_def.h>
#include <psci.h>
/* The power domain tree descriptor */
static unsigned char power_domain_tree_desc
[PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1];
/*******************************************************************************
* Simple routine to set the id of an affinity instance at a given level
* in the mpidr. The assumption is that the affinity level and the power
* domain level are the same.
******************************************************************************/
unsigned long mpidr_set_aff_inst(unsigned long mpidr,
unsigned char aff_inst,
int aff_lvl)
{
unsigned long aff_shift;
assert(aff_lvl <= MPIDR_AFFLVL3);
/*
* Decide the number of bits to shift by depending upon
* the power level
*/
aff_shift = get_afflvl_shift(aff_lvl);
/* Clear the existing affinity instance & set the new one */
mpidr &= ~((unsigned long)MPIDR_AFFLVL_MASK << aff_shift);
mpidr |= (unsigned long)aff_inst << aff_shift;
return mpidr;
}
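
For example (affinity level 1 occupies MPIDR bits [15:8] on AArch64):

/*
 * mpidr_set_aff_inst(0, 2, MPIDR_AFFLVL1) returns 0x200: the
 * level-1 field is cleared and then set to affinity instance 2.
 */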
/******************************************************************************
* This function uses insertion sort to sort a given list of mpidr's in the
* ascending order of the index returned by platform_get_core_pos.
*****************************************************************************/
void sort_mpidr_by_cpu_idx(unsigned int aff_count, unsigned long mpidr_list[])
{
int i, j;
unsigned long temp_mpidr;
for (i = 1; i < aff_count; i++) {
temp_mpidr = mpidr_list[i];
for (j = i;
j > 0 &&
platform_get_core_pos(mpidr_list[j-1]) >
platform_get_core_pos(temp_mpidr);
j--)
mpidr_list[j] = mpidr_list[j-1];
mpidr_list[j] = temp_mpidr;
}
}
/*******************************************************************************
* The compatibility routine to construct the power domain tree description.
* The assumption made is that the power domains correspond to affinity
* instances on the platform. This routine's aim is to traverse to the target
* affinity level and populate the number of siblings at that level in
* 'power_domain_tree_desc' array. It uses the current affinity level to keep
* track of how many levels from the root of the tree have been traversed.
* If the current affinity level != target affinity level, then the platform
* is asked to return the number of children that each affinity instance has
* at the current affinity level. Traversal is then done for each child at the
* next lower level i.e. current affinity level - 1.
*
* The power domain description needs to be constructed in such a way that
* affinity instances containing CPUs with lower cpu indices need to be
* described first. Hence when traversing the power domain levels, the list
* of mpidrs at that power domain level is sorted in the ascending order of CPU
* indices before the lower levels are recursively described.
*
* CAUTION: This routine assumes that affinity instance ids are allocated in a
* monotonically increasing manner at each affinity level in a mpidr starting
* from 0. If the platform breaks this assumption then this code will have to
* be reworked accordingly.
******************************************************************************/
static unsigned int init_pwr_domain_tree_desc(unsigned long mpidr,
unsigned int affmap_idx,
int cur_afflvl,
int tgt_afflvl)
{
unsigned int ctr, aff_count;
/*
* Temporary list to hold the MPIDR list at a particular power domain
* level so as to sort them.
*/
unsigned long mpidr_list[PLATFORM_CORE_COUNT];
assert(cur_afflvl >= tgt_afflvl);
/*
* Find the number of siblings at the current power level &
* assert if there are none, because then we have been invoked with
* an invalid mpidr.
*/
aff_count = plat_get_aff_count(cur_afflvl, mpidr);
assert(aff_count);
if (tgt_afflvl < cur_afflvl) {
for (ctr = 0; ctr < aff_count; ctr++) {
mpidr_list[ctr] = mpidr_set_aff_inst(mpidr, ctr,
cur_afflvl);
}
/* Need to sort mpidr list according to CPU index */
sort_mpidr_by_cpu_idx(aff_count, mpidr_list);
for (ctr = 0; ctr < aff_count; ctr++) {
affmap_idx = init_pwr_domain_tree_desc(mpidr_list[ctr],
affmap_idx,
cur_afflvl - 1,
tgt_afflvl);
}
} else {
power_domain_tree_desc[affmap_idx++] = aff_count;
}
return affmap_idx;
}
/*******************************************************************************
* This function constructs the topology tree description at runtime
* and returns it. The assumption made is that the power domains correspond
* to affinity instances on the platform.
******************************************************************************/
const unsigned char *plat_get_power_domain_tree_desc(void)
{
int afflvl, affmap_idx;
/*
* We assume that the platform allocates affinity instance ids from
* 0 onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
*/
affmap_idx = 0;
for (afflvl = PLATFORM_MAX_AFFLVL; afflvl >= MPIDR_AFFLVL0; afflvl--) {
affmap_idx = init_pwr_domain_tree_desc(FIRST_MPIDR,
affmap_idx,
PLATFORM_MAX_AFFLVL,
afflvl);
}
assert(affmap_idx == (PLATFORM_NUM_AFFS - PLATFORM_CORE_COUNT + 1));
return power_domain_tree_desc;
}
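
To illustrate the result (the counts are hypothetical):

/*
 * For a platform with PLATFORM_MAX_AFFLVL = 1, two clusters and four
 * CPUs per cluster, the two passes populate:
 *   pass at afflvl 1: power_domain_tree_desc[0] = 2  (cluster count)
 *   pass at afflvl 0: power_domain_tree_desc[1] = 4  (CPUs, cluster 0)
 *                     power_domain_tree_desc[2] = 4  (CPUs, cluster 1)
 * i.e. { 2, 4, 4 }, with affmap_idx ending at 3, which matches
 * PLATFORM_NUM_AFFS (10) - PLATFORM_CORE_COUNT (8) + 1.
 */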
/******************************************************************************
* The compatibility helper function for plat_core_pos_by_mpidr(). It
* validates the 'mpidr' by making sure that it is within acceptable bounds
* for the platform and queries the platform layer whether the CPU specified
* by the mpidr is present or not. If present, it returns the index of the
* core corresponding to the 'mpidr'. Else it returns -1.
*****************************************************************************/
int plat_core_pos_by_mpidr(u_register_t mpidr)
{
unsigned long shift, aff_inst;
int i;
/* Ignore the Reserved bits and U bit in MPIDR */
mpidr &= MPIDR_AFFINITY_MASK;
/*
* Check if any affinity field higher than
* the PLATFORM_MAX_AFFLVL is set.
*/
shift = get_afflvl_shift(PLATFORM_MAX_AFFLVL + 1);
if (mpidr >> shift)
return -1;
for (i = PLATFORM_MAX_AFFLVL; i >= 0; i--) {
shift = get_afflvl_shift(i);
aff_inst = ((mpidr &
((unsigned long)MPIDR_AFFLVL_MASK << shift)) >> shift);
if (aff_inst >= plat_get_aff_count(i, mpidr))
return -1;
}
if (plat_get_aff_state(0, mpidr) == PSCI_AFF_ABSENT)
return -1;
return platform_get_core_pos(mpidr);
}
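
For instance (assuming PLATFORM_MAX_AFFLVL = 1 and two clusters):

/*
 * - mpidr 0x300 names affinity instance 3 at level 1; that is
 *   >= plat_get_aff_count(1, mpidr) == 2, so the result is -1.
 * - an mpidr with any bits set above the level-1 field is rejected
 *   by the initial shift check.
 * - a valid mpidr whose CPU is reported PSCI_AFF_ABSENT also
 *   returns -1; only a present CPU reaches platform_get_core_pos().
 */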

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -80,7 +80,6 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
void *cookie)
{
uint32_t linear_id;
uint64_t mpidr;
optee_context_t *optee_ctx;
/* Check the security state when the exception was generated */
@ -92,14 +91,13 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
#endif
/* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(NON_SECURE));
/* Save the non-secure context before entering the OPTEE */
cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's OPTEE context */
linear_id = platform_get_core_pos(mpidr);
linear_id = plat_my_core_pos();
optee_ctx = &opteed_sp_context[linear_id];
assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));
@ -125,10 +123,9 @@ static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
int32_t opteed_setup(void)
{
entry_point_info_t *optee_ep_info;
uint64_t mpidr = read_mpidr();
uint32_t linear_id;
linear_id = platform_get_core_pos(mpidr);
linear_id = plat_my_core_pos();
/*
* Get information about the Secure Payload (BL32) image. Its
@ -182,8 +179,7 @@ int32_t opteed_setup(void)
******************************************************************************/
static int32_t opteed_init(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
entry_point_info_t *optee_entry_point;
uint64_t rc;
@ -195,7 +191,7 @@ static int32_t opteed_init(void)
optee_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(optee_entry_point);
cm_init_context(mpidr, optee_entry_point);
cm_init_my_context(optee_entry_point);
/*
* Arrange for an entry into OPTEE. It will be returned via
@ -226,8 +222,7 @@ uint64_t opteed_smc_handler(uint32_t smc_fid,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
uint64_t rc;

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -51,8 +51,7 @@ static void opteed_cpu_on_handler(uint64_t target_cpu)
static int32_t opteed_cpu_off_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors);
@ -85,8 +84,7 @@ static int32_t opteed_cpu_off_handler(uint64_t unused)
static void opteed_cpu_suspend_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors);
@ -116,8 +114,7 @@ static void opteed_cpu_suspend_handler(uint64_t unused)
static void opteed_cpu_on_finish_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
entry_point_info_t optee_on_entrypoint;
@ -129,7 +126,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused)
optee_ctx);
/* Initialise this cpu's secure context */
cm_init_context(mpidr, &optee_on_entrypoint);
cm_init_my_context(&optee_on_entrypoint);
/* Enter OPTEE */
rc = opteed_synchronous_sp_entry(optee_ctx);
@ -153,8 +150,7 @@ static void opteed_cpu_on_finish_handler(uint64_t unused)
static void opteed_cpu_suspend_finish_handler(uint64_t suspend_level)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors);
@ -193,8 +189,7 @@ static int32_t opteed_cpu_migrate_info(uint64_t *resident_cpu)
******************************************************************************/
static void opteed_system_off(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors);
@ -214,8 +209,7 @@ static void opteed_system_off(void)
******************************************************************************/
static void opteed_system_reset(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
assert(optee_vectors);

View file

@ -121,7 +121,6 @@ int32_t tlkd_setup(void)
******************************************************************************/
int32_t tlkd_init(void)
{
uint64_t mpidr = read_mpidr();
entry_point_info_t *tlk_entry_point;
/*
@ -131,7 +130,7 @@ int32_t tlkd_init(void)
tlk_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(tlk_entry_point);
cm_init_context(mpidr, tlk_entry_point);
cm_init_my_context(tlk_entry_point);
/*
* Arrange for an entry into the test secure payload.

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -101,7 +101,6 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
void *cookie)
{
uint32_t linear_id;
uint64_t mpidr;
tsp_context_t *tsp_ctx;
/* Check the security state when the exception was generated */
@ -113,14 +112,13 @@ static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
#endif
/* Sanity check the pointer to this cpu's context */
mpidr = read_mpidr();
assert(handle == cm_get_context(NON_SECURE));
/* Save the non-secure context before entering the TSP */
cm_el1_sysregs_context_save(NON_SECURE);
/* Get a reference to this cpu's TSP context */
linear_id = platform_get_core_pos(mpidr);
linear_id = plat_my_core_pos();
tsp_ctx = &tspd_sp_context[linear_id];
assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));
@ -197,10 +195,9 @@ static uint64_t tspd_ns_interrupt_handler(uint32_t id,
int32_t tspd_setup(void)
{
entry_point_info_t *tsp_ep_info;
uint64_t mpidr = read_mpidr();
uint32_t linear_id;
linear_id = platform_get_core_pos(mpidr);
linear_id = plat_my_core_pos();
/*
* Get information about the Secure Payload (BL32) image. Its
@ -256,8 +253,7 @@ int32_t tspd_setup(void)
******************************************************************************/
int32_t tspd_init(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
entry_point_info_t *tsp_entry_point;
uint64_t rc;
@ -269,7 +265,7 @@ int32_t tspd_init(void)
tsp_entry_point = bl31_plat_get_next_image_ep_info(SECURE);
assert(tsp_entry_point);
cm_init_context(mpidr, tsp_entry_point);
cm_init_my_context(tsp_entry_point);
/*
* Arrange for an entry into the test secure payload. It will be
@ -300,8 +296,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
uint64_t flags)
{
cpu_context_t *ns_cpu_context;
unsigned long mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr), ns;
uint32_t linear_id = plat_my_core_pos(), ns;
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
uint64_t rc;
#if TSP_INIT_ASYNC
@ -453,7 +448,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
/*
* Disable the interrupt NS locally since it will be enabled globally
* within cm_init_context.
* within cm_init_my_context.
*/
disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
@ -471,7 +466,7 @@ uint64_t tspd_smc_handler(uint32_t smc_fid,
assert(NON_SECURE ==
GET_SECURITY_STATE(next_image_info->h.attr));
cm_init_context(read_mpidr_el1(), next_image_info);
cm_init_my_context(next_image_info);
cm_prepare_el3_exit(NON_SECURE);
SMC_RET0(cm_get_context(NON_SECURE));
#else

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -52,8 +52,7 @@ static void tspd_cpu_on_handler(uint64_t target_cpu)
static int32_t tspd_cpu_off_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors);
@ -86,8 +85,7 @@ static int32_t tspd_cpu_off_handler(uint64_t unused)
static void tspd_cpu_suspend_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors);
@ -117,8 +115,7 @@ static void tspd_cpu_suspend_handler(uint64_t unused)
static void tspd_cpu_on_finish_handler(uint64_t unused)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
entry_point_info_t tsp_on_entrypoint;
@ -131,12 +128,12 @@ static void tspd_cpu_on_finish_handler(uint64_t unused)
tsp_ctx);
/* Initialise this cpu's secure context */
cm_init_context(mpidr, &tsp_on_entrypoint);
cm_init_my_context(&tsp_on_entrypoint);
#if TSPD_ROUTE_IRQ_TO_EL3
/*
* Disable the NS interrupt locally since it will be enabled globally
* within cm_init_context.
* within cm_init_my_context.
*/
disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
@ -163,8 +160,7 @@ static void tspd_cpu_on_finish_handler(uint64_t unused)
static void tspd_cpu_suspend_finish_handler(uint64_t suspend_level)
{
int32_t rc = 0;
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors);
@ -203,8 +199,7 @@ static int32_t tspd_cpu_migrate_info(uint64_t *resident_cpu)
******************************************************************************/
static void tspd_system_off(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors);
@ -224,8 +219,7 @@ static void tspd_system_off(void)
******************************************************************************/
static void tspd_system_reset(void)
{
uint64_t mpidr = read_mpidr();
uint32_t linear_id = platform_get_core_pos(mpidr);
uint32_t linear_id = plat_my_core_pos();
tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
assert(tsp_vectors);

View file

@ -1,248 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <string.h>
#include "psci_private.h"
typedef void (*afflvl_off_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is turned off.
******************************************************************************/
static void psci_afflvl0_off(aff_map_node_t *cpu_node)
{
assert(cpu_node->level == MPIDR_AFFLVL0);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cpu_node->level,
psci_get_phys_state(cpu_node));
}
static void psci_afflvl1_off(aff_map_node_t *cluster_node)
{
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Arch. Management. Flush all levels of caches to PoC if
* the cluster is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
/*
* Plat. Management. Allow the platform to do its cluster
* specific bookkeeping e.g. turn off interconnect coherency,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_off(cluster_node->level,
psci_get_phys_state(cluster_node));
}
static void psci_afflvl2_off(aff_map_node_t *system_node)
{
/* Cannot go beyond this level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
/*
* Arch. Management. Flush all levels of caches to PoC if
* the system is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
/*
* Plat. Management: Allow the platform to do its bookkeeping
* at this affinity level
*/
psci_plat_pm_ops->affinst_off(system_node->level,
psci_get_phys_state(system_node));
}
static const afflvl_off_handler_t psci_afflvl_off_handlers[] = {
psci_afflvl0_off,
psci_afflvl1_off,
psci_afflvl2_off,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the off handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_off_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_off_handlers[level](node);
}
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu off, higher affinity levels will
* be turned off as far as possible. It traverses through all the affinity
* levels performing generic, architectural, platform setup and state management
* e.g. for a cluster that's to be powered off, it will call the platform
* specific code which will disable coherency at the interconnect level if the
* cpu is the last in the cluster. For a cpu it could mean programming
* the power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
* to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
******************************************************************************/
int psci_afflvl_off(int start_afflvl,
int end_afflvl)
{
int rc;
mpidr_aff_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl;
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_off);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
* levels are incorrect. Either way, this is an internal TF error
* therefore assert.
*/
rc = psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl,
end_afflvl,
mpidr_nodes);
assert(rc == PSCI_E_SUCCESS);
/*
* This function acquires the lock corresponding to each affinity
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
/*
* Call the cpu off handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. Assume that the SPD always reports an
* E_DENIED error if the SP refuses to power down
*/
if (psci_spd_pm && psci_spd_pm->svc_off) {
rc = psci_spd_pm->svc_off(0);
if (rc)
goto exit;
}
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
mpidr_nodes,
PSCI_STATE_OFF);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/* Stash the highest affinity level that will enter the OFF state. */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/* Perform generic, architecture and platform specific handling */
psci_call_off_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
*
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
exit:
/*
* Release the locks corresponding to each affinity level in the
* reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
/*
* Check if all actions needed to safely power down this cpu have
* successfully completed. Enter a wfi loop which will allow the
* power controller to physically power down this cpu.
*/
if (rc == PSCI_E_SUCCESS)
psci_power_down_wfi();
return rc;
}
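
The acquire/release pairing above is what keeps the topology snapshot safe: every caller takes the per-level locks in ascending order and drops them in descending order, so two CPUs contending for overlapping levels cannot deadlock. A self-contained sketch of that discipline, using POSIX mutexes in place of the firmware's bakery locks:

#include <pthread.h>

#define MAX_AFFLVL	2	/* cpu = 0, cluster = 1, system = 2 */

static pthread_mutex_t afflvl_locks[MAX_AFFLVL + 1] = {
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER,
	PTHREAD_MUTEX_INITIALIZER
};

/* Take the per-level locks from the lowest level upwards... */
static void acquire_afflvl_locks(int start_afflvl, int end_afflvl)
{
	for (int lvl = start_afflvl; lvl <= end_afflvl; lvl++)
		pthread_mutex_lock(&afflvl_locks[lvl]);
}

/* ...and always release them in the reverse order. */
static void release_afflvl_locks(int start_afflvl, int end_afflvl)
{
	for (int lvl = end_afflvl; lvl >= start_afflvl; lvl--)
		pthread_mutex_unlock(&afflvl_locks[lvl]);
}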

View file

@ -1,405 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
typedef int (*afflvl_on_handler_t)(unsigned long target_cpu,
aff_map_node_t *node);
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
static int cpu_on_validate_state(unsigned int psci_state)
{
if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
return PSCI_E_ALREADY_ON;
if (psci_state == PSCI_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
assert(psci_state == PSCI_STATE_OFF);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* Handler routine to turn a cpu on. It takes care of any generic, architectural
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl0_on(unsigned long target_cpu,
aff_map_node_t *cpu_node)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
/*******************************************************************************
* Handler routine to turn a cluster on. It takes care of any generic, arch.
* or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl1_on(unsigned long target_cpu,
aff_map_node_t *cluster_node)
{
unsigned long psci_entrypoint;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* There is no generic or arch. specific cluster
* management required
*/
/* State management: Is not required while turning a cluster on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
cluster_node->level,
psci_get_phys_state(cluster_node));
}
/*******************************************************************************
* Handler routine to turn a cluster of clusters on. It takes care of any
* generic, arch. or platform specific setup required.
* TODO: Split this code across separate handlers for each type of setup?
******************************************************************************/
static int psci_afflvl2_on(unsigned long target_cpu,
aff_map_node_t *system_node)
{
unsigned long psci_entrypoint;
/* Cannot go beyond affinity level 2 in this psci imp. */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* There is no generic or arch. specific system management
* required
*/
/* State management: Is not required while turning a system on */
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
psci_entrypoint = (unsigned long) psci_aff_on_finish_entry;
return psci_plat_pm_ops->affinst_on(target_cpu,
psci_entrypoint,
system_node->level,
psci_get_phys_state(system_node));
}
/* Private data structure to make these handlers accessible through indexing */
static const afflvl_on_handler_t psci_afflvl_on_handlers[] = {
psci_afflvl0_on,
psci_afflvl1_on,
psci_afflvl2_on,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the on handler for the corresponding affinity
* levels
******************************************************************************/
static int psci_call_on_handlers(aff_map_node_t *target_cpu_nodes[],
int start_afflvl,
int end_afflvl,
unsigned long target_cpu)
{
int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node;
for (level = end_afflvl; level >= start_afflvl; level--) {
node = target_cpu_nodes[level];
if (node == NULL)
continue;
/*
* TODO: In case of an error, should there be a way
* of undoing what we might have set up at higher
* affinity levels?
*/
rc = psci_afflvl_on_handlers[level](target_cpu,
node);
if (rc != PSCI_E_SUCCESS)
break;
}
return rc;
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It traverses through all the affinity levels performing generic,
* architectural, platform setup and state management e.g. for a cpu that is
* to be powered on, it will ensure that enough information is stashed for it
* to resume execution in the non-secure security state.
*
* The state of all the relevant affinity levels is changed after calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is currently in.
*
* The affinity level specific handlers are called in descending order i.e. from
* the highest to the lowest affinity level implemented by the platform because
* to turn on affinity level X it is necessary to turn on affinity level X + 1
* first.
******************************************************************************/
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
{
int rc;
mpidr_aff_map_nodes_t target_cpu_nodes;
/*
* This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_on &&
psci_plat_pm_ops->affinst_on_finish);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
* levels are incorrect.
*/
rc = psci_get_aff_map_nodes(target_cpu,
start_afflvl,
end_afflvl,
target_cpu_nodes);
assert(rc == PSCI_E_SUCCESS);
/*
* This function acquires the lock corresponding to each affinity
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
target_cpu_nodes);
/*
* Generic management: Ensure that the cpu is off to be
* turned on.
*/
rc = cpu_on_validate_state(psci_get_state(
target_cpu_nodes[MPIDR_AFFLVL0]));
if (rc != PSCI_E_SUCCESS)
goto exit;
/*
* Call the cpu on handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. If the handler encounters an error, it's
* expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_on)
psci_spd_pm->svc_on(target_cpu);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_ON_PENDING);
/* Perform generic, architecture and platform specific handling. */
rc = psci_call_on_handlers(target_cpu_nodes,
start_afflvl,
end_afflvl,
target_cpu);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
cm_init_context(target_cpu, ep);
else
/* Restore the state on error. */
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
target_cpu_nodes,
PSCI_STATE_OFF);
exit:
/*
* This loop releases the lock corresponding to each affinity level
* in the reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
target_cpu_nodes);
return rc;
}
/*******************************************************************************
* The following functions finish an earlier affinity power on request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static void psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
unsigned int plat_state, state;
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Ensure we have been explicitly woken up by another cpu */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_ON_PENDING);
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_on_finish(cpu_node->level,
plat_state);
/*
* Arch. management: Enable data cache and manage stack memory
*/
psci_do_pwrup_cache_maintenance();
/*
* All the platform specific actions for turning this cpu
* on have completed. Perform enough arch. initialization
* to run in the non-secure address space.
*/
bl31_arch_setup();
/*
* Call the cpu on finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_on_finish)
psci_spd_pm->svc_on_finish(0);
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the cpu_on
* call to set this cpu on its way.
*/
cm_prepare_el3_exit(NON_SECURE);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}
static void psci_afflvl1_on_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_on_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_on_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
psci_afflvl0_on_finish,
psci_afflvl1_on_finish,
psci_afflvl2_on_finish,
};
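
Both finisher tables (this one and psci_afflvl_on_finishers above) are consumed the same way: the warm-boot entry code picks a table according to why the CPU was reset, and the common finish routine walks it from affinity level 0 upwards. A sketch of that walk, with the loop written out for illustration; aff_map_node_t is left opaque here:

typedef struct aff_map_node aff_map_node_t;	/* opaque for this sketch */
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *node);

static void call_power_on_finishers(const afflvl_power_on_finisher_t *finishers,
				    aff_map_node_t *nodes[],
				    int start_afflvl, int end_afflvl)
{
	for (int lvl = start_afflvl; lvl <= end_afflvl; lvl++) {
		if (nodes[lvl] != NULL)
			finishers[lvl](nodes[lvl]);
	}
}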

View file

@ -1,469 +0,0 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
typedef void (*afflvl_suspend_handler_t)(aff_map_node_t *node);
/*******************************************************************************
* This function saves the power state parameter passed in the current PSCI
* cpu_suspend call in the per-cpu data array.
******************************************************************************/
void psci_set_suspend_power_state(unsigned int power_state)
{
set_cpu_data(psci_svc_cpu_data.power_state, power_state);
flush_cpu_data(psci_svc_cpu_data.power_state);
}
/*******************************************************************************
* This function gets the affinity level up to which the current cpu could be
* powered down during a cpu_suspend call. Returns PSCI_INVALID_DATA if the
* power state is invalid.
******************************************************************************/
int psci_get_suspend_afflvl(void)
{
unsigned int power_state;
power_state = get_cpu_data(psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_afflvl(power_state));
}
/*******************************************************************************
* This function gets the state id of the current cpu from the power state
* parameter saved in the per-cpu data array. Returns PSCI_INVALID_DATA if the
* power state saved is invalid.
******************************************************************************/
int psci_get_suspend_stateid(void)
{
unsigned int power_state;
power_state = get_cpu_data(psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_id(power_state));
}
/*******************************************************************************
* This function gets the state id of the cpu specified by the 'mpidr' parameter
* from the power state parameter saved in the per-cpu data array. Returns
* PSCI_INVALID_DATA if the power state saved is invalid.
******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
unsigned int power_state;
power_state = get_cpu_data_by_mpidr(mpidr,
psci_svc_cpu_data.power_state);
return ((power_state == PSCI_INVALID_DATA) ?
power_state : psci_get_pstate_id(power_state));
}
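
The three getters above only unpack fields from the saved power_state value. Assuming the original (non-extended) PSCI v0.2 layout, with the StateID in bits [15:0], the StateType in bit [16] and the target affinity level in bits [25:24], the extraction is plain shift-and-mask work; a sketch with the field positions written out rather than taken from psci.h:

#include <stdint.h>

#define PSTATE_ID_SHIFT		0
#define PSTATE_ID_MASK		0xffffu
#define PSTATE_TYPE_SHIFT	16
#define PSTATE_TYPE_MASK	0x1u
#define PSTATE_AFFLVL_SHIFT	24
#define PSTATE_AFFLVL_MASK	0x3u

static inline unsigned int pstate_id(uint32_t power_state)
{
	return (power_state >> PSTATE_ID_SHIFT) & PSTATE_ID_MASK;
}

static inline unsigned int pstate_type(uint32_t power_state)
{
	/* 0 = standby/retention, 1 = powerdown */
	return (power_state >> PSTATE_TYPE_SHIFT) & PSTATE_TYPE_MASK;
}

static inline unsigned int pstate_afflvl(uint32_t power_state)
{
	return (power_state >> PSTATE_AFFLVL_SHIFT) & PSTATE_AFFLVL_MASK;
}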
/*******************************************************************************
* The next three functions implement a handler for each supported affinity
* level which is called when that affinity level is about to be suspended.
******************************************************************************/
static void psci_afflvl0_suspend(aff_map_node_t *cpu_node)
{
unsigned long psci_entrypoint;
/* Sanity check to safeguard against data corruption */
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Set the secure world (EL3) re-entry point after BL1 */
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cpu_node->level,
psci_get_phys_state(cpu_node));
}
static void psci_afflvl1_suspend(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Sanity check the cluster level */
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Arch. management: Flush all levels of caches to PoC if the
* cluster is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);
/*
* Plat. Management. Allow the platform to do its cluster specific
* bookkeeping e.g. turn off interconnect coherency, program the power
* controller etc. Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a platform might
* do. Also it allows us to keep the platform handler prototype the
* same.
*/
plat_state = psci_get_phys_state(cluster_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
cluster_node->level,
plat_state);
}
static void psci_afflvl2_suspend(aff_map_node_t *system_node)
{
unsigned int plat_state;
unsigned long psci_entrypoint;
/* Cannot go beyond this */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Keep the physical state of the system handy to decide what
* action needs to be taken
*/
plat_state = psci_get_phys_state(system_node);
/*
* Arch. management: Flush all levels of caches to PoC if the
* system is to be shutdown.
*/
psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);
/*
* Plat. Management: Allow the platform to do its bookkeeping
* at this affinity level
*/
/*
* Sending the psci entrypoint is currently redundant
* beyond affinity level 0 but one never knows what a
* platform might do. Also it allows us to keep the
* platform handler prototype the same.
*/
plat_state = psci_get_phys_state(system_node);
psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
psci_plat_pm_ops->affinst_suspend(psci_entrypoint,
system_node->level,
plat_state);
}
static const afflvl_suspend_handler_t psci_afflvl_suspend_handlers[] = {
psci_afflvl0_suspend,
psci_afflvl1_suspend,
psci_afflvl2_suspend,
};
/*******************************************************************************
* This function takes an array of pointers to affinity instance nodes in the
* topology tree and calls the suspend handler for the corresponding affinity
* levels
******************************************************************************/
static void psci_call_suspend_handlers(aff_map_node_t *mpidr_nodes[],
int start_afflvl,
int end_afflvl)
{
int level;
aff_map_node_t *node;
for (level = start_afflvl; level <= end_afflvl; level++) {
node = mpidr_nodes[level];
if (node == NULL)
continue;
psci_afflvl_suspend_handlers[level](node);
}
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with turning the cpu off, higher affinity levels
* until the target affinity level will be turned off as well. It traverses
* through all the affinity levels performing generic, architectural, platform
* setup and state management e.g. for a cluster that's to be suspended, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster. For a cpu it could
* mean programming the power controller etc.
*
* The state of all the relevant affinity levels is changed prior to calling the
* affinity level specific handlers as their actions would depend upon the state
* the affinity level is about to enter.
*
* The affinity level specific handlers are called in ascending order i.e. from
* the lowest to the highest affinity level implemented by the platform because
* to turn off affinity level X it is necessary to turn off affinity level X - 1
* first.
*
* All the required parameter checks are performed at the beginning and after
* the state transition has been done, no further error is expected and it
* is not possible to undo any of the actions taken beyond that point.
******************************************************************************/
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl)
{
int skip_wfi = 0;
mpidr_aff_map_nodes_t mpidr_nodes;
unsigned int max_phys_off_afflvl;
/*
* This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->affinst_suspend &&
psci_plat_pm_ops->affinst_suspend_finish);
/*
* Collect the pointers to the nodes in the topology tree for
* each affinity instance in the mpidr. If this function does
* not return successfully then either the mpidr or the affinity
* levels are incorrect. Either way, this is an internal TF error
* therefore assert.
*/
if (psci_get_aff_map_nodes(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
start_afflvl, end_afflvl, mpidr_nodes) != PSCI_E_SUCCESS)
assert(0);
/*
* This function acquires the lock corresponding to each affinity
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
/*
* We check if there are any pending interrupts after the delay
* introduced by lock contention to increase the chances of early
* detection that a wake-up interrupt has fired.
*/
if (read_isr_el1()) {
skip_wfi = 1;
goto exit;
}
/*
* Call the cpu suspend handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend)
psci_spd_pm->svc_suspend(0);
/*
* This function updates the state of each affinity instance
* corresponding to the mpidr in the range of affinity levels
* specified.
*/
psci_do_afflvl_state_mgmt(start_afflvl,
end_afflvl,
mpidr_nodes,
PSCI_STATE_SUSPEND);
max_phys_off_afflvl = psci_find_max_phys_off_afflvl(start_afflvl,
end_afflvl,
mpidr_nodes);
assert(max_phys_off_afflvl != PSCI_INVALID_DATA);
/* Stash the highest affinity level that will be turned off */
psci_set_max_phys_off_afflvl(max_phys_off_afflvl);
/*
* Store the re-entry information for the non-secure world.
*/
cm_init_context(read_mpidr_el1(), ep);
/* Perform generic, architecture and platform specific handling */
psci_call_suspend_handlers(mpidr_nodes,
start_afflvl,
end_afflvl);
/*
* Invalidate the entry for the highest affinity level stashed earlier.
* This ensures that any reads of this variable outside the power
* up/down sequences return PSCI_INVALID_DATA.
*/
psci_set_max_phys_off_afflvl(PSCI_INVALID_DATA);
exit:
/*
* Release the locks corresponding to each affinity level in the
* reverse order to which they were acquired.
*/
psci_release_afflvl_locks(start_afflvl,
end_afflvl,
mpidr_nodes);
if (!skip_wfi)
psci_power_down_wfi();
}
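
The skip_wfi early-out above is a small but important race fix: a wake-up interrupt that fires while this CPU is queued on the afflvl locks must abort the suspend rather than be lost across the wfi. A sketch of the shape of that check, with read_isr_el1() and psci_power_down_wfi() declared as stand-ins for the real helpers:

extern unsigned long read_isr_el1(void);	/* pending-interrupt summary */
extern void psci_power_down_wfi(void);		/* does not return on power down */

static void suspend_to_wfi(void)
{
	int skip_wfi = 0;

	/* ... afflvl locks taken, suspend state not yet committed ... */

	/*
	 * If an interrupt is already pending after the lock-contention
	 * delay, abandon the suspend instead of entering wfi.
	 */
	if (read_isr_el1() != 0)
		skip_wfi = 1;

	/* ... state programmed, locks released ... */

	if (!skip_wfi)
		psci_power_down_wfi();
}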
/*******************************************************************************
* The following functions finish an earlier affinity suspend request. They
* are called by the common finisher routine in psci_common.c.
******************************************************************************/
static void psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
unsigned int plat_state, state;
int32_t suspend_level;
uint64_t counter_freq;
assert(cpu_node->level == MPIDR_AFFLVL0);
/* Ensure we have been woken up from a suspended state */
state = psci_get_state(cpu_node);
assert(state == PSCI_STATE_SUSPEND);
/*
* Plat. management: Perform the platform specific actions
* before we change the state of the cpu e.g. enabling the
* gic or zeroing the mailbox register. If anything goes
* wrong then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = get_phys_state(state);
psci_plat_pm_ops->affinst_suspend_finish(cpu_node->level,
plat_state);
/*
* Arch. management: Enable the data cache, manage stack memory and
* restore the stashed EL3 architectural context from the 'cpu_context'
* structure for this cpu.
*/
psci_do_pwrup_cache_maintenance();
/* Re-init the cntfrq_el0 register */
counter_freq = plat_get_syscnt_freq();
write_cntfrq_el0(counter_freq);
/*
* Call the cpu suspend finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend) {
suspend_level = psci_get_suspend_afflvl();
assert(suspend_level != PSCI_INVALID_DATA);
psci_spd_pm->svc_suspend_finish(suspend_level);
}
/* Invalidate the suspend context for the node */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
* call to set this cpu on its way.
*/
cm_prepare_el3_exit(NON_SECURE);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}
static void psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
unsigned int plat_state;
assert(cluster_node->level == MPIDR_AFFLVL1);
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of this cpu */
plat_state = psci_get_phys_state(cluster_node);
psci_plat_pm_ops->affinst_suspend_finish(cluster_node->level,
plat_state);
}
static void psci_afflvl2_suspend_finish(aff_map_node_t *system_node)
{
unsigned int plat_state;
/* Cannot go beyond this affinity level */
assert(system_node->level == MPIDR_AFFLVL2);
/*
* Currently, there are no architectural actions to perform
* at the system level.
*/
/*
* Plat. management: Perform the platform specific actions
* as per the old state of the cluster e.g. enabling
* coherency at the interconnect depends upon the state with
* which this cluster was powered up. If anything goes wrong
* then assert as there is no way to recover from this
* situation.
*/
/* Get the physical state of the system */
plat_state = psci_get_phys_state(system_node);
psci_plat_pm_ops->affinst_suspend_finish(system_node->level,
plat_state);
}
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
psci_afflvl0_suspend_finish,
psci_afflvl1_suspend_finish,
psci_afflvl2_suspend_finish,
};

File diff suppressed because it is too large.

View file

@ -34,25 +34,16 @@
#include <psci.h>
#include <xlat_tables.h>
.globl psci_aff_on_finish_entry
.globl psci_aff_suspend_finish_entry
.globl psci_entrypoint
.globl psci_power_down_wfi
/* -----------------------------------------------------
* This cpu has been physically powered up. Depending
* upon whether it was resumed from suspend or simply
* turned on, call the common power on finisher with
* the handlers (chosen depending upon original state).
* -----------------------------------------------------
/* --------------------------------------------------------------------
* This CPU has been physically powered up. It is either resuming from
* suspend or has simply been turned on. In both cases, call the power
* on finisher.
* --------------------------------------------------------------------
*/
func psci_aff_on_finish_entry
adr x23, psci_afflvl_on_finishers
b psci_aff_common_finish_entry
psci_aff_suspend_finish_entry:
adr x23, psci_afflvl_suspend_finishers
psci_aff_common_finish_entry:
func psci_entrypoint
/*
* On the warm boot path, most of the EL3 initialisations performed by
* 'el3_entrypoint_common' must be skipped:
@ -98,19 +89,10 @@ psci_aff_common_finish_entry:
mov x0, #DISABLE_DCACHE
bl bl31_plat_enable_mmu
/* ---------------------------------------------
* Call the finishers starting from affinity
* level 0.
* ---------------------------------------------
*/
bl get_power_on_target_afflvl
mov x2, x23
mov x1, x0
mov x0, #MPIDR_AFFLVL0
bl psci_afflvl_power_on_finish
bl psci_power_up_finish
b el3_exit
endfunc psci_aff_on_finish_entry
endfunc psci_entrypoint
/* --------------------------------------------
* This function is called to indicate to the

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -28,7 +28,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <asm_macros.S>
#include <assert_macros.S>
#include <platform_def.h>
@ -38,14 +37,13 @@
.globl psci_do_pwrup_cache_maintenance
/* -----------------------------------------------------------------------
* void psci_do_pwrdown_cache_maintenance(uint32_t affinity level);
* void psci_do_pwrdown_cache_maintenance(unsigned int power level);
*
* This function performs cache maintenance if the specified affinity
* level is the equal to the level of the highest affinity instance which
* will be/is physically powered off. The levels of cache affected are
* determined by the affinity level which is passed as the argument i.e.
* level 0 results in a flush of the L1 cache. Both the L1 and L2 caches
* are flushed for a higher affinity level.
* This function performs cache maintenance for the specified power
* level. The levels of cache affected are determined by the power
* level which is passed as the argument i.e. level 0 results
* in a flush of the L1 cache. Both the L1 and L2 caches are flushed
* for a higher power level.
*
* Additionally, this function also ensures that stack memory is correctly
* flushed out to avoid coherency issues due to a change in its memory
@ -56,28 +54,19 @@ func psci_do_pwrdown_cache_maintenance
stp x29, x30, [sp,#-16]!
stp x19, x20, [sp,#-16]!
mov x19, x0
bl psci_get_max_phys_off_afflvl
#if ASM_ASSERTION
cmp x0, #PSCI_INVALID_DATA
ASM_ASSERT(ne)
#endif
cmp x0, x19
b.ne 1f
/* ---------------------------------------------
* Determine how many levels of cache will be
* subject to cache maintenance. Affinity level
* subject to cache maintenance. Power level
* 0 implies that only the cpu is being powered
* down. Only the L1 data cache needs to be
* flushed to the PoU in this case. For a higher
* affinity level we are assuming that a flush
* power level we are assuming that a flush
* of L1 data and L2 unified cache is enough.
* This information should be provided by the
* platform.
* ---------------------------------------------
*/
cmp x0, #MPIDR_AFFLVL0
cmp w0, #PSCI_CPU_PWR_LVL
b.eq do_core_pwr_dwn
bl prepare_cluster_pwr_dwn
b do_stack_maintenance
@ -92,8 +81,7 @@ do_core_pwr_dwn:
* ---------------------------------------------
*/
do_stack_maintenance:
mrs x0, mpidr_el1
bl platform_get_stack
bl plat_get_my_stack
/* ---------------------------------------------
* Calculate and store the size of the used
@ -116,7 +104,6 @@ do_stack_maintenance:
sub x1, sp, x0
bl inv_dcache_range
1:
ldp x19, x20, [sp], #16
ldp x29, x30, [sp], #16
ret
@ -147,8 +134,7 @@ func psci_do_pwrup_cache_maintenance
* stack base address in x0.
* ---------------------------------------------
*/
mrs x0, mpidr_el1
bl platform_get_stack
bl plat_get_my_stack
mov x1, sp
sub x1, x0, x1
mov x0, sp

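Both helpers above derive the extent of the stack region to maintain from the current stack pointer: the stack grows down from the per-cpu base returned by plat_get_my_stack(), so the live region is [sp, base) and its size is base - sp. The same computation in C, assuming a full-descending stack and a hypothetical flush_dcache_range() helper:

#include <stddef.h>
#include <stdint.h>

extern uintptr_t plat_get_my_stack(void);	/* per-cpu stack base */
extern void flush_dcache_range(uintptr_t addr, size_t size);	/* hypothetical */

static void flush_used_stack(uintptr_t sp)
{
	uintptr_t base = plat_get_my_stack();

	/* Full-descending stack: [sp, base) holds the data in use. */
	flush_dcache_range(sp, (size_t)(base - sp));
}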
View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -35,56 +35,39 @@
#include <platform.h>
#include <runtime_svc.h>
#include <std_svc.h>
#include <string.h>
#include "psci_private.h"
/*******************************************************************************
* PSCI frontend api for servicing SMCs. Described in the PSCI spec.
******************************************************************************/
int psci_cpu_on(unsigned long target_cpu,
unsigned long entrypoint,
unsigned long context_id)
int psci_cpu_on(u_register_t target_cpu,
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int start_afflvl, end_afflvl;
unsigned int end_pwrlvl;
entry_point_info_t ep;
/* Determine if the cpu exists or not */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
if (rc != PSCI_E_SUCCESS) {
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
}
/* Validate the entrypoint using platform pm_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return PSCI_E_INVALID_PARAMS;
}
}
/*
* Verify and derive the re-entry information for
* the non-secure world from the non-secure state from
* where this call originated.
*/
rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
/* Validate the entry point and get the entry_point_info */
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* To turn this cpu on, specify which affinity
* To turn this cpu on, specify which power
* levels need to be turned on
*/
start_afflvl = MPIDR_AFFLVL0;
end_afflvl = PLATFORM_MAX_AFFLVL;
rc = psci_afflvl_on(target_cpu,
end_pwrlvl = PLAT_MAX_PWR_LVL;
rc = psci_cpu_on_start(target_cpu,
&ep,
start_afflvl,
end_afflvl);
end_pwrlvl);
return rc;
}
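
From the normal world this entry point is reached through the SMC conduit: per the PSCI specification, function id 0xC4000003 (CPU_ON, SMC64) with the target MPIDR, entry point and context id in x1-x3, and the return code coming back in x0. A hypothetical AArch64 caller-side wrapper using GCC inline assembly:

#include <stdint.h>

#define PSCI_CPU_ON_AARCH64	0xc4000003u	/* CPU_ON, SMC64 calling convention */

static int32_t smc_psci_cpu_on(uint64_t target_mpidr, uint64_t entrypoint,
			       uint64_t context_id)
{
	register uint64_t x0 __asm__("x0") = PSCI_CPU_ON_AARCH64;
	register uint64_t x1 __asm__("x1") = target_mpidr;
	register uint64_t x2 __asm__("x2") = entrypoint;
	register uint64_t x3 __asm__("x3") = context_id;

	__asm__ volatile("smc #0"
			 : "+r" (x0)
			 : "r" (x1), "r" (x2), "r" (x3)
			 : "memory");

	return (int32_t)x0;	/* PSCI_E_SUCCESS or an error code */
}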
@ -94,148 +77,125 @@ unsigned int psci_version(void)
}
int psci_cpu_suspend(unsigned int power_state,
unsigned long entrypoint,
unsigned long context_id)
uintptr_t entrypoint,
u_register_t context_id)
{
int rc;
unsigned int target_afflvl, pstate_type;
unsigned int target_pwrlvl, is_power_down_state;
entry_point_info_t ep;
psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
plat_local_state_t cpu_pd_state;
/* Check SBZ bits in power state are zero */
if (psci_validate_power_state(power_state))
return PSCI_E_INVALID_PARAMS;
/* Sanity check the requested state */
target_afflvl = psci_get_pstate_afflvl(power_state);
if (target_afflvl > PLATFORM_MAX_AFFLVL)
return PSCI_E_INVALID_PARAMS;
/* Validate the power_state using platform pm_ops */
if (psci_plat_pm_ops->validate_power_state) {
rc = psci_plat_pm_ops->validate_power_state(power_state);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return PSCI_E_INVALID_PARAMS;
}
/* Validate the power_state parameter */
rc = psci_validate_power_state(power_state, &state_info);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return rc;
}
/* Validate the entrypoint using platform pm_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return PSCI_E_INVALID_PARAMS;
}
}
/* Determine the 'state type' in the 'power_state' parameter */
pstate_type = psci_get_pstate_type(power_state);
/*
* Ensure that we have a platform specific handler for entering
* a standby state.
* Get the value of the state type bit from the power state parameter.
*/
if (pstate_type == PSTATE_TYPE_STANDBY) {
if (!psci_plat_pm_ops->affinst_standby)
is_power_down_state = psci_get_pstate_type(power_state);
/* Sanity check the requested suspend levels */
assert(psci_validate_suspend_req(&state_info, is_power_down_state)
== PSCI_E_SUCCESS);
target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
/* Fast path for CPU standby. */
if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
if (!psci_plat_pm_ops->cpu_standby)
return PSCI_E_INVALID_PARAMS;
psci_plat_pm_ops->affinst_standby(power_state);
/*
* Set the state of the CPU power domain to the platform
* specific retention state and enter the standby state.
*/
cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
psci_set_cpu_local_state(cpu_pd_state);
psci_plat_pm_ops->cpu_standby(cpu_pd_state);
/* Upon exit from standby, set the state back to RUN. */
psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
return PSCI_E_SUCCESS;
}
/*
* Verify and derive the re-entry information for
* the non-secure world from the non-secure state from
* where this call originated.
* If a power down state has been requested, we need to verify entry
* point and program entry information.
*/
rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/* Save PSCI power state parameter for the core in suspend context */
psci_set_suspend_power_state(power_state);
if (is_power_down_state) {
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
}
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this CPU.
* enter the final wfi which will power down this CPU. This function
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
psci_afflvl_suspend(&ep,
MPIDR_AFFLVL0,
target_afflvl);
psci_cpu_suspend_start(&ep,
target_pwrlvl,
&state_info,
is_power_down_state);
/* Reset PSCI power state parameter for the core. */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
return PSCI_E_SUCCESS;
}
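
The standby fast path above ends in the platform's cpu_standby hook, which is expected to place the core in the requested retention state and return once an interrupt wakes it. A minimal hypothetical implementation for a platform whose only retention state is a plain wfi:

#include <assert.h>
#include <stdint.h>

typedef uint8_t plat_local_state_t;

#define PLAT_RET_STATE	1u	/* assumed platform retention state id */

extern void wfi(void);	/* hypothetical wrapper around the wfi instruction */

static void plat_cpu_standby(plat_local_state_t cpu_state)
{
	assert(cpu_state == PLAT_RET_STATE);

	/*
	 * Enter standby. Execution resumes here when a physical interrupt
	 * is asserted; the generic layer then sets the cpu local state
	 * back to PSCI_LOCAL_STATE_RUN.
	 */
	wfi();
}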
int psci_system_suspend(unsigned long entrypoint,
unsigned long context_id)
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
{
int rc;
unsigned int power_state;
psci_power_state_t state_info;
entry_point_info_t ep;
/* Validate the entrypoint using platform pm_ops */
if (psci_plat_pm_ops->validate_ns_entrypoint) {
rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
if (rc != PSCI_E_SUCCESS) {
assert(rc == PSCI_E_INVALID_PARAMS);
return PSCI_E_INVALID_PARAMS;
}
}
/* Check if the current CPU is the last ON CPU in the system */
if (!psci_is_last_on_cpu())
return PSCI_E_DENIED;
/*
* Verify and derive the re-entry information for
* the non-secure world from the non-secure state from
* where this call originated.
*/
rc = psci_get_ns_ep_info(&ep, entrypoint, context_id);
/* Validate the entry point and get the entry_point_info */
rc = psci_validate_entry_point(&ep, entrypoint, context_id);
if (rc != PSCI_E_SUCCESS)
return rc;
/*
* Assert that the required pm_ops hook is implemented to ensure that
* the capability detected during psci_setup() is valid.
*/
assert(psci_plat_pm_ops->get_sys_suspend_power_state);
/* Query the psci_power_state for system suspend */
psci_query_sys_suspend_pwrstate(&state_info);
/* Ensure that the psci_power_state makes sense */
assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
== PSCI_E_SUCCESS);
assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));
/*
* Query the platform for the power_state required for system suspend
* Do what is needed to enter the system suspend state. This function
* might return if the power down was abandoned for any reason, e.g.
* arrival of an interrupt
*/
power_state = psci_plat_pm_ops->get_sys_suspend_power_state();
psci_cpu_suspend_start(&ep,
PLAT_MAX_PWR_LVL,
&state_info,
PSTATE_TYPE_POWERDOWN);
/* Save PSCI power state parameter for the core in suspend context */
psci_set_suspend_power_state(power_state);
/*
* Do what is needed to enter the power down state. Upon success,
* enter the final wfi which will power down this cpu.
*/
psci_afflvl_suspend(&ep,
MPIDR_AFFLVL0,
PLATFORM_MAX_AFFLVL);
/* Reset PSCI power state parameter for the core. */
psci_set_suspend_power_state(PSCI_INVALID_DATA);
return PSCI_E_SUCCESS;
}
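
psci_query_sys_suspend_pwrstate() above ultimately asks the platform to describe the deepest state it supports, and the assertions check that the answer really is an OFF state at PLAT_MAX_PWR_LVL. A hypothetical platform hook that satisfies those checks by requesting the deepest OFF state at every level:

#include <stdint.h>

#define PLAT_MAX_PWR_LVL	2u	/* assumed: cpu, cluster, system */
#define PLAT_MAX_OFF_STATE	2u	/* assumed deepest local state id */

typedef uint8_t plat_local_state_t;

typedef struct psci_power_state {
	plat_local_state_t pwr_domain_state[PLAT_MAX_PWR_LVL + 1u];
} psci_power_state_t;

static void plat_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	/* System suspend powers every level down to its deepest state. */
	for (unsigned int lvl = 0u; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		req_state->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}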
int psci_cpu_off(void)
{
int rc;
int target_afflvl = PLATFORM_MAX_AFFLVL;
unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;
/*
* Traverse from the highest to the lowest affinity level. When the
* lowest affinity level is hit, all the locks are acquired. State
* management is done immediately followed by cpu, cluster ...
* ..target_afflvl specific actions as this function unwinds back.
* Do what is needed to power off this CPU and possibly higher power
* levels if it is able to do so. Upon success, enter the final wfi
* which will power down this CPU.
*/
rc = psci_afflvl_off(MPIDR_AFFLVL0, target_afflvl);
rc = psci_do_cpu_off(target_pwrlvl);
/*
* The only error cpu_off can return is E_DENIED. So check if that's
@ -246,41 +206,27 @@ int psci_cpu_off(void)
return rc;
}
int psci_affinity_info(unsigned long target_affinity,
int psci_affinity_info(u_register_t target_affinity,
unsigned int lowest_affinity_level)
{
int rc = PSCI_E_INVALID_PARAMS;
unsigned int aff_state;
aff_map_node_t *node;
unsigned int target_idx;
if (lowest_affinity_level > PLATFORM_MAX_AFFLVL)
return rc;
/* We don't support levels higher than PSCI_CPU_PWR_LVL */
if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
return PSCI_E_INVALID_PARAMS;
node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
if (node && (node->state & PSCI_AFF_PRESENT)) {
/* Calculate the cpu index of the target */
target_idx = plat_core_pos_by_mpidr(target_affinity);
if (target_idx == -1)
return PSCI_E_INVALID_PARAMS;
/*
* TODO: For affinity levels higher than 0 i.e. cpu, the
* state will always be either ON or OFF. Need to investigate
* how critical is it to support ON_PENDING here.
*/
aff_state = psci_get_state(node);
/* A suspended cpu is available & on for the OS */
if (aff_state == PSCI_STATE_SUSPEND) {
aff_state = PSCI_STATE_ON;
}
rc = aff_state;
}
return rc;
return psci_get_aff_info_state_by_idx(target_idx);
}
int psci_migrate(unsigned long target_cpu)
int psci_migrate(u_register_t target_cpu)
{
int rc;
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
rc = psci_spd_migrate_info(&resident_cpu_mpidr);
if (rc != PSCI_TOS_UP_MIG_CAP)
@ -295,7 +241,7 @@ int psci_migrate(unsigned long target_cpu)
return PSCI_E_NOT_PRESENT;
/* Check the validity of the specified target cpu */
rc = psci_validate_mpidr(target_cpu, MPIDR_AFFLVL0);
rc = psci_validate_mpidr(target_cpu);
if (rc != PSCI_E_SUCCESS)
return PSCI_E_INVALID_PARAMS;
@ -309,14 +255,14 @@ int psci_migrate(unsigned long target_cpu)
int psci_migrate_info_type(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
return psci_spd_migrate_info(&resident_cpu_mpidr);
}
long psci_migrate_info_up_cpu(void)
{
unsigned long resident_cpu_mpidr;
u_register_t resident_cpu_mpidr;
int rc;
/*
@ -332,7 +278,7 @@ long psci_migrate_info_up_cpu(void)
int psci_features(unsigned int psci_fid)
{
uint32_t local_caps = psci_caps;
unsigned int local_caps = psci_caps;
/* Check if it is a 64 bit function */
if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
@ -352,10 +298,9 @@ int psci_features(unsigned int psci_fid)
if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
/*
* The trusted firmware uses the original power state format
* and does not support OS Initiated Mode.
* The trusted firmware does not support OS Initiated Mode.
*/
return (FF_PSTATE_ORIG << FF_PSTATE_SHIFT) |
return (FF_PSTATE << FF_PSTATE_SHIFT) |
((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
}

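For CPU_SUSPEND, PSCI_FEATURES returns a flags word rather than a bare success code; in this implementation the power_state-format flag and the OS-initiated-mode flag are packed with the two shifts used above. A worked decode, assuming the spec's placement of the format flag in bit 1 and the OS-initiated flag in bit 0:

#include <stdint.h>
#include <stdio.h>

#define FF_PSTATE_SHIFT		1	/* assumed: power_state format flag */
#define FF_MODE_SUPPORT_SHIFT	0	/* assumed: OS-initiated mode flag */

int main(void)
{
	/* Example: original format (0), no OS-initiated mode (0). */
	uint32_t flags = (0u << FF_PSTATE_SHIFT) | (0u << FF_MODE_SUPPORT_SHIFT);

	printf("extended StateID format: %u\n",
	       (flags >> FF_PSTATE_SHIFT) & 1u);
	printf("OS-initiated mode:       %u\n",
	       (flags >> FF_MODE_SUPPORT_SHIFT) & 1u);
	return 0;
}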
View file

@ -0,0 +1,142 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"
/******************************************************************************
* Construct the psci_power_state to request power OFF at all power levels.
******************************************************************************/
static void psci_set_power_off_state(psci_power_state_t *state_info)
{
int lvl;
for (lvl = PSCI_CPU_PWR_LVL; lvl <= PLAT_MAX_PWR_LVL; lvl++)
state_info->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
}
/******************************************************************************
* Top level handler which is called when a cpu wants to power itself down.
* It's assumed that along with turning the cpu power domain off, power
* domains at higher levels will be turned off as far as possible. It finds
* the highest level where a domain has to be powered off by traversing the
* node information and then performs generic, architectural, platform setup
* and state management required to turn OFF that power domain and domains
* below it. e.g. For a cpu that's to be powered OFF, it could mean programming
* the power controller whereas for a cluster that's to be powered off, it will
* call the platform specific code which will disable coherency at the
* interconnect level if the cpu is the last in the cluster and also
* program the power controller.
******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
int rc, idx = plat_my_core_pos();
psci_power_state_t state_info;
/*
* This function must only be called on platforms where the
* CPU_OFF platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->pwr_domain_off);
/*
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshot and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl,
idx);
/*
* Call the cpu off handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. Assume that the SPD always reports an
* E_DENIED error if the SP refuses to power down
*/
if (psci_spd_pm && psci_spd_pm->svc_off) {
rc = psci_spd_pm->svc_off(0);
if (rc)
goto exit;
}
/* Construct the psci_power_state for CPU_OFF */
psci_set_power_off_state(&state_info);
/*
* This function is passed the requested state info and
* it returns the negotiated state info for each power level up to
* the end level specified.
*/
psci_do_state_coordination(end_pwrlvl, &state_info);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches.
*/
psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));
/*
* Plat. management: Perform platform specific actions to turn this
* cpu off e.g. exit cpu coherency, program the power controller etc.
*/
psci_plat_pm_ops->pwr_domain_off(&state_info);
exit:
/*
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl,
idx);
/*
* Set the affinity info state to OFF. This writes directly to main
* memory as caches are disabled, so cache maintenance is required
* to ensure that later cached reads of aff_info_state return
* AFF_STATE_OFF.
*/
flush_cpu_data(psci_svc_cpu_data.aff_info_state);
psci_set_aff_info_state(AFF_STATE_OFF);
inv_cpu_data(psci_svc_cpu_data.aff_info_state);
/*
* Check if all actions needed to safely power down this cpu have
* successfully completed. Enter a wfi loop which will allow the
* power controller to physically power down this cpu.
*/
if (rc == PSCI_E_SUCCESS)
psci_power_down_wfi();
return rc;
}
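
The flush/write/invalidate triple near the end of psci_do_cpu_off() is the standard pattern for publishing a flag from a CPU whose data cache is already disabled: clean any stale dirty line first so it cannot later overwrite the update, write (the store now goes straight to memory), then invalidate so cached observers re-read from memory. The same pattern in generic form, with hypothetical single-line maintenance helpers:

#include <stdint.h>

extern void clean_dcache_line(volatile void *addr);	/* hypothetical */
extern void inv_dcache_line(volatile void *addr);	/* hypothetical */

static volatile uint32_t aff_info_state;	/* read by other, cached CPUs */

static void publish_state_caches_off(uint32_t new_state)
{
	/* Evict any dirty copy before the uncached store. */
	clean_dcache_line(&aff_info_state);

	/* D-cache off on this CPU: the store goes to main memory. */
	aff_info_state = new_state;

	/* Drop the now-stale cached copy so later reads hit memory. */
	inv_dcache_line(&aff_info_state);
}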

View file

@ -0,0 +1,209 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <bl31.h>
#include <debug.h>
#include <context_mgmt.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
/*******************************************************************************
* This function checks whether a cpu which has been requested to be turned on
* is OFF to begin with.
******************************************************************************/
static int cpu_on_validate_state(aff_info_state_t aff_state)
{
if (aff_state == AFF_STATE_ON)
return PSCI_E_ALREADY_ON;
if (aff_state == AFF_STATE_ON_PENDING)
return PSCI_E_ON_PENDING;
assert(aff_state == AFF_STATE_OFF);
return PSCI_E_SUCCESS;
}
/*******************************************************************************
* This function sets the aff_info_state in the per-cpu data of the CPU
* specified by cpu_idx
******************************************************************************/
static void psci_set_aff_info_state_by_idx(unsigned int cpu_idx,
aff_info_state_t aff_state)
{
set_cpu_data_by_index(cpu_idx,
psci_svc_cpu_data.aff_info_state,
aff_state);
/*
* Flush aff_info_state as it will be accessed with caches turned OFF.
*/
flush_cpu_data_by_index(cpu_idx, psci_svc_cpu_data.aff_info_state);
}
/*******************************************************************************
* Generic handler which is called to physically power on a cpu identified by
* its mpidr. It performs the generic, architectural, platform setup and state
* management to power on the target cpu e.g. it will ensure that
* enough information is stashed for it to resume execution in the non-secure
* security state.
*
* The state of all the relevant power domains is changed after calling the
* platform handler, as it can return an error.
******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu,
entry_point_info_t *ep,
unsigned int end_pwrlvl)
{
int rc;
unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
/*
* This function must only be called on platforms where the
* CPU_ON platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->pwr_domain_on_finish);
/* Protect against multiple CPUs trying to turn ON the same target CPU */
psci_spin_lock_cpu(target_idx);
/*
* Generic management: Ensure that the cpu is off to be
* turned on.
*/
rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
if (rc != PSCI_E_SUCCESS)
goto exit;
/*
* Call the cpu on handler registered by the Secure Payload Dispatcher
* to let it do any bookkeeping. If the handler encounters an error, it's
* expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_on)
psci_spd_pm->svc_on(target_cpu);
/*
* Set the Affinity info state of the target cpu to ON_PENDING.
*/
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
/*
* Perform generic, architecture and platform specific handling.
*/
/*
* Plat. management: Give the platform the current state
* of the target cpu to allow it to perform the necessary
* steps to power on.
*/
rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);
if (rc == PSCI_E_SUCCESS)
/* Store the re-entry information for the non-secure world. */
cm_init_context_by_index(target_idx, ep);
else
/* Restore the state on error. */
psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
exit:
psci_spin_unlock_cpu(target_idx);
return rc;
}
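/*
 * Illustrative sketch, not part of this patch: how a generic PSCI CPU_ON
 * entry point might validate the request and then invoke psci_cpu_on_start().
 * The wrapper name is hypothetical; the PSCI frontend in this tree is the
 * real caller.
 */
static int example_cpu_on(u_register_t target_cpu, uintptr_t entrypoint,
			  u_register_t context_id)
{
	entry_point_info_t ep;
	int rc;

	/* Validate and pack the non-secure entry point information */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Power on the target cpu, coordinating up to the top power level */
	return psci_cpu_on_start(target_cpu, &ep, PLAT_MAX_PWR_LVL);
}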
/*******************************************************************************
* The following function finishes an earlier power on request. It is
* called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up.
******************************************************************************/
void psci_cpu_on_finish(unsigned int cpu_idx,
psci_power_state_t *state_info)
{
/*
* Plat. management: Perform the platform specific actions
* for this cpu e.g. enabling the gic or zeroing the mailbox
* register. The actual state of this cpu has already been
* changed.
*/
psci_plat_pm_ops->pwr_domain_on_finish(state_info);
/*
* Arch. management: Enable data cache and manage stack memory
*/
psci_do_pwrup_cache_maintenance();
/*
* All the platform specific actions for turning this cpu
* on have completed. Perform enough arch. initialization
* to run in the non-secure address space.
*/
bl31_arch_setup();
/*
* Lock the CPU spin lock to make sure that the context initialization
* is done. Since the lock is only used in this function to create
* a synchronization point with cpu_on_start(), it can be released
* immediately.
*/
psci_spin_lock_cpu(cpu_idx);
psci_spin_unlock_cpu(cpu_idx);
/* Ensure we have been explicitly woken up by another cpu */
assert(psci_get_aff_info_state() == AFF_STATE_ON_PENDING);
/*
* Call the cpu on finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_on_finish)
psci_spd_pm->svc_on_finish(0);
/* Populate the mpidr field within the cpu node array */
/* This needs to be done only once */
psci_cpu_pd_nodes[cpu_idx].mpidr = read_mpidr() & MPIDR_AFFINITY_MASK;
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the cpu_on
* call to set this cpu on its way.
*/
cm_prepare_el3_exit(NON_SECURE);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -34,22 +34,30 @@
#include <arch.h>
#include <bakery_lock.h>
#include <bl_common.h>
#include <cpu_data.h>
#include <psci.h>
#include <spinlock.h>
/*
* The following helper macros abstract the interface to the Bakery
* Lock API.
*/
#if USE_COHERENT_MEM
#define psci_lock_init(aff_map, idx) bakery_lock_init(&(aff_map)[(idx)].lock)
#define psci_lock_get(node) bakery_lock_get(&((node)->lock))
#define psci_lock_release(node) bakery_lock_release(&((node)->lock))
#define psci_lock_init(non_cpu_pd_node, idx) \
bakery_lock_init(&(non_cpu_pd_node)[(idx)].lock)
#define psci_lock_get(non_cpu_pd_node) \
bakery_lock_get(&((non_cpu_pd_node)->lock))
#define psci_lock_release(non_cpu_pd_node) \
bakery_lock_release(&((non_cpu_pd_node)->lock))
#else
#define psci_lock_init(aff_map, idx) ((aff_map)[(idx)].aff_map_index = (idx))
#define psci_lock_get(node) bakery_lock_get((node)->aff_map_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_release(node) bakery_lock_release((node)->aff_map_index,\
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_init(non_cpu_pd_node, idx) \
((non_cpu_pd_node)[(idx)].lock_index = (idx))
#define psci_lock_get(non_cpu_pd_node) \
bakery_lock_get((non_cpu_pd_node)->lock_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#define psci_lock_release(non_cpu_pd_node) \
bakery_lock_release((non_cpu_pd_node)->lock_index, \
CPU_DATA_PSCI_LOCK_OFFSET)
#endif
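/*
 * Illustrative only: whichever variant above is selected, callers follow the
 * same pattern to serialise access to a non-CPU power domain node (the node
 * type and psci_non_cpu_pd_nodes[] are defined further down in this header):
 *
 *	psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
 *	... read or update the node state safely ...
 *	psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
 */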
/*
@ -72,39 +80,99 @@
define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) | \
define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64))
/*
* Helper macros to get/set the fields of PSCI per-cpu data.
*/
#define psci_set_aff_info_state(aff_state) \
set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state)
#define psci_get_aff_info_state() \
get_cpu_data(psci_svc_cpu_data.aff_info_state)
#define psci_get_aff_info_state_by_idx(idx) \
get_cpu_data_by_index(idx, psci_svc_cpu_data.aff_info_state)
#define psci_get_suspend_pwrlvl() \
get_cpu_data(psci_svc_cpu_data.target_pwrlvl)
#define psci_set_suspend_pwrlvl(target_lvl) \
set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl)
#define psci_set_cpu_local_state(state) \
set_cpu_data(psci_svc_cpu_data.local_state, state)
#define psci_get_cpu_local_state() \
get_cpu_data(psci_svc_cpu_data.local_state)
#define psci_get_cpu_local_state_by_idx(idx) \
get_cpu_data_by_index(idx, psci_svc_cpu_data.local_state)
/*
* Helper macros for the CPU level spinlocks
*/
#define psci_spin_lock_cpu(idx) spin_lock(&psci_cpu_pd_nodes[idx].cpu_lock)
#define psci_spin_unlock_cpu(idx) spin_unlock(&psci_cpu_pd_nodes[idx].cpu_lock)
/* Helper macro to identify a CPU standby request in PSCI Suspend call */
#define is_cpu_standby_req(is_power_down_state, retn_lvl) \
(((!(is_power_down_state)) && ((retn_lvl) == 0)) ? 1 : 0)
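/*
 * For example, a CPU_SUSPEND request for a retention state at power level 0
 * (is_power_down_state == 0, retn_lvl == 0) is a standby request:
 * is_cpu_standby_req(0, 0) evaluates to 1, whereas is_cpu_standby_req(1, 0)
 * and is_cpu_standby_req(0, 1) both evaluate to 0.
 */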
/*******************************************************************************
* The following two data structures hold the topology tree which in turn tracks
* the state of all the affinity instances supported by the platform.
* The following two data structures implement the power domain tree. The tree
* is used to track the state of all the nodes i.e. power domain instances
* described by the platform. The tree consists of nodes that describe CPU power
* domains i.e. leaf nodes and all other power domains which are parents of a
* CPU power domain i.e. non-leaf nodes.
******************************************************************************/
typedef struct aff_map_node {
unsigned long mpidr;
unsigned char ref_count;
unsigned char state;
typedef struct non_cpu_pwr_domain_node {
/*
* Index of the first CPU power domain node level 0 which has this node
* as its parent.
*/
unsigned int cpu_start_idx;
/*
* Number of CPU power domains which are siblings of the domain indexed
* by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
* -> cpu_start_idx + ncpus' have this node as their parent.
*/
unsigned int ncpus;
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
plat_local_state_t local_state;
unsigned char level;
#if USE_COHERENT_MEM
bakery_lock_t lock;
#else
/* For indexing the bakery_info array in per CPU data */
unsigned char aff_map_index;
unsigned char lock_index;
#endif
} aff_map_node_t;
} non_cpu_pd_node_t;
typedef struct aff_limits_node {
int min;
int max;
} aff_limits_node_t;
typedef struct cpu_pwr_domain_node {
u_register_t mpidr;
typedef aff_map_node_t (*mpidr_aff_map_nodes_t[MPIDR_MAX_AFFLVL + 1]);
typedef void (*afflvl_power_on_finisher_t)(aff_map_node_t *);
/*
* Index of the parent power domain node.
* TODO: Figure out whether using a pointer is more efficient.
*/
unsigned int parent_node;
/*
* A CPU power domain does not require state coordination like its
* parent power domains. Hence this node does not include a bakery
* lock. A spinlock is required by the CPU_ON handler to prevent a race
* when multiple CPUs try to turn ON the same target CPU.
*/
spinlock_t cpu_lock;
} cpu_pd_node_t;
/*******************************************************************************
* Data prototypes
******************************************************************************/
extern const plat_pm_ops_t *psci_plat_pm_ops;
extern aff_map_node_t psci_aff_map[PSCI_NUM_AFFS];
extern aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
extern uint32_t psci_caps;
extern const plat_psci_ops_t *psci_plat_pm_ops;
extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
extern unsigned int psci_caps;
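/*
 * Illustrative sketch, not part of this patch: walking from a CPU leaf node
 * to the root of the tree using the parent_node indices declared above
 * (assumes the tree has already been populated by psci_setup()):
 *
 *	unsigned int lvl, parent = psci_cpu_pd_nodes[cpu_idx].parent_node;
 *
 *	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
 *		... visit psci_non_cpu_pd_nodes[parent] at level 'lvl' ...
 *		parent = psci_non_cpu_pd_nodes[parent].parent_node;
 *	}
 */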
/*******************************************************************************
* SPD's power management hooks registered with PSCI
@ -115,62 +183,54 @@ extern const spd_pm_ops_t *psci_spd_pm;
* Function prototypes
******************************************************************************/
/* Private exported functions from psci_common.c */
unsigned short psci_get_state(aff_map_node_t *node);
unsigned short psci_get_phys_state(aff_map_node_t *node);
void psci_set_state(aff_map_node_t *node, unsigned short state);
unsigned long mpidr_set_aff_inst(unsigned long, unsigned char, int);
int psci_validate_mpidr(unsigned long, int);
int get_power_on_target_afflvl(void);
void psci_afflvl_power_on_finish(int,
int,
afflvl_power_on_finisher_t *);
int psci_get_ns_ep_info(entry_point_info_t *ep,
uint64_t entrypoint, uint64_t context_id);
int psci_check_afflvl_range(int start_afflvl, int end_afflvl);
void psci_do_afflvl_state_mgmt(uint32_t start_afflvl,
uint32_t end_afflvl,
aff_map_node_t *mpidr_nodes[],
uint32_t state);
void psci_acquire_afflvl_locks(int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[]);
void psci_release_afflvl_locks(int start_afflvl,
int end_afflvl,
mpidr_aff_map_nodes_t mpidr_nodes);
void psci_print_affinity_map(void);
void psci_set_max_phys_off_afflvl(uint32_t afflvl);
uint32_t psci_find_max_phys_off_afflvl(uint32_t start_afflvl,
uint32_t end_afflvl,
aff_map_node_t *mpidr_nodes[]);
int psci_validate_power_state(unsigned int power_state,
psci_power_state_t *state_info);
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
int psci_validate_mpidr(u_register_t mpidr);
void psci_init_req_local_pwr_states(void);
void psci_power_up_finish(void);
int psci_validate_entry_point(entry_point_info_t *ep,
uintptr_t entrypoint, u_register_t context_id);
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
unsigned int end_lvl,
unsigned int node_index[]);
void psci_do_state_coordination(unsigned int end_pwrlvl,
psci_power_state_t *state_info);
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
unsigned int cpu_idx);
int psci_validate_suspend_req(const psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
void psci_print_power_domain_map(void);
unsigned int psci_is_last_on_cpu(void);
int psci_spd_migrate_info(uint64_t *mpidr);
int psci_spd_migrate_info(u_register_t *mpidr);
/* Private exported functions from psci_setup.c */
int psci_get_aff_map_nodes(unsigned long mpidr,
int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[]);
aff_map_node_t *psci_get_aff_map_node(unsigned long, int);
/* Private exported functions from psci_on.c */
int psci_cpu_on_start(unsigned long target_cpu,
entry_point_info_t *ep,
unsigned int end_pwrlvl);
/* Private exported functions from psci_affinity_on.c */
int psci_afflvl_on(unsigned long target_cpu,
entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
void psci_cpu_on_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_affinity_off.c */
int psci_afflvl_off(int, int);
/* Private exported functions from psci_cpu_off.c */
int psci_do_cpu_off(unsigned int end_pwrlvl);
/* Private exported functions from psci_affinity_suspend.c */
void psci_afflvl_suspend(entry_point_info_t *ep,
int start_afflvl,
int end_afflvl);
/* Private exported functions from psci_pwrlvl_suspend.c */
void psci_cpu_suspend_start(entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state_req);
unsigned int psci_afflvl_suspend_finish(int, int);
void psci_set_suspend_power_state(unsigned int power_state);
void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info);
/* Private exported functions from psci_helpers.S */
void psci_do_pwrdown_cache_maintenance(uint32_t affinity_level);
void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
void psci_do_pwrup_cache_maintenance(void);
/* Private exported functions from psci_system_off.c */

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -42,351 +42,225 @@
* Per cpu non-secure contexts used to program the architectural state prior
* return to the normal world.
* TODO: Use the memory allocator to set aside memory for the contexts instead
* of relying on platform defined constants. Using PSCI_NUM_AFFS will be an
* overkill.
* of relying on platform defined constants.
******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];
/*******************************************************************************
* In a system, a certain number of affinity instances are present at an
* affinity level. The cumulative number of instances across all levels is
* stored in 'psci_aff_map'. The topology tree has been flattened into this
* array. To retrieve nodes, information about the extents of each affinity
* level i.e. start index and end index needs to be present. 'psci_aff_limits'
* stores this information.
******************************************************************************/
aff_limits_node_t psci_aff_limits[MPIDR_MAX_AFFLVL + 1];
/******************************************************************************
* Define the psci capability variable.
*****************************************************************************/
uint32_t psci_caps;
unsigned int psci_caps;
/*******************************************************************************
* Routines for retrieving the node corresponding to an affinity level instance
* in the mpidr. The first one uses binary search to find the node corresponding
* to the mpidr (key) at a particular affinity level. The second routine decides
* extents of the binary search at each affinity level.
* Function which initializes the 'psci_non_cpu_pd_nodes' or the
* 'psci_cpu_pd_nodes' corresponding to the power level.
******************************************************************************/
static int psci_aff_map_get_idx(unsigned long key,
int min_idx,
int max_idx)
static void psci_init_pwr_domain_node(unsigned int node_idx,
unsigned int parent_idx,
unsigned int level)
{
int mid;
if (level > PSCI_CPU_PWR_LVL) {
psci_non_cpu_pd_nodes[node_idx].level = level;
psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
psci_non_cpu_pd_nodes[node_idx].local_state =
PLAT_MAX_OFF_STATE;
} else {
psci_cpu_data_t *svc_cpu_data;
/*
* Terminating condition: If the max and min indices have crossed paths
* during the binary search then the key has not been found.
*/
if (max_idx < min_idx)
return PSCI_E_INVALID_PARAMS;
psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
/*
* Make sure we are within array limits.
*/
assert(min_idx >= 0 && max_idx < PSCI_NUM_AFFS);
/* Initialize with an invalid mpidr */
psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
/*
* Bisect the array around 'mid' and then recurse into the array chunk
* where the key is likely to be found. The mpidrs in each node in the
* 'psci_aff_map' for a given affinity level are stored in an ascending
* order which makes the binary search possible.
*/
mid = min_idx + ((max_idx - min_idx) >> 1); /* Divide by 2 */
svc_cpu_data =
&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
if (psci_aff_map[mid].mpidr > key)
return psci_aff_map_get_idx(key, min_idx, mid - 1);
else if (psci_aff_map[mid].mpidr < key)
return psci_aff_map_get_idx(key, mid + 1, max_idx);
else
return mid;
}
/* Set the Affinity Info for the cores as OFF */
svc_cpu_data->aff_info_state = AFF_STATE_OFF;
aff_map_node_t *psci_get_aff_map_node(unsigned long mpidr, int aff_lvl)
{
int rc;
/* Invalidate the suspend level for the cpu */
svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
if (aff_lvl > PLATFORM_MAX_AFFLVL)
return NULL;
/* Set the power state to OFF state */
svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
/* Right shift the mpidr to the required affinity level */
mpidr = mpidr_mask_lower_afflvls(mpidr, aff_lvl);
flush_dcache_range((uintptr_t)svc_cpu_data,
sizeof(*svc_cpu_data));
rc = psci_aff_map_get_idx(mpidr,
psci_aff_limits[aff_lvl].min,
psci_aff_limits[aff_lvl].max);
if (rc >= 0)
return &psci_aff_map[rc];
else
return NULL;
}
/*******************************************************************************
* This function populates an array with nodes corresponding to a given range of
* affinity levels in an mpidr. It returns successfully only when the affinity
* levels are correct, the mpidr is valid i.e. no affinity level is absent from
* the topology tree & the affinity instance at level 0 is not absent.
******************************************************************************/
int psci_get_aff_map_nodes(unsigned long mpidr,
int start_afflvl,
int end_afflvl,
aff_map_node_t *mpidr_nodes[])
{
int rc = PSCI_E_INVALID_PARAMS, level;
aff_map_node_t *node;
rc = psci_check_afflvl_range(start_afflvl, end_afflvl);
if (rc != PSCI_E_SUCCESS)
return rc;
for (level = start_afflvl; level <= end_afflvl; level++) {
/*
* Grab the node for each affinity level. No affinity level
* can be missing as that would mean that the topology tree
* is corrupted.
*/
node = psci_get_aff_map_node(mpidr, level);
if (node == NULL) {
rc = PSCI_E_INVALID_PARAMS;
break;
}
/*
* Skip absent affinity levels unless it's affinity level 0.
* An absent cpu means that the mpidr is invalid. Save the
* pointer to the node for the present affinity level
*/
if (!(node->state & PSCI_AFF_PRESENT)) {
if (level == MPIDR_AFFLVL0) {
rc = PSCI_E_INVALID_PARAMS;
break;
}
mpidr_nodes[level] = NULL;
} else
mpidr_nodes[level] = node;
}
return rc;
}
/*******************************************************************************
* Function which initializes the 'aff_map_node' corresponding to an affinity
* level instance. Each node has a unique mpidr, level and bakery lock. The data
* field is opaque and holds affinity level specific data e.g. for affinity
* level 0 it contains the index into arrays that hold the secure/non-secure
* state for a cpu that's been turned on/off
******************************************************************************/
static void psci_init_aff_map_node(unsigned long mpidr,
int level,
unsigned int idx)
{
unsigned char state;
uint32_t linear_id;
psci_aff_map[idx].mpidr = mpidr;
psci_aff_map[idx].level = level;
psci_lock_init(psci_aff_map, idx);
/*
* If an affinity instance is present then mark it as OFF to begin with.
*/
state = plat_get_aff_state(level, mpidr);
psci_aff_map[idx].state = state;
if (level == MPIDR_AFFLVL0) {
/*
* Mark the cpu as OFF. Higher affinity level reference counts
* have already been memset to 0
*/
if (state & PSCI_AFF_PRESENT)
psci_set_state(&psci_aff_map[idx], PSCI_STATE_OFF);
/*
* Associate a non-secure context with this affinity
* instance through the context management library.
*/
linear_id = platform_get_core_pos(mpidr);
assert(linear_id < PLATFORM_CORE_COUNT);
/* Invalidate the suspend context for the node */
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.power_state,
PSCI_INVALID_DATA);
/*
* There is no state associated with the current execution
* context so ensure that any reads of the highest affinity
* level in a powered down state return PSCI_INVALID_DATA.
*/
set_cpu_data_by_index(linear_id,
psci_svc_cpu_data.max_phys_off_afflvl,
PSCI_INVALID_DATA);
flush_cpu_data_by_index(linear_id, psci_svc_cpu_data);
cm_set_context_by_mpidr(mpidr,
(void *) &psci_ns_context[linear_id],
cm_set_context_by_index(node_idx,
(void *) &psci_ns_context[node_idx],
NON_SECURE);
}
return;
}
/*******************************************************************************
* Core routine used by the Breadth-First-Search algorithm to populate the
* affinity tree. Each level in the tree corresponds to an affinity level. This
* routine's aim is to traverse to the target affinity level and populate nodes
* in the 'psci_aff_map' for all the siblings at that level. It uses the current
* affinity level to keep track of how many levels from the root of the tree
* have been traversed. If the current affinity level != target affinity level,
* then the platform is asked to return the number of children that each
* affinity instance has at the current affinity level. Traversal is then done
* for each child at the next lower level i.e. current affinity level - 1.
*
* CAUTION: This routine assumes that affinity instance ids are allocated in a
* monotonically increasing manner at each affinity level in a mpidr starting
* from 0. If the platform breaks this assumption then this code will have to
* be reworked accordingly.
******************************************************************************/
static unsigned int psci_init_aff_map(unsigned long mpidr,
unsigned int affmap_idx,
int cur_afflvl,
int tgt_afflvl)
* This function updates the cpu_start_idx and ncpus fields for each node in
* psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each
* CPU with those of the previous CPU. The basic assumption is that children
* of the same parent are allocated adjacent indices. The platform should
* ensure this through proper mapping of the CPUs to indices via the
* plat_core_pos_by_mpidr() and plat_my_core_pos() APIs.
******************************************************************************/
static void psci_update_pwrlvl_limits(void)
{
unsigned int ctr, aff_count;
int j;
unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
unsigned int temp_index[PLAT_MAX_PWR_LVL], cpu_idx;
assert(cur_afflvl >= tgt_afflvl);
/*
* Find the number of siblings at the current affinity level &
* assert if there are none, because then we have been invoked with
* an invalid mpidr.
*/
aff_count = plat_get_aff_count(cur_afflvl, mpidr);
assert(aff_count);
if (tgt_afflvl < cur_afflvl) {
for (ctr = 0; ctr < aff_count; ctr++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
affmap_idx = psci_init_aff_map(mpidr,
affmap_idx,
cur_afflvl - 1,
tgt_afflvl);
for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
psci_get_parent_pwr_domain_nodes(cpu_idx,
PLAT_MAX_PWR_LVL,
temp_index);
for (j = PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
if (temp_index[j] != nodes_idx[j]) {
nodes_idx[j] = temp_index[j];
psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
= cpu_idx;
}
psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
}
} else {
for (ctr = 0; ctr < aff_count; ctr++, affmap_idx++) {
mpidr = mpidr_set_aff_inst(mpidr, ctr, cur_afflvl);
psci_init_aff_map_node(mpidr, cur_afflvl, affmap_idx);
}
/* affmap_idx is 1 greater than the max index of cur_afflvl */
psci_aff_limits[cur_afflvl].max = affmap_idx - 1;
}
return affmap_idx;
}
/*******************************************************************************
* This function initializes the topology tree by querying the platform. To do
* so, it's helper routines implement a Breadth-First-Search. At each affinity
* level the platform conveys the number of affinity instances that exist i.e.
* the affinity count. The algorithm populates the psci_aff_map recursively
* using this information. On a platform that implements two clusters of 4 cpus
* each, the populated aff_map_array would look like this:
*
* <- cpus cluster0 -><- cpus cluster1 ->
* ---------------------------------------------------
* | 0 | 1 | 0 | 1 | 2 | 3 | 0 | 1 | 2 | 3 |
* ---------------------------------------------------
* ^ ^
* cluster __| cpu __|
* limit limit
*
* The first 2 entries are of the cluster nodes. The next 4 entries are of cpus
* within cluster 0. The last 4 entries are of cpus within cluster 1.
* The 'psci_aff_limits' array contains the max & min index of each affinity
* level within the 'psci_aff_map' array. This allows restricting search of a
* node at an affinity level between the indices in the limits array.
* Core routine to populate the power domain tree. The tree descriptor passed by
* the platform is populated breadth-first and the first entry in the map
* informs the number of root power domains. The parent nodes of the root nodes
* will point to an invalid entry (-1).
******************************************************************************/
int32_t psci_setup(void)
static void populate_power_domain_tree(const unsigned char *topology)
{
unsigned long mpidr = read_mpidr();
int afflvl, affmap_idx, max_afflvl;
aff_map_node_t *node;
psci_plat_pm_ops = NULL;
/* Find out the maximum affinity level that the platform implements */
max_afflvl = PLATFORM_MAX_AFFLVL;
assert(max_afflvl <= MPIDR_MAX_AFFLVL);
unsigned int i, j = 0, num_nodes_at_lvl = 1, num_nodes_at_next_lvl;
unsigned int node_index = 0, parent_node_index = 0, num_children;
int level = PLAT_MAX_PWR_LVL;
/*
* This call traverses the topology tree with help from the platform and
* populates the affinity map using a breadth-first-search recursively.
* We assume that the platform allocates affinity instance ids from 0
* onwards at each affinity level in the mpidr. FIRST_MPIDR = 0.0.0.0
* For each level the inputs are:
* - number of nodes at this level in plat_array i.e. num_nodes_at_level
* This is the sum of values of nodes at the parent level.
* - Index of first entry at this level in the plat_array i.e.
* parent_node_index.
* - Index of first free entry in psci_non_cpu_pd_nodes[] or
* psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
*/
affmap_idx = 0;
for (afflvl = max_afflvl; afflvl >= MPIDR_AFFLVL0; afflvl--) {
affmap_idx = psci_init_aff_map(FIRST_MPIDR,
affmap_idx,
max_afflvl,
afflvl);
while (level >= PSCI_CPU_PWR_LVL) {
num_nodes_at_next_lvl = 0;
/*
* For each entry (parent node) at this level in the plat_array:
* - Find the number of children
* - Allocate a node in a power domain array for each child
* - Set the parent of the child to the parent_node_index - 1
* - Increment parent_node_index to point to the next parent
* - Accumulate the number of children at next level.
*/
for (i = 0; i < num_nodes_at_lvl; i++) {
assert(parent_node_index <=
PSCI_NUM_NON_CPU_PWR_DOMAINS);
num_children = topology[parent_node_index];
for (j = node_index;
j < node_index + num_children; j++)
psci_init_pwr_domain_node(j,
parent_node_index - 1,
level);
node_index = j;
num_nodes_at_next_lvl += num_children;
parent_node_index++;
}
num_nodes_at_lvl = num_nodes_at_next_lvl;
level--;
/* Reset the index for the cpu power domain array */
if (level == PSCI_CPU_PWR_LVL)
node_index = 0;
}
/* Validate the sanity of the array exported by the platform */
assert(j == PLATFORM_CORE_COUNT);
#if !USE_COHERENT_MEM
/* Flush the non CPU power domain data to memory */
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
}
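/*
 * Illustrative only: a hypothetical tree descriptor, as consumed above, for
 * one system power domain containing two clusters of two CPUs each. The
 * first entry is the number of root power domains; each subsequent entry
 * gives the number of children of a node, breadth-first:
 */
static const unsigned char example_power_domain_tree_desc[] = {
	1,	/* One root (system) power domain */
	2,	/* The system domain has two clusters as children */
	2,	/* Cluster 0 has two CPUs */
	2	/* Cluster 1 has two CPUs */
};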
/*******************************************************************************
* This function initializes the power domain topology tree by querying the
* platform. The power domain nodes higher than the CPU are populated in the
* array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
* psci_cpu_pd_nodes[]. The platform exports its static topology map through the
* populate_power_domain_topology_tree() API. The algorithm populates the
* psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
* topology map. On a platform that implements two clusters of 2 cpus each, and
* supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would look
* like this:
*
* ---------------------------------------------------
* | system node | cluster 0 node | cluster 1 node |
* ---------------------------------------------------
*
* And populated psci_cpu_pd_nodes would look like this :
* <- cpus cluster0 -><- cpus cluster1 ->
* ------------------------------------------------
* | CPU 0 | CPU 1 | CPU 2 | CPU 3 |
* ------------------------------------------------
******************************************************************************/
int psci_setup(void)
{
const unsigned char *topology_tree;
/* Query the topology map from the platform */
topology_tree = plat_get_power_domain_tree_desc();
/* Populate the power domain arrays using the platform topology map */
populate_power_domain_tree(topology_tree);
/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
psci_update_pwrlvl_limits();
/* Populate the mpidr field of cpu node for this CPU */
psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
read_mpidr() & MPIDR_AFFINITY_MASK;
#if !USE_COHERENT_MEM
/*
* The psci_aff_map only needs flushing when it's not allocated in
* The psci_non_cpu_pd_nodes only needs flushing when it's not allocated in
* coherent memory.
*/
flush_dcache_range((uint64_t) &psci_aff_map, sizeof(psci_aff_map));
flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
sizeof(psci_non_cpu_pd_nodes));
#endif
/*
* Set the bounds for the affinity counts of each level in the map. Also
* flush out the entire array so that it's visible to subsequent power
* management operations. The 'psci_aff_limits' array is allocated in
* normal memory. It will be accessed when the mmu is off e.g. after
* reset. Hence it needs to be flushed.
*/
for (afflvl = MPIDR_AFFLVL0; afflvl < max_afflvl; afflvl++) {
psci_aff_limits[afflvl].min =
psci_aff_limits[afflvl + 1].max + 1;
}
flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
sizeof(psci_cpu_pd_nodes));
flush_dcache_range((unsigned long) psci_aff_limits,
sizeof(psci_aff_limits));
psci_init_req_local_pwr_states();
/*
* Mark the affinity instances in our mpidr as ON. No need to lock as
* this is the primary cpu.
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
mpidr &= MPIDR_AFFINITY_MASK;
for (afflvl = MPIDR_AFFLVL0; afflvl <= max_afflvl; afflvl++) {
psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
node = psci_get_aff_map_node(mpidr, afflvl);
assert(node);
/* Mark each present node as ON. */
if (node->state & PSCI_AFF_PRESENT)
psci_set_state(node, PSCI_STATE_ON);
}
platform_setup_pm(&psci_plat_pm_ops);
plat_setup_psci_ops((uintptr_t)psci_entrypoint,
&psci_plat_pm_ops);
assert(psci_plat_pm_ops);
/* Initialize the psci capability */
psci_caps = PSCI_GENERIC_CAP;
if (psci_plat_pm_ops->affinst_off)
if (psci_plat_pm_ops->pwr_domain_off)
psci_caps |= define_psci_cap(PSCI_CPU_OFF);
if (psci_plat_pm_ops->affinst_on && psci_plat_pm_ops->affinst_on_finish)
if (psci_plat_pm_ops->pwr_domain_on &&
psci_plat_pm_ops->pwr_domain_on_finish)
psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
if (psci_plat_pm_ops->affinst_suspend &&
psci_plat_pm_ops->affinst_suspend_finish) {
if (psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->pwr_domain_suspend_finish) {
psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
if (psci_plat_pm_ops->get_sys_suspend_power_state)
psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);

View file

@ -0,0 +1,265 @@
/*
* Copyright (c) 2013-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* Neither the name of ARM nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific
* prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <runtime_svc.h>
#include <stddef.h>
#include "psci_private.h"
/*******************************************************************************
* This function does generic and platform specific operations after a wake-up
* from standby/retention states at multiple power levels.
******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
psci_power_state_t *state_info,
unsigned int end_pwrlvl)
{
psci_acquire_pwr_domain_locks(end_pwrlvl,
cpu_idx);
/*
* Plat. management: Allow the platform to do operations
* on waking up from retention.
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/*
* Set the requested and target state of this CPU and all the higher
* power domain levels for this CPU to run.
*/
psci_set_pwr_domains_to_run(end_pwrlvl);
psci_release_pwr_domain_locks(end_pwrlvl,
cpu_idx);
}
/*******************************************************************************
* This function does generic and platform specific suspend to power down
* operations.
******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
entry_point_info_t *ep,
psci_power_state_t *state_info)
{
/* Save PSCI target power level for the suspend finisher handler */
psci_set_suspend_pwrlvl(end_pwrlvl);
/*
* Flush the target power level as it will be accessed on power up with
* Data cache disabled.
*/
flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);
/*
* Call the cpu suspend handler registered by the Secure Payload
* Dispatcher to let it do any book-keeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend)
psci_spd_pm->svc_suspend(0);
/*
* Store the re-entry information for the non-secure world.
*/
cm_init_my_context(ep);
/*
* Arch. management. Perform the necessary steps to flush all
* cpu caches. Currently we assume that the power level corresponds to
* the cache level.
* TODO : Introduce a mechanism to query the cache level to flush
* and the cpu-ops power down to perform from the platform.
*/
psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(state_info));
}
/*******************************************************************************
* Top level handler which is called when a cpu wants to suspend its execution.
* It is assumed that along with suspending the cpu power domain, power domains
* at higher levels, up to the target power level, will be suspended as well.
* It coordinates with the platform to negotiate the target state for each
* power domain level up to the target level. It then performs
* generic, architectural, platform setup and state management required to
* suspend that power domain level and power domain levels below it.
* e.g. For a cpu that's to be suspended, it could mean programming the
* power controller whereas for a cluster that's to be suspended, it will call
* the platform specific code which will disable coherency at the interconnect
* level if the cpu is the last in the cluster, and also program the power
* controller.
*
* All the required parameter checks are performed at the beginning. After
* the state transition has been done, no further error is expected and it is
* not possible to undo any of the actions taken beyond that point.
******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
unsigned int end_pwrlvl,
psci_power_state_t *state_info,
unsigned int is_power_down_state)
{
int skip_wfi = 0;
unsigned int idx = plat_my_core_pos();
/*
* This function must only be called on platforms where the
* CPU_SUSPEND platform hooks have been implemented.
*/
assert(psci_plat_pm_ops->pwr_domain_suspend &&
psci_plat_pm_ops->pwr_domain_suspend_finish);
/*
* This function acquires the lock corresponding to each power
* level so that by the time all locks are taken, the system topology
* is snapshotted and state management can be done safely.
*/
psci_acquire_pwr_domain_locks(end_pwrlvl,
idx);
/*
* We check if there are any pending interrupts after the delay
* introduced by lock contention to increase the chances of early
* detection that a wake-up interrupt has fired.
*/
if (read_isr_el1()) {
skip_wfi = 1;
goto exit;
}
/*
* This function is passed the requested state info and
* it returns the negotiated state info for each power level up to
* the end level specified.
*/
psci_do_state_coordination(end_pwrlvl, state_info);
if (is_power_down_state)
psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);
/*
* Plat. management: Allow the platform to perform the
* necessary actions to turn off this cpu e.g. set the
* platform defined mailbox with the psci entrypoint,
* program the power controller etc.
*/
psci_plat_pm_ops->pwr_domain_suspend(state_info);
exit:
/*
* Release the locks corresponding to each power level in the
* reverse order to which they were acquired.
*/
psci_release_pwr_domain_locks(end_pwrlvl,
idx);
if (skip_wfi)
return;
if (is_power_down_state)
psci_power_down_wfi();
/*
* We will reach here if only retention/standby states have been
* requested at multiple power levels. This means that the cpu
* context will be preserved.
*/
wfi();
/*
* After we wake up from context retaining suspend, call the
* context retaining suspend finisher.
*/
psci_suspend_to_standby_finisher(idx, state_info, end_pwrlvl);
}
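/*
 * Illustrative sketch, not part of this patch: a caller requesting plain CPU
 * standby (no power down) might build the arguments like this. The constant
 * PLAT_MAX_RET_STATE (the platform's deepest retention state) is an
 * assumption of this sketch.
 */
static void example_cpu_standby(entry_point_info_t *ep)
{
	psci_power_state_t state_info = { {0} };	/* all levels RUN */

	/* Request retention at the CPU power level only */
	state_info.pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_MAX_RET_STATE;

	/* is_power_down_state == 0: cpu context is preserved across wfi */
	psci_cpu_suspend_start(ep, PSCI_CPU_PWR_LVL, &state_info, 0);
}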
/*******************************************************************************
* The following function finishes an earlier suspend request. It is
* called by the common finisher routine in psci_common.c. The `state_info`
* is the psci_power_state from which this CPU has woken up.
******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
psci_power_state_t *state_info)
{
unsigned long long counter_freq;
unsigned int suspend_level;
/* Ensure we have been woken up from a suspended state */
assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(\
state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));
/*
* Plat. management: Perform the platform specific actions
* before we change the state of the cpu e.g. enabling the
* gic or zeroing the mailbox register. If anything goes
* wrong then assert as there is no way to recover from this
* situation.
*/
psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);
/*
* Arch. management: Enable the data cache, manage stack memory and
* restore the stashed EL3 architectural context from the 'cpu_context'
* structure for this cpu.
*/
psci_do_pwrup_cache_maintenance();
/* Re-init the cntfrq_el0 register */
counter_freq = plat_get_syscnt_freq();
write_cntfrq_el0(counter_freq);
/*
* Call the cpu suspend finish handler registered by the Secure Payload
* Dispatcher to let it do any bookkeeping. If the handler encounters an
* error, it's expected to assert within
*/
if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
suspend_level = psci_get_suspend_pwrlvl();
assert(suspend_level != PSCI_INVALID_PWR_LVL);
psci_spd_pm->svc_suspend_finish(suspend_level);
}
/* Invalidate the suspend level for the cpu */
psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);
/*
* Generic management: Now we just need to retrieve the
* information that we had stashed away during the suspend
* call to set this cpu on its way.
*/
cm_prepare_el3_exit(NON_SECURE);
/* Clean caches before re-entering normal world */
dcsw_op_louis(DCCSW);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, ARM Limited and Contributors. All rights reserved.
* Copyright (c) 2014-2015, ARM Limited and Contributors. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@ -37,7 +37,7 @@
void psci_system_off(void)
{
psci_print_affinity_map();
psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_off);
@ -54,7 +54,7 @@ void psci_system_off(void)
void psci_system_reset(void)
{
psci_print_affinity_map();
psci_print_power_domain_map();
assert(psci_plat_pm_ops->system_reset);