Mirror of https://github.com/ARM-software/arm-trusted-firmware.git (synced 2025-04-17 01:54:22 +00:00)

Enforce full include path for includes. Deprecate old paths.

The following folders inside include/lib have been left unchanged:

- include/lib/cpus/${ARCH}
- include/lib/el3_runtime/${ARCH}

The reason for this change is that having a global namespace for includes isn't a good idea. It defeats one of the advantages of having folders and it introduces problems that are sometimes subtle (because you may not know the header you are actually including if there are two of them).

For example, this patch had to be created because two headers were called the same way: e0ea0928d5 ("Fix gpio includes of mt8173 platform to avoid collision."). More recently, this patch has had similar problems: 46f9b2c3a2 ("drivers: add tzc380 support").

This problem was introduced in commit 4ecca33988 ("Move include and source files to logical locations"). At that time, there weren't too many headers, so it wasn't a real issue. However, time has shown that this creates problems.

Platforms that want to preserve the way they include headers may add the removed paths to PLAT_INCLUDES, but this is discouraged.

Change-Id: I39dc53ed98f9e297a5966e723d1936d6ccf2fc8f
Signed-off-by: Antonio Nino Diaz <antonio.ninodiaz@arm.com>
191 lines
4.5 KiB
C
/*
 * Copyright (c) 2018, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <drivers/generic_delay_timer.h>
#include <lib/cassert.h>
#include <lib/psci/psci.h>

#include <sq_common.h>
#include "sq_scpi.h"

/* Macros to read the SQ power domain state */
#define SQ_PWR_LVL0		MPIDR_AFFLVL0
#define SQ_PWR_LVL1		MPIDR_AFFLVL1
#define SQ_PWR_LVL2		MPIDR_AFFLVL2

#define SQ_CORE_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL0]
#define SQ_CLUSTER_PWR_STATE(state)	(state)->pwr_domain_state[SQ_PWR_LVL1]
#define SQ_SYSTEM_PWR_STATE(state)	((PLAT_MAX_PWR_LVL > SQ_PWR_LVL1) ?\
					(state)->pwr_domain_state[SQ_PWR_LVL2] : 0)

uintptr_t sq_sec_entrypoint;

int sq_pwr_domain_on(u_register_t mpidr)
{
	/*
	 * SCP takes care of powering up parent power domains so we
	 * only need to care about level 0
	 */
	scpi_set_sq_power_state(mpidr, scpi_power_on, scpi_power_on,
				scpi_power_on);

	return PSCI_E_SUCCESS;
}

static void sq_pwr_domain_on_finisher_common(
			const psci_power_state_t *target_state)
{
	assert(SQ_CORE_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster specific operations i.e. enable coherency
	 * if this cluster was off.
	 */
	if (SQ_CLUSTER_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF)
		plat_sq_interconnect_enter_coherency();
}

void sq_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(SQ_SYSTEM_PWR_STATE(target_state) == SQ_LOCAL_STATE_RUN);

	sq_pwr_domain_on_finisher_common(target_state);

	/* Program the gic per-cpu distributor or re-distributor interface */
	sq_gic_pcpu_init();

	/* Enable the gic cpu interface */
	sq_gic_cpuif_enable();
}

static void sq_power_down_common(const psci_power_state_t *target_state)
{
	uint32_t cluster_state = scpi_power_on;
	uint32_t system_state = scpi_power_on;

	/* Prevent interrupts from spuriously waking up this cpu */
	sq_gic_cpuif_disable();

	/* Check if power down at system power domain level is requested */
	if (SQ_SYSTEM_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF)
		system_state = scpi_power_retention;

	/* Cluster is to be turned off, so disable coherency */
	if (SQ_CLUSTER_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF) {
		plat_sq_interconnect_exit_coherency();
		cluster_state = scpi_power_off;
	}

	/*
	 * Ask the SCP to power down the appropriate components depending upon
	 * their state.
	 */
	scpi_set_sq_power_state(read_mpidr_el1(),
				scpi_power_off,
				cluster_state,
				system_state);
}

void sq_pwr_domain_off(const psci_power_state_t *target_state)
{
	sq_power_down_common(target_state);
}

void __dead2 sq_system_off(void)
{
	volatile uint32_t *gpio = (uint32_t *)PLAT_SQ_GPIO_BASE;

	/* set PD[9] high to power off the system */
	gpio[5] |= 0x2;		/* set output */
	gpio[1] |= 0x2;		/* set high */
	dmbst();

	generic_delay_timer_init();

	mdelay(1);

	while (1) {
		gpio[1] &= ~0x2;	/* set low */
		dmbst();

		mdelay(1);

		gpio[1] |= 0x2;		/* set high */
		dmbst();

		mdelay(100);
	}

	wfi();
	ERROR("SQ System Off: operation not handled.\n");
	panic();
}

void __dead2 sq_system_reset(void)
{
	uint32_t response;

	/* Send the system reset request to the SCP */
	response = scpi_sys_power_state(scpi_system_reboot);

	if (response != SCP_OK) {
		ERROR("SQ System Reset: SCP error %u.\n", response);
		panic();
	}

	wfi();
	ERROR("SQ System Reset: operation not handled.\n");
	panic();
}

void sq_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == SQ_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/* Enable PhysicalIRQ bit for NS world to wake the CPU */
	write_scr_el3(scr | SCR_IRQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR to the original value, synchronisation of scr_el3 is
	 * done by eret while el3_exit to save some execution cycles.
	 */
	write_scr_el3(scr);
}

const plat_psci_ops_t sq_psci_ops = {
	.pwr_domain_on		= sq_pwr_domain_on,
	.pwr_domain_off		= sq_pwr_domain_off,
	.pwr_domain_on_finish	= sq_pwr_domain_on_finish,
	.cpu_standby		= sq_cpu_standby,
	.system_off		= sq_system_off,
	.system_reset		= sq_system_reset,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	sq_sec_entrypoint = sec_entrypoint;
	flush_dcache_range((uint64_t)&sq_sec_entrypoint,
			   sizeof(sq_sec_entrypoint));

	*psci_ops = &sq_psci_ops;

	return 0;
}