Merge changes from topic "versal2-pm-support" into integration

* changes:
  feat(versal2): extended SMCCC payload for EEMI
  feat(versal2): add support for platform management
  feat(versal2): add dependency macro for PM
This commit is contained in:
Joanna Farley 2025-02-21 10:42:57 +01:00 committed by TrustedFirmware Code Review
commit 49d02511d7
10 changed files with 1318 additions and 11 deletions

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2022, Xilinx, Inc. All rights reserved. * Copyright (c) 2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved. * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -22,4 +22,17 @@
#define NON_SECURE_FLAG 1U #define NON_SECURE_FLAG 1U
#define SECURE_FLAG 0U #define SECURE_FLAG 0U
/* Processor core device IDs */
#define PM_DEV_CLUSTER0_ACPU_0 (0x1810C0AFU)
#define PM_DEV_CLUSTER0_ACPU_1 (0x1810C0B0U)
#define PM_DEV_CLUSTER1_ACPU_0 (0x1810C0B3U)
#define PM_DEV_CLUSTER1_ACPU_1 (0x1810C0B4U)
#define PM_DEV_CLUSTER2_ACPU_0 (0x1810C0B7U)
#define PM_DEV_CLUSTER2_ACPU_1 (0x1810C0B8U)
#define PM_DEV_CLUSTER3_ACPU_0 (0x1810C0BBU)
#define PM_DEV_CLUSTER3_ACPU_1 (0x1810C0BCU)
#endif /* PLAT_PM_COMMON_H */ #endif /* PLAT_PM_COMMON_H */

View file

@ -1,7 +1,7 @@
/* /*
* Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved. * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved. * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -45,7 +45,7 @@ extern uint32_t rtlversion, psversion, pmcversion;
void board_detection(void); void board_detection(void);
const char *board_name_decode(void); const char *board_name_decode(void);
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, void *cookie, void *handle, uint64_t flags); uint64_t x4, const void *cookie, void *handle, uint64_t flags);
int32_t sip_svc_setup_init(void); int32_t sip_svc_setup_init(void);
/* /*
* Register handler to specific GIC entrance * Register handler to specific GIC entrance

View file

@ -11,6 +11,7 @@
#include <arch.h> #include <arch.h>
#include "def.h" #include "def.h"
#include <plat_common.h>
/******************************************************************************* /*******************************************************************************
* Generic platform constants * Generic platform constants
@ -122,6 +123,8 @@
#define PLAT_GICD_BASE_VALUE U(0xE2000000) #define PLAT_GICD_BASE_VALUE U(0xE2000000)
#define PLAT_GICR_BASE_VALUE U(0xE2060000) #define PLAT_GICR_BASE_VALUE U(0xE2060000)
#define PLAT_ARM_GICR_BASE PLAT_GICR_BASE_VALUE
#define PLAT_ARM_GICD_BASE PLAT_GICD_BASE_VALUE
/* /*
* Define a list of Group 1 Secure and Group 0 interrupts as per GICv3 * Define a list of Group 1 Secure and Group 0 interrupts as per GICv3
@ -138,6 +141,8 @@
#define PLAT_G0_IRQ_PROPS(grp) \ #define PLAT_G0_IRQ_PROPS(grp) \
INTR_PROP_DESC(PLAT_VERSAL_IPI_IRQ, GIC_HIGHEST_SEC_PRIORITY, grp, \ INTR_PROP_DESC(PLAT_VERSAL_IPI_IRQ, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_EDGE), \ GIC_INTR_CFG_EDGE), \
INTR_PROP_DESC(CPU_PWR_DOWN_REQ_INTR, GIC_HIGHEST_SEC_PRIORITY, grp, \
GIC_INTR_CFG_EDGE)
#define IRQ_MAX 200U #define IRQ_MAX 200U

View file

@ -1,7 +1,7 @@
/* /*
* Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2018-2020, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved. * Copyright (c) 2021-2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved. * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -200,7 +200,7 @@ static int32_t no_pm_ioctl(uint32_t device_id, uint32_t ioctl_id,
} }
static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
uint64_t x4, void *cookie, void *handle, uint64_t flags) uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{ {
int32_t ret; int32_t ret;
uint32_t arg[4], api_id; uint32_t arg[4], api_id;
@ -240,7 +240,7 @@ static uint64_t no_pm_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64
} }
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
void *cookie, void *handle, uint64_t flags) const void *cookie, void *handle, uint64_t flags)
{ {
return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags); return no_pm_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
} }

View file

@ -0,0 +1,340 @@
/*
* Copyright (c) 2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <assert.h>
#include <common/debug.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <plat_arm.h>
#include "def.h"
#include <ipi.h>
#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include <pm_common.h>
#include "pm_defs.h"
#include "pm_svc_main.h"
static uintptr_t sec_entry;
/**
 * versal2_pwr_domain_on() - Power on the core identified by @mpidr.
 * @mpidr: MPIDR of the target core.
 *
 * Asks the PMC firmware (pm_req_wakeup) to start the core at the saved
 * secure warm-boot entry point, then clears any pending power-down
 * request for it via pm_client_wakeup().
 *
 * Return: PSCI_E_SUCCESS on success, PSCI_E_INTERN_FAIL otherwise.
 */
static int32_t versal2_pwr_domain_on(u_register_t mpidr)
{
	int32_t cpu_id = plat_core_pos_by_mpidr(mpidr);
	int32_t ret = (int32_t) PSCI_E_INTERN_FAIL;
	enum pm_ret_status pm_ret;
	const struct pm_proc *proc;

	if (cpu_id != -1) {
		proc = pm_get_proc((uint32_t)cpu_id);
		if (proc != NULL) {
			/*
			 * Entry point is split into two 32-bit words; bit 0
			 * of the low word is set — presumably an
			 * "address valid" flag, confirm against the EEMI spec.
			 */
			pm_ret = pm_req_wakeup(proc->node_id,
					       (uint32_t)
					       ((sec_entry & 0xFFFFFFFFU) | 0x1U),
					       sec_entry >> 32, 0, 0);

			if (pm_ret == PM_RET_SUCCESS) {
				/* Clear power down request */
				pm_client_wakeup(proc);
				ret = (int32_t) PSCI_E_SUCCESS;
			}
		}
	}

	return ret;
}
/**
* versal2_pwr_domain_off() - Turn off core.
* @target_state: Targeted state.
*/
static void versal2_pwr_domain_off(const psci_power_state_t *target_state)
{
const struct pm_proc *proc;
uint32_t cpu_id = plat_my_core_pos();
enum pm_ret_status pm_ret;
size_t i;
proc = pm_get_proc(cpu_id);
if (proc == NULL) {
ERROR("Failed to get proc %d\n", cpu_id);
goto err;
}
for (i = 0; i <= PLAT_MAX_PWR_LVL; i++) {
VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
__func__, i, target_state->pwr_domain_state[i]);
}
plat_gic_cpuif_disable();
/*
* Send request to PMC to power down the appropriate APU CPU
* core.
* According to PSCI specification, CPU_off function does not
* have resume address and CPU core can only be woken up
* invoking CPU_on function, during which resume address will
* be set.
*/
pm_ret = pm_self_suspend(proc->node_id, MAX_LATENCY, PM_STATE_CPU_IDLE, 0,
SECURE_FLAG);
if (pm_ret != PM_RET_SUCCESS) {
ERROR("Failed to power down CPU %d\n", cpu_id);
}
err:
return;
}
/**
 * versal2_system_reset() - Send the reset request to firmware for the
 *                          system to reset. This function does not
 *                          return as it resets system.
 */
static void __dead2 versal2_system_reset(void)
{
	/* ~1 s total: 10000 polls x 100 us delay below */
	uint32_t timeout = 10000U;
	enum pm_ret_status pm_ret;
	int32_t ret;

	/* Power down all online secondary cores first */
	request_cpu_pwrdwn();

	/*
	 * Send the system reset request to the firmware if power down request
	 * is not received from firmware.
	 * NOTE(review): the condition below fires when pwrdwn_req_received is
	 * true, which reads as the opposite of this comment — confirm the
	 * intended semantics of the flag set in ipi_fiq_handler().
	 */
	if (pwrdwn_req_received == true) {
		/*
		 * TODO: shutdown scope for this reset needs be revised once
		 * we have a clearer understanding of the overall reset scoping
		 * including the implementation of SYSTEM_RESET2.
		 */
		pm_ret = pm_system_shutdown(XPM_SHUTDOWN_TYPE_RESET,
					    pm_get_shutdown_scope(), SECURE_FLAG);

		if (pm_ret != PM_RET_SUCCESS) {
			WARN("System shutdown failed\n");
		}

		/*
		 * Wait for system shutdown request completed and idle callback
		 * not received.
		 */
		do {
			ret = ipi_mb_enquire_status(primary_proc->ipi->local_ipi_id,
						    primary_proc->ipi->remote_ipi_id);
			udelay(100);
			timeout--;
		} while ((ret != (int32_t)IPI_MB_STATUS_RECV_PENDING) && (timeout > 0U));
	}

	/* Take this core offline and park; never returns */
	(void)psci_cpu_off();

	while (true) {
		wfi();
	}
}
/**
* versal2_pwr_domain_suspend() - Send request to PMC to suspend core.
* @target_state: Targeted state.
*/
static void versal2_pwr_domain_suspend(const psci_power_state_t *target_state)
{
const struct pm_proc *proc;
uint32_t cpu_id = plat_my_core_pos();
uint32_t state;
enum pm_ret_status ret;
size_t i;
proc = pm_get_proc(cpu_id);
if (proc == NULL) {
ERROR("Failed to get proc %d\n", cpu_id);
goto err;
}
for (i = 0; i <= PLAT_MAX_PWR_LVL; i++) {
VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
__func__, i, target_state->pwr_domain_state[i]);
}
plat_gic_cpuif_disable();
if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
plat_gic_save();
}
state = (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) ?
PM_STATE_SUSPEND_TO_RAM : PM_STATE_CPU_IDLE;
/* Send request to PMC to suspend this core */
ret = pm_self_suspend(proc->node_id, MAX_LATENCY, state, sec_entry,
SECURE_FLAG);
if (ret != PM_RET_SUCCESS) {
ERROR("Failed to power down CPU %d\n", cpu_id);
}
err:
return;
}
/*
 * Finish the power-on sequence for a core that just came up: perform the
 * per-CPU GIC initialisation and then enable its CPU interface.
 */
static void versal2_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	(void)target_state;

	/* Per-CPU GIC (re-)initialisation for this core */
	plat_gic_pcpu_init();

	/* Turn on this core's GIC CPU interface */
	plat_gic_cpuif_enable();
}
/**
* versal2_pwr_domain_suspend_finish() - Performs actions to finish
* suspend procedure.
* @target_state: Targeted state.
*/
static void versal2_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
const struct pm_proc *proc;
uint32_t cpu_id = plat_my_core_pos();
size_t i;
proc = pm_get_proc(cpu_id);
if (proc == NULL) {
ERROR("Failed to get proc %d\n", cpu_id);
goto err;
}
for (i = 0; i <= PLAT_MAX_PWR_LVL; i++) {
VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n",
__func__, i, target_state->pwr_domain_state[i]);
}
/* Clear the APU power control register for this cpu */
pm_client_wakeup(proc);
/* APU was turned off, so restore GIC context */
if (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) {
plat_gic_resume();
}
plat_gic_cpuif_enable();
err:
return;
}
/**
 * versal2_system_off() - Send the system off request to firmware.
 *                        This function does not return as it puts core into WFI
 */
static void __dead2 versal2_system_off(void)
{
	enum pm_ret_status status;

	/* Ask the PMC firmware to shut the whole system down */
	status = pm_system_shutdown(XPM_SHUTDOWN_TYPE_SHUTDOWN,
				    pm_get_shutdown_scope(), SECURE_FLAG);

	if (status != PM_RET_SUCCESS) {
		ERROR("System shutdown failed\n");
	}

	/* Park this core forever */
	for (;;) {
		wfi();
	}
}
/**
 * versal2_validate_power_state() - Ensure that the power state
 *                                  parameter in request is valid.
 * @power_state: Power state of core.
 * @req_state: Requested state.
 *
 * Return: Returns status, either PSCI_E_SUCCESS or reason.
 */
static int32_t versal2_validate_power_state(unsigned int power_state,
					    psci_power_state_t *req_state)
{
	int32_t status = PSCI_E_SUCCESS;

	VERBOSE("%s: power_state: 0x%x\n", __func__, power_state);

	assert(req_state);

	/* Standby requests map to retention; everything else to off */
	req_state->pwr_domain_state[MPIDR_AFFLVL0] =
		(psci_get_pstate_type(power_state) == PSTATE_TYPE_STANDBY) ?
		PLAT_MAX_RET_STATE : PLAT_MAX_OFF_STATE;

	/* Only a zero 'state_id' is supported */
	if (psci_get_pstate_id(power_state) != 0U) {
		status = PSCI_E_INVALID_PARAMS;
	}

	return status;
}
/**
 * versal2_get_sys_suspend_power_state() - Get power state for system
 *                                         suspend.
 * @req_state: Requested state.
 *
 * Marks every affinity level up to PLAT_MAX_PWR_LVL as fully off.
 */
static void versal2_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	for (uint64_t lvl = MPIDR_AFFLVL0; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
		req_state->pwr_domain_state[lvl] = PLAT_MAX_OFF_STATE;
	}
}
/**
 * Export the platform specific power ops.
 * Registered with the PSCI framework via plat_setup_psci_ops() below.
 */
static const struct plat_psci_ops versal2_nopmc_psci_ops = {
	.pwr_domain_on = versal2_pwr_domain_on,
	.pwr_domain_off = versal2_pwr_domain_off,
	.pwr_domain_on_finish = versal2_pwr_domain_on_finish,
	.pwr_domain_suspend = versal2_pwr_domain_suspend,
	.pwr_domain_suspend_finish = versal2_pwr_domain_suspend_finish,
	.system_off = versal2_system_off,
	.system_reset = versal2_system_reset,
	.validate_power_state = versal2_validate_power_state,
	.get_sys_suspend_power_state = versal2_get_sys_suspend_power_state,
};
/**
 * plat_setup_psci_ops() - Register the platform PSCI ops table and
 *                         record the secure warm-boot entry point.
 * @sec_entrypoint: Entry point woken/resumed cores should start from.
 * @psci_ops: Output; set to this platform's ops table.
 *
 * Return: Always 0.
 */
int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const struct plat_psci_ops **psci_ops)
{
	/* Saved for later use by the on/suspend handlers */
	sec_entry = sec_entrypoint;

	VERBOSE("Setting up entry point %lx\n", sec_entry);

	*psci_ops = &versal2_nopmc_psci_ops;

	return 0;
}
/* SiP service init hook: bring up the PM service and IPI plumbing. */
int32_t sip_svc_setup_init(void)
{
	return pm_setup();
}
/* SiP SMC entry point: forward every call to the PM SMC handler. */
uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4,
		     const void *cookie, void *handle, uint64_t flags)
{
	return pm_smc_handler(smc_fid, x1, x2, x3, x4, cookie, handle, flags);
}

View file

@ -28,6 +28,9 @@ PL011_GENERIC_UART := 1
IPI_CRC_CHECK := 0 IPI_CRC_CHECK := 0
GIC_ENABLE_V4_EXTN := 0 GIC_ENABLE_V4_EXTN := 0
GICV3_SUPPORT_GIC600 := 1 GICV3_SUPPORT_GIC600 := 1
TFA_NO_PM := 0
CPU_PWRDWN_SGI ?= 6
$(eval $(call add_define_val,CPU_PWR_DOWN_REQ_INTR,ARM_IRQ_SEC_SGI_${CPU_PWRDWN_SGI}))
override CTX_INCLUDE_AARCH32_REGS := 0 override CTX_INCLUDE_AARCH32_REGS := 0
@ -35,6 +38,10 @@ override CTX_INCLUDE_AARCH32_REGS := 0
override PLAT_XLAT_TABLES_DYNAMIC := 1 override PLAT_XLAT_TABLES_DYNAMIC := 1
$(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC)) $(eval $(call add_define,PLAT_XLAT_TABLES_DYNAMIC))
ifdef TFA_NO_PM
$(eval $(call add_define,TFA_NO_PM))
endif
ifdef MEM_BASE ifdef MEM_BASE
$(eval $(call add_define,MEM_BASE)) $(eval $(call add_define,MEM_BASE))
@ -129,8 +136,17 @@ BL31_SOURCES += drivers/arm/cci/cci.c \
drivers/scmi-msg/reset_domain.c \ drivers/scmi-msg/reset_domain.c \
${PLAT_PATH}/scmi.c ${PLAT_PATH}/scmi.c
BL31_SOURCES += ${PLAT_PATH}/plat_psci.c \ ifeq ($(TFA_NO_PM), 0)
common/fdt_wrappers.c \ BL31_SOURCES += plat/xilinx/common/pm_service/pm_api_sys.c \
plat/xilinx/common/pm_service/pm_ipi.c \
${PLAT_PATH}/plat_psci_pm.c \
${PLAT_PATH}/pm_service/pm_svc_main.c \
${PLAT_PATH}/pm_service/pm_client.c
else
BL31_SOURCES += ${PLAT_PATH}/plat_psci.c
endif
BL31_SOURCES += common/fdt_wrappers.c \
plat/xilinx/common/plat_console.c \ plat/xilinx/common/plat_console.c \
plat/xilinx/common/plat_startup.c \ plat/xilinx/common/plat_startup.c \
plat/xilinx/common/ipi.c \ plat/xilinx/common/ipi.c \

View file

@ -0,0 +1,387 @@
/*
* Copyright (c) 2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* APU specific definition of processors in the subsystem as well as functions
* for getting information about and changing state of the APU.
*/
#include <assert.h>
#include <drivers/arm/gic_common.h>
#include <drivers/arm/gicv3.h>
#include <lib/bakery_lock.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>
#include <lib/utils.h>
#include <plat/common/platform.h>
#include <platform_def.h>
#include "def.h"
#include <plat_ipi.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#define UNDEFINED_CPUID UINT32_MAX
DEFINE_RENAME_SYSREG_RW_FUNCS(cpu_pwrctrl_val, S3_0_C15_C2_7)
/*
* ARM v8.2, the cache will turn off automatically when cpu
* power down. Therefore, there is no doubt to use the spin_lock here.
*/
static spinlock_t pm_client_secure_lock;
/* Acquire the lock serialising PM client register updates across cores. */
static inline void pm_client_lock_get(void)
{
	spin_lock(&pm_client_secure_lock);
}
/* Release the PM client lock taken by pm_client_lock_get(). */
static inline void pm_client_lock_release(void)
{
	spin_unlock(&pm_client_secure_lock);
}
static const struct pm_ipi apu_ipi = {
.local_ipi_id = IPI_LOCAL_ID,
.remote_ipi_id = IPI_REMOTE_ID,
.buffer_base = IPI_BUFFER_LOCAL_BASE,
};
/* Order in pm_procs_all array must match cpu ids */
static const struct pm_proc pm_procs_all[] = {
{
.node_id = PM_DEV_CLUSTER0_ACPU_0,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER0_ACPU_1,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER1_ACPU_0,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER1_ACPU_1,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER2_ACPU_0,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER2_ACPU_1,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER3_ACPU_0,
.ipi = &apu_ipi,
},
{
.node_id = PM_DEV_CLUSTER3_ACPU_1,
.ipi = &apu_ipi,
},
};
const struct pm_proc *primary_proc = &pm_procs_all[0];
/**
 * pm_get_proc() - returns pointer to the proc structure.
 * @cpuid: id of the cpu whose proc struct pointer should be returned.
 *
 * Return: Pointer to a proc structure if proc is found, otherwise NULL.
 */
const struct pm_proc *pm_get_proc(uint32_t cpuid)
{
	const struct pm_proc *proc = NULL;

	if (cpuid < ARRAY_SIZE(pm_procs_all)) {
		proc = &pm_procs_all[cpuid];
	} else {
		/* %u: cpuid is unsigned (uint32_t) */
		ERROR("cpuid: %u proc NULL\n", cpuid);
	}

	return proc;
}
/**
 * irq_to_pm_node_idx - Get PM node index corresponding to the interrupt number.
 * @irq: Interrupt number.
 *
 * Return: PM node index corresponding to the specified interrupt, or
 *         XPM_NODEIDX_DEV_MIN when the IRQ maps to no wake-capable device.
 *
 * NOTE(review): cases 218 and 220 below exceed IRQ_MAX (200U), so the
 * assert would trip for them in debug builds — confirm the intended
 * IRQ_MAX value for this platform.
 */
enum pm_device_node_idx irq_to_pm_node_idx(uint32_t irq)
{
	enum pm_device_node_idx dev_idx = XPM_NODEIDX_DEV_MIN;

	assert(irq <= IRQ_MAX);

	switch (irq) {
	case 11:
		dev_idx = XPM_NODEIDX_DEV_I2C_2;
		break;
	case 12:
		dev_idx = XPM_NODEIDX_DEV_I2C_3;
		break;
	case 13:
		dev_idx = XPM_NODEIDX_DEV_I2C_4;
		break;
	case 20:
		dev_idx = XPM_NODEIDX_DEV_GPIO;
		break;
	case 21:
		dev_idx = XPM_NODEIDX_DEV_I2C_0;
		break;
	case 22:
		dev_idx = XPM_NODEIDX_DEV_I2C_1;
		break;
	case 23:
		dev_idx = XPM_NODEIDX_DEV_SPI_0;
		break;
	case 24:
		dev_idx = XPM_NODEIDX_DEV_SPI_1;
		break;
	case 25:
		dev_idx = XPM_NODEIDX_DEV_UART_0;
		break;
	case 26:
		dev_idx = XPM_NODEIDX_DEV_UART_1;
		break;
	case 27:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_0;
		break;
	case 28:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_1;
		break;
	/* Each USB instance owns a block of IRQs */
	case 29:
	case 30:
	case 31:
	case 32:
	case 33:
	case 98:
		dev_idx = XPM_NODEIDX_DEV_USB_0;
		break;
	case 34:
	case 35:
	case 36:
	case 37:
	case 38:
	case 99:
		dev_idx = XPM_NODEIDX_DEV_USB_1;
		break;
	/* GEM Ethernet controllers use two IRQs each */
	case 39:
	case 40:
		dev_idx = XPM_NODEIDX_DEV_GEM_0;
		break;
	case 41:
	case 42:
		dev_idx = XPM_NODEIDX_DEV_GEM_1;
		break;
	case 43:
		dev_idx = XPM_NODEIDX_DEV_TTC_0;
		break;
	case 44:
		dev_idx = XPM_NODEIDX_DEV_TTC_1;
		break;
	case 45:
		dev_idx = XPM_NODEIDX_DEV_TTC_2;
		break;
	case 46:
		dev_idx = XPM_NODEIDX_DEV_TTC_3;
		break;
	case 47:
		dev_idx = XPM_NODEIDX_DEV_TTC_4;
		break;
	case 48:
		dev_idx = XPM_NODEIDX_DEV_TTC_5;
		break;
	case 49:
		dev_idx = XPM_NODEIDX_DEV_TTC_6;
		break;
	case 50:
		dev_idx = XPM_NODEIDX_DEV_TTC_7;
		break;
	case 72:
		dev_idx = XPM_NODEIDX_DEV_ADMA_0;
		break;
	case 73:
		dev_idx = XPM_NODEIDX_DEV_ADMA_1;
		break;
	case 74:
		dev_idx = XPM_NODEIDX_DEV_ADMA_2;
		break;
	case 75:
		dev_idx = XPM_NODEIDX_DEV_ADMA_3;
		break;
	case 76:
		dev_idx = XPM_NODEIDX_DEV_ADMA_4;
		break;
	case 77:
		dev_idx = XPM_NODEIDX_DEV_ADMA_5;
		break;
	case 78:
		dev_idx = XPM_NODEIDX_DEV_ADMA_6;
		break;
	case 79:
		dev_idx = XPM_NODEIDX_DEV_ADMA_7;
		break;
	case 95:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_2;
		break;
	case 96:
		dev_idx = XPM_NODEIDX_DEV_CAN_FD_3;
		break;
	case 100:
		dev_idx = XPM_NODEIDX_DEV_I2C_5;
		break;
	case 101:
		dev_idx = XPM_NODEIDX_DEV_I2C_6;
		break;
	case 102:
		dev_idx = XPM_NODEIDX_DEV_I2C_7;
		break;
	case 200:
		dev_idx = XPM_NODEIDX_DEV_RTC;
		break;
	case 218:
		dev_idx = XPM_NODEIDX_DEV_SDIO_0;
		break;
	case 220:
		dev_idx = XPM_NODEIDX_DEV_SDIO_1;
		break;
	default:
		/* IRQ not tied to a wake-capable device */
		dev_idx = XPM_NODEIDX_DEV_MIN;
		break;
	}

	return dev_idx;
}
/**
 * pm_client_suspend() - Client-specific suspend actions. This function
 *                       perform actions required prior to sending suspend
 *                       request.
 *                       Actions taken depend on the state system is
 *                       suspending to.
 * @proc: processor which need to suspend.
 * @state: desired suspend state.
 */
void pm_client_suspend(const struct pm_proc *proc, uint32_t state)
{
	uint32_t cpu_id = plat_my_core_pos();
	uintptr_t val;
	/*
	 * Get the core index, use it calculate offset for secondary cores
	 * to match with register database.
	 * NOTE(review): the formula skips 2 register slots per 2-core
	 * cluster (core 2 -> index 4, core 3 -> index 5, ...) — confirm
	 * against the APU_PCIL register map.
	 */
	uint32_t core_index = cpu_id + ((cpu_id / 2U) * 2U);

	pm_client_lock_get();

	/* Wakeup sources only matter when the system suspends to RAM */
	if (state == PM_STATE_SUSPEND_TO_RAM) {
		pm_client_set_wakeup_sources((uint32_t)proc->node_id);
	}

	/* Arm the hardware-assisted core power-down on next WFI */
	val = read_cpu_pwrctrl_val();
	val |= CORE_PWRDN_EN_BIT_MASK;
	write_cpu_pwrctrl_val(val);

	isb();

	/* Enable power down interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IEN_POWER_REG(core_index),
		      APU_PCIL_CORE_X_IEN_POWER_MASK);
	/* Enable wake interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IEN_WAKE_REG(core_index),
		      APU_PCIL_CORE_X_IEN_WAKE_MASK);

	pm_client_lock_release();
}
/**
 * pm_get_cpuid() - get the local cpu ID for a global node ID.
 * @nid: node id of the processor.
 *
 * Return: the cpu ID (starting from 0) for the subsystem, or
 *         UNDEFINED_CPUID when the node is not an APU core.
 */
static uint32_t pm_get_cpuid(uint32_t nid)
{
	uint32_t cpuid = (uint32_t) UNDEFINED_CPUID;

	/* Linear scan: the table order matches cpu numbering */
	for (size_t idx = 0U; idx < ARRAY_SIZE(pm_procs_all); idx++) {
		if (pm_procs_all[idx].node_id == nid) {
			cpuid = (uint32_t)idx;
			break;
		}
	}

	return cpuid;
}
/**
 * pm_client_wakeup() - Client-specific wakeup actions.
 * @proc: Processor which need to wakeup.
 *
 * This function should contain any PU-specific actions
 * required for waking up another APU core: it clears the core's
 * power-down enable bit and masks the PCIL power/wake interrupts.
 * Does nothing when @proc does not map to a known cpu id.
 */
void pm_client_wakeup(const struct pm_proc *proc)
{
	uint32_t cpuid = pm_get_cpuid(proc->node_id);
	uintptr_t val;

	if (cpuid != (uint32_t) UNDEFINED_CPUID) {
		pm_client_lock_get();

		/* Clear powerdown request */
		val = read_cpu_pwrctrl_val();
		val &= ~CORE_PWRDN_EN_BIT_MASK;
		write_cpu_pwrctrl_val(val);

		isb();

		/* Disable power down interrupt */
		mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpuid),
			      APU_PCIL_CORE_X_IDS_POWER_MASK);
		/* Disable wake interrupt */
		mmio_write_32(APU_PCIL_CORE_X_IDS_WAKE_REG(cpuid),
			      APU_PCIL_CORE_X_IDS_WAKE_MASK);

		pm_client_lock_release();
	}
}
/**
 * pm_client_abort_suspend() - Client-specific abort-suspend actions.
 *
 * This function should contain any PU-specific actions
 * required for aborting a prior suspend request: re-enable the GIC CPU
 * interface, clear the core's power-down enable bit and mask the PCIL
 * power-down interrupt.
 */
void pm_client_abort_suspend(void)
{
	uint32_t cpu_id = plat_my_core_pos();
	uintptr_t val;

	/* Enable interrupts at processor level (for current cpu) */
	gicv3_cpuif_enable(cpu_id);

	pm_client_lock_get();

	/* Clear powerdown request */
	val = read_cpu_pwrctrl_val();
	val &= ~CORE_PWRDN_EN_BIT_MASK;
	write_cpu_pwrctrl_val(val);

	isb();

	/* Disable power down interrupt */
	mmio_write_32(APU_PCIL_CORE_X_IDS_POWER_REG(cpu_id),
		      APU_PCIL_CORE_X_IDS_POWER_MASK);

	pm_client_lock_release();
}

View file

@ -0,0 +1,529 @@
/*
* Copyright (c) 2019-2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
/*
* Top-level SMC handler for Versal2 power management calls and
* IPI setup functions for communication with PMC.
*/
#include <errno.h>
#include <stdbool.h>
#include "../drivers/arm/gic/v3/gicv3_private.h"
#include <common/runtime_svc.h>
#include <drivers/arm/gicv3.h>
#include <lib/psci/psci.h>
#include <plat/arm/common/plat_arm.h>
#include <plat/common/platform.h>
#include <plat_private.h>
#include "pm_api_sys.h"
#include "pm_client.h"
#include "pm_ipi.h"
#include "pm_svc_main.h"
#define MODE 0x80000000U
#define INVALID_SGI 0xFFU
#define PM_INIT_SUSPEND_CB (30U)
#define PM_NOTIFY_CB (32U)
#define EVENT_CPU_PWRDWN (4U)
#define MBOX_SGI_SHARED_IPI (7U)
/**
* upper_32_bits - return bits 32-63 of a number
* @n: the number we're accessing
*/
#define upper_32_bits(n) ((uint32_t)((n) >> 32U))
/**
* lower_32_bits - return bits 0-31 of a number
* @n: the number we're accessing
*/
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffffU))
/**
* EXTRACT_SMC_ARGS - extracts 32-bit payloads from 64-bit SMC arguments
* @pm_arg: array of 32-bit payloads
* @x: array of 64-bit SMC arguments
*/
#define EXTRACT_ARGS(pm_arg, x) \
for (uint32_t i = 0U; i < (PAYLOAD_ARG_CNT - 1U); i++) { \
if ((i % 2U) != 0U) { \
pm_arg[i] = lower_32_bits(x[(i / 2U) + 1U]); \
} else { \
pm_arg[i] = upper_32_bits(x[i / 2U]); \
} \
}
/* 1 sec of wait timeout for secondary core down */
#define PWRDWN_WAIT_TIMEOUT (1000U)
DEFINE_RENAME_SYSREG_RW_FUNCS(icc_asgi1r_el1, S3_0_C12_C11_6)
/* pm_up = true - UP, pm_up = false - DOWN */
static bool pm_up;
static uint32_t sgi = (uint32_t)INVALID_SGI;
bool pwrdwn_req_received;
/* Raise the OS-registered non-secure SGI on the current core. */
static void notify_os(void)
{
	plat_ic_raise_ns_sgi((int)sgi, read_mpidr_el1());
}
/**
 * cpu_pwrdwn_req_handler() - EL3 handler for the CPU power-down SGI.
 * @id: Interrupt id (unused; the SGI number is fixed).
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to caller's context (unused).
 * @cookie: Opaque cookie (unused).
 *
 * Return: Result of psci_cpu_off() for the calling core.
 */
static uint64_t cpu_pwrdwn_req_handler(uint32_t id, uint32_t flags,
				       void *handle, void *cookie)
{
	uint32_t cpu_id = plat_my_core_pos();

	/* %u: cpu_id is unsigned (uint32_t) */
	VERBOSE("Powering down CPU %u\n", cpu_id);

	/* Deactivate CPU power down SGI */
	plat_ic_end_of_interrupt(CPU_PWR_DOWN_REQ_INTR);

	return (uint64_t) psci_cpu_off();
}
/**
 * raise_pwr_down_interrupt() - Callback function to raise SGI.
 * @mpidr: MPIDR for the target CPU.
 *
 * Raise SGI interrupt to trigger the CPU power down sequence on all the
 * online secondary cores. Passed to psci_stop_other_cores() as the
 * per-core stop callback.
 */
static void raise_pwr_down_interrupt(u_register_t mpidr)
{
	plat_ic_raise_el3_sgi((int)CPU_PWR_DOWN_REQ_INTR, mpidr);
}
/**
 * request_cpu_pwrdwn() - Power down all online secondary cores and
 *                        acknowledge the triggering IPI.
 *
 * Stops every other core via an EL3 SGI (waiting up to
 * PWRDWN_WAIT_TIMEOUT ms), then clears and deactivates the IPI IRQ
 * that carried the power-down request.
 */
void request_cpu_pwrdwn(void)
{
	int ret;

	VERBOSE("CPU power down request received\n");

	/* Send powerdown request to online secondary core(s) */
	ret = psci_stop_other_cores(plat_my_core_pos(), (unsigned int)PWRDWN_WAIT_TIMEOUT, raise_pwr_down_interrupt);
	if (ret != (int)PSCI_E_SUCCESS) {
		ERROR("Failed to powerdown secondary core(s)\n");
	}

	/* Clear IPI IRQ */
	pm_ipi_irq_clear(primary_proc);

	/* Deactivate IPI IRQ */
	plat_ic_end_of_interrupt(PLAT_VERSAL_IPI_IRQ);
}
/**
 * ipi_fiq_handler() - EL3 handler for the IPI FIQ from firmware.
 * @id: Interrupt id of the FIQ being handled.
 * @flags: Interrupt flags (unused).
 * @handle: Pointer to caller's context (unused).
 * @cookie: Opaque cookie (unused).
 *
 * Forwards non-PMC IPIs to the OS mailbox driver via SGI, then reads
 * the PMC callback payload and dispatches suspend/power-down/error
 * notifications accordingly.
 *
 * Return: Always 0.
 */
static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle,
				void *cookie)
{
	uint32_t payload[4] = {0};
	enum pm_ret_status ret;
	uint32_t ipi_status, i;

	VERBOSE("Received IPI FIQ from firmware\n");

	console_flush();
	(void)plat_ic_acknowledge_interrupt();

	/* Check status register for each IPI except PMC */
	for (i = IPI_ID_APU; i <= IPI_ID_5; i++) {
		ipi_status = (uint32_t)ipi_mb_enquire_status(IPI_ID_APU, i);

		/* If any agent other than PMC has generated IPI FIQ then send SGI to mbox driver */
		if ((ipi_status & (uint32_t)IPI_MB_STATUS_RECV_PENDING) > (uint32_t) 0) {
			plat_ic_raise_ns_sgi((int)MBOX_SGI_SHARED_IPI, read_mpidr_el1());
			break;
		}
	}

	/* If PMC has not generated interrupt then end ISR */
	ipi_status = (uint32_t)ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC);
	if ((ipi_status & (uint32_t) IPI_MB_STATUS_RECV_PENDING) == (uint32_t) 0) {
		plat_ic_end_of_interrupt(id);
		goto end;
	}

	/* Handle PMC case */
	ret = pm_get_callbackdata(payload, ARRAY_SIZE(payload), 0, 0);
	if (ret != PM_RET_SUCCESS) {
		/* Propagate the error code through payload[0] for the switch below */
		payload[0] = (uint32_t) ret;
	}

	switch (payload[0]) {
	case PM_INIT_SUSPEND_CB:
		/* Only forward to the OS if it registered a callback SGI */
		if (sgi != INVALID_SGI) {
			notify_os();
		}
		break;
	case PM_NOTIFY_CB:
		if (sgi != INVALID_SGI) {
			if (payload[2] == EVENT_CPU_PWRDWN) {
				/*
				 * Second power-down notification: perform the
				 * actual core shutdown; first one only sets
				 * the flag and notifies the OS.
				 */
				if (pwrdwn_req_received) {
					pwrdwn_req_received = false;
					request_cpu_pwrdwn();
					(void)psci_cpu_off();
					break;
				} else {
					/* No action needed, added for MISRA
					 * compliance
					 */
				}
				pwrdwn_req_received = true;
			} else {
				/* No action needed, added for MISRA
				 * compliance
				 */
			}
			notify_os();
		} else if (payload[2] == EVENT_CPU_PWRDWN) {
			/* No OS callback registered: power down immediately */
			request_cpu_pwrdwn();
			(void)psci_cpu_off();
		} else {
			/* No action needed, added for MISRA
			 * compliance
			 */
		}
		break;
	case (uint32_t) PM_RET_ERROR_INVALID_CRC:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid CRC in the payload\n");
		break;
	default:
		pm_ipi_irq_clear(primary_proc);
		WARN("Invalid IPI payload\n");
		break;
	}

	/* Clear FIQ */
	plat_ic_end_of_interrupt(id);

end:
	return 0;
}
/**
 * pm_register_sgi() - PM register the IPI interrupt.
 * @sgi_num: SGI number to be used for communication.
 * @reset: Reset to invalid SGI when reset=1.
 *
 * Records the SGI number the OS wants PM callbacks delivered on, or
 * forgets it when @reset is 1.
 *
 * Return: 0 on success, -EBUSY when an SGI is already registered,
 *         -EINVAL when @sgi_num is out of range.
 */
int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset)
{
	int32_t rc;

	if (reset == 1U) {
		/* Forget any previously registered SGI */
		sgi = INVALID_SGI;
		rc = 0;
	} else if (sgi != INVALID_SGI) {
		/* An SGI is already registered */
		rc = -EBUSY;
	} else if (sgi_num >= GICV3_MAX_SGI_TARGETS) {
		rc = -EINVAL;
	} else {
		sgi = (uint32_t)sgi_num;
		rc = 0;
	}

	return rc;
}
/**
 * pm_setup() - PM service setup.
 *
 * Return: On success, the initialization function must return 0.
 *         Any other return value will cause the framework to ignore
 *         the service.
 *
 * Initialization functions for Versal power management for
 * communicaton with PMC: initialises the IPI channel, registers the
 * CPU power-down SGI and the IPI FIQ handlers at EL3, routes the IPI
 * IRQ, and subscribes to the firmware's CPU power-down notifier.
 *
 * Called from sip_svc_setup initialization function with the
 * rt_svc_init signature.
 *
 */
int32_t pm_setup(void)
{
	int32_t ret = 0;

	pm_ipi_init(primary_proc);
	pm_up = true;

	/* register SGI handler for CPU power down request */
	ret = request_intr_type_el3(CPU_PWR_DOWN_REQ_INTR, cpu_pwrdwn_req_handler);
	if (ret != 0) {
		WARN("BL31: registering SGI interrupt failed\n");
	}

	/*
	 * Enable IPI IRQ
	 * assume the rich OS is OK to handle callback IRQs now.
	 * Even if we were wrong, it would not enable the IRQ in
	 * the GIC.
	 */
	pm_ipi_irq_enable(primary_proc);

	ret = request_intr_type_el3(PLAT_VERSAL_IPI_IRQ, ipi_fiq_handler);
	if (ret != 0) {
		WARN("BL31: registering IPI interrupt failed\n");
	}

	/* Route the IPI IRQ; MODE (bit 31) selects 1-of-N routing */
	gicd_write_irouter(gicv3_driver_data->gicd_base, PLAT_VERSAL_IPI_IRQ, MODE);

	/* Register for idle callback during force power down/restart */
	ret = (int32_t)pm_register_notifier(primary_proc->node_id, EVENT_CPU_PWRDWN,
					    0x0U, 0x1U, SECURE_FLAG);
	if (ret != 0) {
		WARN("BL31: registering idle callback for restart/force power down failed\n");
	}

	return ret;
}
/**
 * eemi_psci_debugfs_handler() - EEMI API invoked from PSCI.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI APIs performs CPU specific power management tasks.
 * These EEMI APIs are invoked either from PSCI or from debugfs in kernel.
 * These calls require CPU specific processing before sending IPI request to
 * Platform Management Controller. For example enable/disable CPU specific
 * interrupts. This requires separate handler for these calls and may not be
 * handled using common eemi handler.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0.
 *         (The SMC_RET1 macros return directly to the SMC caller.)
 *
 */
static uintptr_t eemi_psci_debugfs_handler(uint32_t api_id, uint32_t *pm_arg,
					   void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;

	switch (api_id) {
	case (uint32_t)PM_SELF_SUSPEND:
		ret = pm_self_suspend(pm_arg[0], pm_arg[1], pm_arg[2],
				      pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_FORCE_POWERDOWN:
		ret = pm_force_powerdown(pm_arg[0], (uint8_t)pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_REQ_SUSPEND:
		ret = pm_req_suspend(pm_arg[0], (uint8_t)pm_arg[1], pm_arg[2],
				     pm_arg[3], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_ABORT_SUSPEND:
		ret = pm_abort_suspend(pm_arg[0], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	case (uint32_t)PM_SYSTEM_SHUTDOWN:
		ret = pm_system_shutdown(pm_arg[0], pm_arg[1], security_flag);
		SMC_RET1(handle, (u_register_t)ret);

	default:
		/* Not a PSCI/debugfs EEMI call; let the caller try other handlers */
		return (uintptr_t)0;
	}
}
/**
 * TF_A_specific_handler() - SMC handler for TF-A specific functionality.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call.
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * These EEMI calls performs functionality that does not require
 * IPI transaction. The handler ends in TF-A and returns requested data to
 * kernel from TF-A.
 *
 * Return: If TF-A specific API found then, uintptr_t type address, else 0
 *         (SMC_RET macros return directly to the SMC caller).
 *
 */
static uintptr_t TF_A_specific_handler(uint32_t api_id, uint32_t *pm_arg,
				       void *handle, uint32_t security_flag)
{
	switch (api_id) {
	case TF_A_FEATURE_CHECK:
	{
		enum pm_ret_status ret;
		uint32_t result[PAYLOAD_ARG_CNT] = {0U};

		ret = eemi_feature_check(pm_arg[0], result);
		/* Status in the low word, feature data in the high word */
		SMC_RET1(handle, (uint64_t)ret | ((uint64_t)result[0] << 32U));
	}

	case TF_A_PM_REGISTER_SGI:
	{
		int32_t ret;

		ret = pm_register_sgi(pm_arg[0], pm_arg[1]);
		if (ret != 0) {
			SMC_RET1(handle, (uint32_t)PM_RET_ERROR_ARGS);
		}

		SMC_RET1(handle, (uint32_t)PM_RET_SUCCESS);
	}

	case PM_GET_CALLBACK_DATA:
	{
		uint32_t result[4] = {0};
		enum pm_ret_status ret;

		ret = pm_get_callbackdata(result, ARRAY_SIZE(result), security_flag, 1U);
		if (ret != PM_RET_SUCCESS) {
			result[0] = (uint32_t) ret;
		}

		/* Pack the 4 callback words into two 64-bit return registers */
		SMC_RET2(handle,
			 (uint64_t)result[0] | ((uint64_t)result[1] << 32U),
			 (uint64_t)result[2] | ((uint64_t)result[3] << 32U));
	}

	case PM_GET_TRUSTZONE_VERSION:
		SMC_RET1(handle, ((uint64_t)PM_RET_SUCCESS) |
			 (((uint64_t)TZ_VERSION) << 32U));

	default:
		/* Not a TF-A specific call; let the caller try other handlers */
		return (uintptr_t)0U;
	}
}
/**
 * eemi_api_handler() - Prepare EEMI payload and perform IPI transaction.
 * @api_id: identifier for the API being called.
 * @pm_arg: pointer to the argument data for the API call; must provide at
 *          least six valid words (filled by EXTRACT_ARGS() in the caller).
 * @handle: Pointer to caller's context structure.
 * @security_flag: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * EEMI - Embedded Energy Management Interface is AMD-Xilinx proprietary
 * protocol to allow communication between power management controller and
 * different processing clusters.
 *
 * This handler prepares EEMI protocol payload received from kernel and
 * performs an IPI transaction, then completes the SMC with the firmware's
 * return payload packed into x0..x3.
 *
 * Return: If EEMI API found then, uintptr_t type address, else 0
 */
static uintptr_t eemi_api_handler(uint32_t api_id, const uint32_t *pm_arg,
				  void *handle, uint32_t security_flag)
{
	enum pm_ret_status ret;
	uint32_t buf[RET_PAYLOAD_ARG_CNT] = {0U};
	uint32_t payload[PAYLOAD_ARG_CNT] = {0U};
	uint32_t module_id;

	/* Module id occupies bits [15:8] of the 32-bit EEMI API id. */
	module_id = (api_id & MODULE_ID_MASK) >> 8U;

	PM_PACK_PAYLOAD7(payload, module_id, security_flag, api_id,
			 pm_arg[0], pm_arg[1], pm_arg[2], pm_arg[3],
			 pm_arg[4], pm_arg[5]);

	/* buf is already uint32_t[]; the previous (uint32_t *) cast was redundant. */
	ret = pm_ipi_send_sync(primary_proc, payload, buf,
			       RET_PAYLOAD_ARG_CNT);

	/* Pack status and up to six 32-bit return words into four registers. */
	SMC_RET4(handle, (uint64_t)ret | ((uint64_t)buf[0] << 32U),
		 (uint64_t)buf[1] | ((uint64_t)buf[2] << 32U),
		 (uint64_t)buf[3] | ((uint64_t)buf[4] << 32U),
		 (uint64_t)buf[5]);
}
/**
 * pm_smc_handler() - SMC handler for PM-API calls coming from EL1/EL2.
 * @smc_fid: Function Identifier.
 * @x1: SMC64 Arguments from kernel.
 * @x2: SMC64 Arguments from kernel.
 * @x3: SMC64 Arguments from kernel (upper 32-bits).
 * @x4: SMC64 Argument; consumed only by the pass-through firmware command
 *      path (via EXTRACT_ARGS()), unused on the legacy path.
 * @cookie: Unused.
 * @handle: Pointer to caller's context structure.
 * @flags: SECURE_FLAG or NON_SECURE_FLAG.
 *
 * Return: Unused.
 *
 * Determines that smc_fid is valid and supported PM SMC Function ID from the
 * list of pm_api_ids, otherwise completes the request with
 * the unknown SMC Function ID.
 *
 * The SMC calls for PM service are forwarded from SIP Service SMC handler
 * function with rt_svc_handle signature.
 *
 */
uint64_t pm_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
			uint64_t x4, const void *cookie, void *handle, uint64_t flags)
{
	uintptr_t ret;
	uint32_t pm_arg[PAYLOAD_ARG_CNT] = {0};
	uint32_t security_flag = NON_SECURE_FLAG;
	uint32_t api_id;
	bool status = false, status_tmp = false;
	uint64_t x[4] = {x1, x2, x3, x4};

	/* Handle case where PM wasn't initialized properly */
	if (pm_up == false) {
		SMC_RET1(handle, SMC_UNK);
	}

	/*
	 * Mark BIT24 payload (i.e 1st bit of pm_arg[3]) as secure (0)
	 * if smc called is secure
	 *
	 * Add redundant macro call to immune the code from glitches
	 */
	SECURE_REDUNDANT_CALL(status, status_tmp, is_caller_secure, flags);
	if ((status != false) && (status_tmp != false)) {
		security_flag = SECURE_FLAG;
	}

	/*
	 * Extended SMCCC payload: the full 32-bit EEMI API id travels in the
	 * low word of x1 and the arguments are packed across x1..x4.
	 */
	if ((smc_fid & FUNCID_NUM_MASK) == PASS_THROUGH_FW_CMD_ID) {
		api_id = lower_32_bits(x[0]);
		EXTRACT_ARGS(pm_arg, x);
		return eemi_api_handler(api_id, pm_arg, handle, security_flag);
	}

	/* Legacy encoding: two 32-bit arguments per SMC64 register. */
	pm_arg[0] = (uint32_t)x1;
	pm_arg[1] = (uint32_t)(x1 >> 32U);
	pm_arg[2] = (uint32_t)x2;
	pm_arg[3] = (uint32_t)(x2 >> 32U);
	pm_arg[4] = (uint32_t)x3;
	(void)(x4);
	api_id = smc_fid & FUNCID_NUM_MASK;

	/*
	 * NOTE(review): raw 'flags' is passed here while the other handlers
	 * receive the derived security_flag - confirm this is intentional.
	 */
	ret = eemi_psci_debugfs_handler(api_id, pm_arg, handle, (uint32_t)flags);
	if (ret != (uintptr_t)0) {
		goto error;
	}

	/*
	 * Last handler tried; its result (SMC return or 0 for "unknown API")
	 * is returned either way, so no redundant goto before the label.
	 */
	ret = TF_A_specific_handler(api_id, pm_arg, handle, security_flag);

error:
	return ret;
}

View file

@ -1,7 +1,7 @@
/* /*
* Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved. * Copyright (c) 2018-2019, Arm Limited and Contributors. All rights reserved.
* Copyright (c) 2018-2022, Xilinx, Inc. All rights reserved. * Copyright (c) 2018-2022, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2024, Advanced Micro Devices, Inc. All rights reserved. * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -27,7 +27,7 @@
/* SiP Service Calls version numbers */ /* SiP Service Calls version numbers */
#define SIP_SVC_VERSION_MAJOR (0U) #define SIP_SVC_VERSION_MAJOR (0U)
#define SIP_SVC_VERSION_MINOR (1U) #define SIP_SVC_VERSION_MINOR (2U)
/* These macros are used to identify PM calls from the SMC function ID */ /* These macros are used to identify PM calls from the SMC function ID */
#define SIP_FID_MASK GENMASK(23, 16) #define SIP_FID_MASK GENMASK(23, 16)

View file

@ -1,6 +1,6 @@
/* /*
* Copyright (c) 2019, Xilinx, Inc. All rights reserved. * Copyright (c) 2019, Xilinx, Inc. All rights reserved.
* Copyright (c) 2022-2023, Advanced Micro Devices, Inc. All rights reserved. * Copyright (c) 2022-2025, Advanced Micro Devices, Inc. All rights reserved.
* *
* SPDX-License-Identifier: BSD-3-Clause * SPDX-License-Identifier: BSD-3-Clause
*/ */
@ -132,6 +132,18 @@ enum pm_device_node_idx {
XPM_NODEIDX_DEV_TTC_2 = 0x26, XPM_NODEIDX_DEV_TTC_2 = 0x26,
XPM_NODEIDX_DEV_TTC_3 = 0x27, XPM_NODEIDX_DEV_TTC_3 = 0x27,
XPM_NODEIDX_DEV_SWDT_LPD = 0x28, XPM_NODEIDX_DEV_SWDT_LPD = 0x28,
XPM_NODEIDX_DEV_I2C_2 = 0x117,
XPM_NODEIDX_DEV_I2C_3 = 0x118,
XPM_NODEIDX_DEV_I2C_4 = 0x119,
XPM_NODEIDX_DEV_I2C_5 = 0x11A,
XPM_NODEIDX_DEV_I2C_6 = 0x11B,
XPM_NODEIDX_DEV_I2C_7 = 0x11C,
XPM_NODEIDX_DEV_CAN_FD_2 = 0x11D,
XPM_NODEIDX_DEV_CAN_FD_3 = 0x11E,
XPM_NODEIDX_DEV_TTC_4 = 0x11F,
XPM_NODEIDX_DEV_TTC_5 = 0x120,
XPM_NODEIDX_DEV_TTC_6 = 0x121,
XPM_NODEIDX_DEV_TTC_7 = 0x122,
/* FPD Peripheral devices */ /* FPD Peripheral devices */
XPM_NODEIDX_DEV_SWDT_FPD = 0x29, XPM_NODEIDX_DEV_SWDT_FPD = 0x29,
@ -237,6 +249,11 @@ enum pm_device_node_idx {
XPM_NODEIDX_DEV_FPD_SWDT_2 = 0xDD, XPM_NODEIDX_DEV_FPD_SWDT_2 = 0xDD,
XPM_NODEIDX_DEV_FPD_SWDT_3 = 0xDE, XPM_NODEIDX_DEV_FPD_SWDT_3 = 0xDE,
#endif #endif
#if defined(PLAT_versal2)
XPM_NODEIDX_DEV_USB_1 = 0xD7,
#endif
XPM_NODEIDX_DEV_MAX, XPM_NODEIDX_DEV_MAX,
}; };