diff --git a/plat/amd/versal2/bl31_setup.c b/plat/amd/versal2/bl31_setup.c
index 68d19a7a3..05e4c96ce 100644
--- a/plat/amd/versal2/bl31_setup.c
+++ b/plat/amd/versal2/bl31_setup.c
@@ -170,16 +170,19 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler)
 {
 	static uint32_t index;
 	uint32_t i;
+	int32_t ret = 0;
 
 	/* Validate 'handler' and 'id' parameters */
 	if ((handler == NULL) || (index >= MAX_INTR_EL3)) {
-		return -EINVAL;
+		ret = -EINVAL;
+		goto exit_label;
 	}
 
 	/* Check if a handler has already been registered */
 	for (i = 0; i < index; i++) {
 		if (id == type_el3_interrupt_table[i].id) {
-			return -EALREADY;
+			ret = -EALREADY;
+			goto exit_label;
 		}
 	}
 
@@ -188,7 +191,8 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler)
 
 	index++;
 
-	return 0;
+exit_label:
+	return ret;
 }
 
 static uint64_t rdo_el3_interrupt_handler(uint32_t id, uint32_t flags,
diff --git a/plat/amd/versal2/include/platform_def.h b/plat/amd/versal2/include/platform_def.h
index 42c9b08a9..be1e3518e 100644
--- a/plat/amd/versal2/include/platform_def.h
+++ b/plat/amd/versal2/include/platform_def.h
@@ -24,6 +24,9 @@
 
 #define PLATFORM_CORE_COUNT		(PLATFORM_CLUSTER_COUNT * PLATFORM_CORE_COUNT_PER_CLUSTER)
 
+#define E_INVALID_CORE_COUNT		-1
+#define E_INVALID_CLUSTER_COUNT		-3
+
 #define PLAT_MAX_PWR_LVL		U(2)
 #define PLAT_MAX_RET_STATE		U(1)
 #define PLAT_MAX_OFF_STATE		U(2)
diff --git a/plat/amd/versal2/plat_psci.c b/plat/amd/versal2/plat_psci.c
index e8dc5d3d6..55842cc7b 100644
--- a/plat/amd/versal2/plat_psci.c
+++ b/plat/amd/versal2/plat_psci.c
@@ -40,12 +40,14 @@ static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
 	int32_t cluster = cpu_id / PLATFORM_CORE_COUNT_PER_CLUSTER;
 	uintptr_t apu_cluster_base = 0, apu_pcli_base, apu_pcli_cluster = 0;
 	uintptr_t rst_apu_cluster = PSX_CRF + RST_APU0_OFFSET + ((uint64_t)cluster * 0x4U);
+	int32_t ret = PSCI_E_SUCCESS;
 
 	VERBOSE("%s: mpidr: 0x%lx, cpuid: %x, cpu: %x, cluster: %x\n",
 		__func__, mpidr, cpu_id, cpu, cluster);
 
 	if (cpu_id == -1) {
-		return PSCI_E_INTERN_FAIL;
+		ret = PSCI_E_INTERN_FAIL;
+		goto exit_label;
 	}
 
 	if (cluster > 3U) {
@@ -84,7 +86,8 @@ static int32_t zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
 	mmio_write_32(apu_pcli_base + PCLI_PSTATE_OFFSET, PCLI_PSTATE_VAL_CLEAR);
 	mmio_write_32(apu_pcli_base + PCLI_PREQ_OFFSET, PREQ_CHANGE_REQUEST);
 
-	return PSCI_E_SUCCESS;
+exit_label:
+	return ret;
 }
 
 static void zynqmp_nopmu_pwr_domain_off(const psci_power_state_t *target_state)
@@ -101,13 +104,15 @@ static void __dead2 zynqmp_nopmu_system_reset(void)
 
 static int32_t zynqmp_validate_ns_entrypoint(uint64_t ns_entrypoint)
 {
+	int32_t ret = PSCI_E_INVALID_ADDRESS;
+
 	VERBOSE("Validate ns_entry point %lx\n", ns_entrypoint);
 
 	if ((ns_entrypoint) != 0U) {
-		return PSCI_E_SUCCESS;
-	} else {
-		return PSCI_E_INVALID_ADDRESS;
+		ret = PSCI_E_SUCCESS;
 	}
+
+	return ret;
 }
 
 static void zynqmp_pwr_domain_on_finish(const psci_power_state_t *target_state)
diff --git a/plat/amd/versal2/plat_topology.c b/plat/amd/versal2/plat_topology.c
index 076313981..434e08a0b 100644
--- a/plat/amd/versal2/plat_topology.c
+++ b/plat/amd/versal2/plat_topology.c
@@ -41,6 +41,7 @@ const uint8_t *plat_get_power_domain_tree_desc(void)
 int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
 {
 	uint32_t cluster_id, cpu_id;
+	int32_t ret = 0;
 
 	mpidr &= MPIDR_AFFINITY_MASK;
 
@@ -48,7 +49,8 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
 	cpu_id = MPIDR_AFFLVL1_VAL(mpidr);
 
 	if (cluster_id >= PLATFORM_CLUSTER_COUNT) {
-		return -3;
+		ret = E_INVALID_CLUSTER_COUNT;
+		goto exit_label;
 	}
 
 	/*
@@ -56,8 +58,11 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr)
 	 * one of the two clusters present on the platform.
 	 */
 	if (cpu_id >= PLATFORM_CORE_COUNT_PER_CLUSTER) {
-		return -1;
+		ret = E_INVALID_CORE_COUNT;
+	} else {
+		ret = (cpu_id + (cluster_id * PLATFORM_CORE_COUNT_PER_CLUSTER));
 	}
 
-	return (cpu_id + (cluster_id * PLATFORM_CORE_COUNT_PER_CLUSTER));
+exit_label:
+	return ret;
 }
diff --git a/plat/amd/versal2/scmi.c b/plat/amd/versal2/scmi.c
index eec8205f9..0d384a5e8 100644
--- a/plat/amd/versal2/scmi.c
+++ b/plat/amd/versal2/scmi.c
@@ -288,13 +288,16 @@ int32_t plat_scmi_clock_rates_array(unsigned int agent_id, unsigned int scmi_id,
 				    uint32_t start_idx)
 {
 	const struct scmi_clk *clock = clk_find(agent_id, scmi_id);
+	int32_t ret = SCMI_SUCCESS;
 
 	if (clock == NULL) {
-		return SCMI_NOT_FOUND;
+		ret = SCMI_NOT_FOUND;
+		goto exit_label;
 	}
 
 	if (start_idx > 0U) {
-		return SCMI_OUT_OF_RANGE;
+		ret = SCMI_OUT_OF_RANGE;
+		goto exit_label;
 	}
 
 	if (array == NULL) {
@@ -304,10 +307,11 @@ int32_t plat_scmi_clock_rates_array(unsigned int agent_id, unsigned int scmi_id,
 		VERBOSE("SCMI: CLK: id: %d, clk_name: %s, get_rate %lu\n",
 			scmi_id, clock->name, *array);
 	} else {
-		return SCMI_GENERIC_ERROR;
+		ret = SCMI_GENERIC_ERROR;
 	}
 
-	return SCMI_SUCCESS;
+exit_label:
+	return ret;
 }
 
 unsigned long plat_scmi_clock_get_rate(unsigned int agent_id, unsigned int scmi_id)
@@ -529,12 +533,13 @@ size_t plat_scmi_pd_count(unsigned int agent_id)
 const char *plat_scmi_pd_get_name(unsigned int agent_id, unsigned int pd_id)
 {
 	const struct scmi_pd *pd = find_pd(agent_id, pd_id);
+	const char *ret = NULL;
 
-	if (pd == NULL) {
-		return NULL;
+	if (pd != NULL) {
+		ret = pd->name;
 	}
 
-	return pd->name;
+	return ret;
 }
 
 unsigned int plat_scmi_pd_statistics(unsigned int agent_id, unsigned long *pd_id)
@@ -550,14 +555,15 @@ unsigned int plat_scmi_pd_get_attributes(unsigned int agent_id, unsigned int pd_
 unsigned int plat_scmi_pd_get_state(unsigned int agent_id, unsigned int pd_id)
 {
 	const struct scmi_pd *pd = find_pd(agent_id, pd_id);
+	uint32_t ret = SCMI_NOT_SUPPORTED;
 
-	if (pd == NULL) {
-		return SCMI_NOT_SUPPORTED;
+	if (pd != NULL) {
+		NOTICE("SCMI: PD: get id: %d, state: %x\n", pd_id, pd->state);
+
+		ret = pd->state;
 	}
 
-	NOTICE("SCMI: PD: get id: %d, state: %x\n", pd_id, pd->state);
-
-	return pd->state;
+	return ret;
 }
 
 int32_t plat_scmi_pd_set_state(unsigned int agent_id, unsigned int flags, unsigned int pd_id,
@@ -568,14 +574,15 @@ int32_t plat_scmi_pd_set_state(unsigned int agent_id, unsigned int flags, unsign
 
 	if (pd == NULL) {
 		ret = SCMI_NOT_SUPPORTED;
-	} else {
-
-		NOTICE("SCMI: PD: set id: %d, orig state: %x, new state: %x, flags: %x\n",
-		       pd_id, pd->state, state, flags);
-
-		pd->state = state;
+		goto exit_label;
 	}
 
+	NOTICE("SCMI: PD: set id: %d, orig state: %x, new state: %x, flags: %x\n",
+	       pd_id, pd->state, state, flags);
+
+	pd->state = state;
+
+exit_label:
 	return ret;
 }
 
@@ -638,8 +645,9 @@ void init_scmi_server(void)
 	size_t i;
 	int32_t ret;
 
-	for (i = 0U; i < ARRAY_SIZE(scmi_channel); i++)
+	for (i = 0U; i < ARRAY_SIZE(scmi_channel); i++) {
 		scmi_smt_init_agent_channel(&scmi_channel[i]);
+	}
 
 	INFO("SCMI: Server initialized\n");
 
@@ -649,12 +657,14 @@ void init_scmi_server(void)
 
 	for (i = 0U; i < ARRAY_SIZE(scmi0_clock); i++) {
 		/* Keep i2c on 100MHz to calculate rates properly */
-		if ((i >= CLK_I2C0_0) && (i <= CLK_I2C7_0))
+		if ((i >= CLK_I2C0_0) && (i <= CLK_I2C7_0)) {
 			continue;
+		}
 
 		/* Keep UFS clocks to default values to get the expected rates */
-		if (i >= CLK_UFS0_0 && i <= CLK_UFS0_2)
+		if ((i >= CLK_UFS0_0) && (i <= CLK_UFS0_2)) {
 			continue;
+		}
 
 		/*
 		 * SPP supports multiple versions.
diff --git a/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c b/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
index cdff3c8e7..18826699e 100644
--- a/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
+++ b/plat/xilinx/common/ipi_mailbox_service/ipi_mailbox_svc.c
@@ -110,8 +110,9 @@ uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 		disable_interrupt = ((x3 & IPI_SMC_ENQUIRY_DIRQ_MASK) != 0U);
 		ret = ipi_mb_enquire_status(ipi_local_id, ipi_remote_id);
 
-		if ((((uint32_t)ret & IPI_MB_STATUS_RECV_PENDING) > 0U) && disable_interrupt)
+		if ((((uint32_t)ret & IPI_MB_STATUS_RECV_PENDING) > 0U) && disable_interrupt) {
 			ipi_mb_disable_irq(ipi_local_id, ipi_remote_id);
+		}
 		SMC_RET1(handle, ret);
 	}
 	case IPI_MAILBOX_NOTIFY:
@@ -128,8 +129,9 @@ uint64_t ipi_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
 		enable_interrupt = ((x3 & IPI_SMC_ACK_EIRQ_MASK) != 0U);
 		ipi_mb_ack(ipi_local_id, ipi_remote_id);
 
-		if (enable_interrupt != 0)
+		if (enable_interrupt != 0) {
 			ipi_mb_enable_irq(ipi_local_id, ipi_remote_id);
+		}
 		SMC_RET1(handle, 0);
 	}
 	case IPI_MAILBOX_ENABLE_IRQ:
diff --git a/plat/xilinx/common/plat_startup.c b/plat/xilinx/common/plat_startup.c
index 9f829c9f6..c5c52a881 100644
--- a/plat/xilinx/common/plat_startup.c
+++ b/plat/xilinx/common/plat_startup.c
@@ -123,14 +123,17 @@ static uint32_t get_xbl_ss(const struct xbl_partition *partition)
 static uint32_t get_xbl_endian(const struct xbl_partition *partition)
 {
 	uint64_t flags = partition->flags & XBL_FLAGS_ENDIAN_MASK;
+	uint32_t spsr_value = 0U;
 
 	flags >>= XBL_FLAGS_ENDIAN_SHIFT;
 
 	if (flags == XBL_FLAGS_ENDIAN_BE) {
-		return SPSR_E_BIG;
+		spsr_value = SPSR_E_BIG;
 	} else {
-		return SPSR_E_LITTLE;
+		spsr_value = SPSR_E_LITTLE;
 	}
+
+	return spsr_value;
 }
 
 /**
@@ -182,10 +185,12 @@ enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 			      uint64_t handoff_addr)
 {
 	const struct xbl_handoff_params *HandoffParams;
+	enum xbl_handoff xbl_status = XBL_HANDOFF_SUCCESS;
 
 	if (handoff_addr == 0U) {
 		WARN("BL31: No handoff structure passed\n");
-		return XBL_HANDOFF_NO_STRUCT;
+		xbl_status = XBL_HANDOFF_NO_STRUCT;
+		goto exit_label;
 	}
 
 	HandoffParams = (struct xbl_handoff_params *)handoff_addr;
@@ -194,7 +199,8 @@ enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 	    (HandoffParams->magic[2] != (uint8_t)'N') ||
 	    (HandoffParams->magic[3] != (uint8_t)'X')) {
 		ERROR("BL31: invalid handoff structure at %" PRIx64 "\n", handoff_addr);
-		return XBL_HANDOFF_INVAL_STRUCT;
+		xbl_status = XBL_HANDOFF_INVAL_STRUCT;
+		goto exit_label;
 	}
 
 	VERBOSE("BL31: TF-A handoff params at:0x%" PRIx64 ", entries:%u\n",
@@ -202,7 +208,8 @@ enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 	if (HandoffParams->num_entries > XBL_MAX_PARTITIONS) {
 		ERROR("BL31: TF-A handoff params: too many partitions (%u/%u)\n",
 		      HandoffParams->num_entries, XBL_MAX_PARTITIONS);
-		return XBL_HANDOFF_TOO_MANY_PARTS;
+		xbl_status = XBL_HANDOFF_TOO_MANY_PARTS;
+		goto exit_label;
 	}
 
 	/*
@@ -304,5 +311,6 @@ enum xbl_handoff xbl_handover(entry_point_info_t *bl32,
 		}
 	}
 
-	return XBL_HANDOFF_SUCCESS;
+exit_label:
+	return xbl_status;
 }
diff --git a/plat/xilinx/common/pm_service/pm_api_sys.c b/plat/xilinx/common/pm_service/pm_api_sys.c
index 627266d7b..9af8bb264 100644
--- a/plat/xilinx/common/pm_service/pm_api_sys.c
+++ b/plat/xilinx/common/pm_service/pm_api_sys.c
@@ -149,10 +149,11 @@ enum pm_ret_status pm_self_suspend(uint32_t nid,
 	uint32_t payload[PAYLOAD_ARG_CNT];
 	uint32_t cpuid = plat_my_core_pos();
 	const struct
pm_proc *proc = pm_get_proc(cpuid); + enum pm_ret_status ret = PM_RET_ERROR_INTERNAL; if (proc == NULL) { WARN("Failed to get proc %d\n", cpuid); - return PM_RET_ERROR_INTERNAL; + goto exit_label; } /* @@ -165,7 +166,10 @@ enum pm_ret_status pm_self_suspend(uint32_t nid, PM_PACK_PAYLOAD6(payload, LIBPM_MODULE_ID, flag, PM_SELF_SUSPEND, proc->node_id, latency, state, address, (address >> 32)); - return pm_ipi_send_sync(proc, payload, NULL, 0); + ret = pm_ipi_send_sync(proc, payload, NULL, 0); + +exit_label: + return ret; } /** @@ -215,15 +219,18 @@ enum pm_ret_status pm_req_suspend(uint32_t target, uint8_t ack, uint32_t flag) { uint32_t payload[PAYLOAD_ARG_CNT]; + enum pm_ret_status ret = PM_RET_SUCCESS; /* Send request to the PMU */ PM_PACK_PAYLOAD4(payload, LIBPM_MODULE_ID, flag, PM_REQ_SUSPEND, target, latency, state); if (ack == (uint32_t)IPI_BLOCKING) { - return pm_ipi_send_sync(primary_proc, payload, NULL, 0); + ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0); } else { - return pm_ipi_send(primary_proc, payload); + ret = pm_ipi_send(primary_proc, payload); } + + return ret; } /** @@ -273,15 +280,15 @@ enum pm_ret_status pm_req_wakeup(uint32_t target, uint32_t set_address, enum pm_ret_status pm_get_callbackdata(uint32_t *data, size_t count, uint32_t flag, uint32_t ack) { enum pm_ret_status ret = PM_RET_SUCCESS; + /* Return if interrupt is not from PMU */ - if (pm_ipi_irq_status(primary_proc) == 0U) { - return ret; - } + if (pm_ipi_irq_status(primary_proc) != 0U) { - ret = pm_ipi_buff_read_callb(data, count); + ret = pm_ipi_buff_read_callb(data, count); - if (ack != 0U) { - pm_ipi_irq_clear(primary_proc); + if (ack != 0U) { + pm_ipi_irq_clear(primary_proc); + } } return ret; @@ -302,16 +309,19 @@ enum pm_ret_status pm_force_powerdown(uint32_t target, uint8_t ack, uint32_t flag) { uint32_t payload[PAYLOAD_ARG_CNT]; + enum pm_ret_status ret = PM_RET_SUCCESS; /* Send request to the PMC */ PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_FORCE_POWERDOWN, target, ack); if (ack == (uint32_t)IPI_BLOCKING) { - return pm_ipi_send_sync(primary_proc, payload, NULL, 0); + ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0); } else { - return pm_ipi_send(primary_proc, payload); + ret = pm_ipi_send(primary_proc, payload); } + + return ret; } /** @@ -328,18 +338,22 @@ enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype, uint32_t flag) { uint32_t payload[PAYLOAD_ARG_CNT]; + enum pm_ret_status ret = PM_RET_SUCCESS; if (type == XPM_SHUTDOWN_TYPE_SETSCOPE_ONLY) { /* Setting scope for subsequent PSCI reboot or shutdown */ pm_shutdown_scope = subtype; - return PM_RET_SUCCESS; + goto exit_label; } /* Send request to the PMC */ PM_PACK_PAYLOAD3(payload, LIBPM_MODULE_ID, flag, PM_SYSTEM_SHUTDOWN, type, subtype); - return pm_ipi_send_non_blocking(primary_proc, payload); + ret = pm_ipi_send_non_blocking(primary_proc, payload); + +exit_label: + return ret; } /** @@ -412,16 +426,19 @@ enum pm_ret_status pm_feature_check(uint32_t api_id, uint32_t *ret_payload, { uint32_t payload[PAYLOAD_ARG_CNT]; uint32_t module_id; + enum pm_ret_status ret; /* Return version of API which are implemented in TF-A only */ switch (api_id) { case PM_GET_CALLBACK_DATA: case PM_GET_TRUSTZONE_VERSION: ret_payload[0] = PM_API_VERSION_2; - return PM_RET_SUCCESS; + ret = PM_RET_SUCCESS; + goto exit_label; case TF_A_PM_REGISTER_SGI: ret_payload[0] = PM_API_BASE_VERSION; - return PM_RET_SUCCESS; + ret = PM_RET_SUCCESS; + goto exit_label; default: break; } @@ -433,12 +450,17 @@ enum pm_ret_status 
pm_feature_check(uint32_t api_id, uint32_t *ret_payload, * If module_id is 0, then we consider it LIBPM module as default id */ if ((module_id > 0U) && (module_id != LIBPM_MODULE_ID)) { - return PM_RET_SUCCESS; + ret = PM_RET_SUCCESS; + goto exit_label; } PM_PACK_PAYLOAD2(payload, LIBPM_MODULE_ID, flag, - PM_FEATURE_CHECK, api_id); - return pm_ipi_send_sync(primary_proc, payload, ret_payload, RET_PAYLOAD_ARG_CNT); + PM_FEATURE_CHECK, api_id); + ret = pm_ipi_send_sync(primary_proc, payload, ret_payload, RET_PAYLOAD_ARG_CNT); + +exit_label: + return ret; + } /** diff --git a/plat/xilinx/common/pm_service/pm_ipi.c b/plat/xilinx/common/pm_service/pm_ipi.c index e12e74d80..bf1fd550a 100644 --- a/plat/xilinx/common/pm_service/pm_ipi.c +++ b/plat/xilinx/common/pm_service/pm_ipi.c @@ -294,14 +294,17 @@ void pm_ipi_irq_clear(const struct pm_proc *proc) uint32_t pm_ipi_irq_status(const struct pm_proc *proc) { int32_t ret; + int32_t result = 0; ret = ipi_mb_enquire_status(proc->ipi->local_ipi_id, proc->ipi->remote_ipi_id); if (((uint32_t)ret & IPI_MB_STATUS_RECV_PENDING) != 0U) { - return 1; + result = 1; } else { - return 0; + result = 0; } + + return result; } #if IPI_CRC_CHECK diff --git a/plat/xilinx/common/pm_service/pm_svc_main.c b/plat/xilinx/common/pm_service/pm_svc_main.c index f1484d07a..237761a09 100644 --- a/plat/xilinx/common/pm_service/pm_svc_main.c +++ b/plat/xilinx/common/pm_service/pm_svc_main.c @@ -155,7 +155,7 @@ static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle, ipi_status = ipi_mb_enquire_status(IPI_ID_APU, IPI_ID_PMC); if (((uint32_t)ipi_status & IPI_MB_STATUS_RECV_PENDING) == 0U) { plat_ic_end_of_interrupt(id); - return 0; + goto exit_label; } /* Handle PMC case */ @@ -202,6 +202,7 @@ static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle, /* Clear FIQ */ plat_ic_end_of_interrupt(id); +exit_label: return 0; } @@ -219,21 +220,19 @@ static uint64_t ipi_fiq_handler(uint32_t id, uint32_t flags, void *handle, */ int32_t pm_register_sgi(uint32_t sgi_num, uint32_t reset) { + int32_t ret = 0; + if (reset == 1U) { sgi = INVALID_SGI; - return 0; + } else if (sgi != INVALID_SGI) { + ret = -EBUSY; + } else if (sgi_num >= GICV3_MAX_SGI_TARGETS) { + ret = -EINVAL; + } else { + sgi = (uint32_t)sgi_num; } - if (sgi != INVALID_SGI) { - return -EBUSY; - } - - if (sgi_num >= GICV3_MAX_SGI_TARGETS) { - return -EINVAL; - } - - sgi = (uint32_t)sgi_num; - return 0; + return ret; } /** diff --git a/plat/xilinx/common/versal.c b/plat/xilinx/common/versal.c index 7c29bae33..dc0ae1042 100644 --- a/plat/xilinx/common/versal.c +++ b/plat/xilinx/common/versal.c @@ -25,12 +25,17 @@ */ int32_t plat_is_smccc_feature_available(u_register_t fid) { + int32_t ret = 0; + switch (fid) { case SMCCC_ARCH_SOC_ID: - return SMC_ARCH_CALL_SUCCESS; + ret = SMC_ARCH_CALL_SUCCESS; + break; default: - return SMC_ARCH_CALL_NOT_SUPPORTED; + ret = SMC_ARCH_CALL_NOT_SUPPORTED; } + + return ret; } /** diff --git a/plat/xilinx/versal/bl31_versal_setup.c b/plat/xilinx/versal/bl31_versal_setup.c index 54badf511..befe36c65 100644 --- a/plat/xilinx/versal/bl31_versal_setup.c +++ b/plat/xilinx/versal/bl31_versal_setup.c @@ -151,16 +151,19 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler) { static uint32_t index; uint32_t i; + int32_t ret = 0; /* Validate 'handler' and 'id' parameters */ if ((handler == NULL) || (index >= MAX_INTR_EL3)) { - return -EINVAL; + ret = -EINVAL; + goto exit_label; } /* Check if a handler has already been registered */ for (i = 0; i < index; 
i++) { if (id == type_el3_interrupt_table[i].id) { - return -EALREADY; + ret = -EALREADY; + goto exit_label; } } @@ -169,7 +172,8 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler) index++; - return 0; +exit_label: + return ret; } static uint64_t rdo_el3_interrupt_handler(uint32_t id, uint32_t flags, @@ -178,6 +182,7 @@ static uint64_t rdo_el3_interrupt_handler(uint32_t id, uint32_t flags, (void)id; uint32_t intr_id; uint32_t i; + uint64_t ret = 0; interrupt_type_handler_t handler = NULL; intr_id = plat_ic_get_pending_interrupt_id(); @@ -189,10 +194,10 @@ static uint64_t rdo_el3_interrupt_handler(uint32_t id, uint32_t flags, } if (handler != NULL) { - return handler(intr_id, flags, handle, cookie); + ret = handler(intr_id, flags, handle, cookie); } - return 0; + return ret; } void bl31_platform_setup(void) diff --git a/plat/xilinx/versal/include/versal_def.h b/plat/xilinx/versal/include/versal_def.h index f7149c770..c5bcc95f4 100644 --- a/plat/xilinx/versal/include/versal_def.h +++ b/plat/xilinx/versal/include/versal_def.h @@ -49,10 +49,10 @@ /******************************************************************************* * memory map related constants ******************************************************************************/ -#define DEVICE0_BASE 0xFF000000 -#define DEVICE0_SIZE 0x00E00000 -#define DEVICE1_BASE 0xF9000000 -#define DEVICE1_SIZE 0x00800000 +#define DEVICE0_BASE U(0xFF000000) +#define DEVICE0_SIZE U(0x00E00000) +#define DEVICE1_BASE U(0xF9000000) +#define DEVICE1_SIZE U(0x00800000) /******************************************************************************* * IRQ constants @@ -63,16 +63,16 @@ /******************************************************************************* * CCI-400 related constants ******************************************************************************/ -#define PLAT_ARM_CCI_BASE 0xFD000000 -#define PLAT_ARM_CCI_SIZE 0x00100000 +#define PLAT_ARM_CCI_BASE U(0xFD000000) +#define PLAT_ARM_CCI_SIZE U(0x00100000) #define PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX 4 #define PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX 5 /******************************************************************************* * UART related constants ******************************************************************************/ -#define VERSAL_UART0_BASE 0xFF000000 -#define VERSAL_UART1_BASE 0xFF010000 +#define VERSAL_UART0_BASE U(0xFF000000) +#define VERSAL_UART1_BASE U(0xFF010000) #if CONSOLE_IS(pl011) || CONSOLE_IS(dtb) # define UART_BASE VERSAL_UART0_BASE @@ -115,8 +115,8 @@ #define ACTLR_EL3_CPUACTLR_BIT (1 << 0) /* For cpu reset APU space here too 0xFE5F1000 CRF_APB*/ -#define CRF_BASE 0xFD1A0000 -#define CRF_SIZE 0x00600000 +#define CRF_BASE U(0xFD1A0000) +#define CRF_SIZE U(0x00600000) /* CRF registers and bitfields */ #define CRF_RST_APU (CRF_BASE + 0X00000300) diff --git a/plat/xilinx/versal/plat_psci.c b/plat/xilinx/versal/plat_psci.c index f160563bd..b9762675f 100644 --- a/plat/xilinx/versal/plat_psci.c +++ b/plat/xilinx/versal/plat_psci.c @@ -28,16 +28,17 @@ static int32_t versal_pwr_domain_on(u_register_t mpidr) { int32_t cpu_id = plat_core_pos_by_mpidr(mpidr); const struct pm_proc *proc; + int32_t ret = PSCI_E_INTERN_FAIL; VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr); if (cpu_id == -1) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } proc = pm_get_proc((uint32_t)cpu_id); if (proc == NULL) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } /* Send request to PMC to wake up selected ACPU core */ @@ -47,7 +48,10 @@ static int32_t 
versal_pwr_domain_on(u_register_t mpidr) /* Clear power down request */ pm_client_wakeup(proc); - return PSCI_E_SUCCESS; + ret = PSCI_E_SUCCESS; + +exit_label: + return ret; } /** @@ -246,6 +250,7 @@ static void versal_pwr_domain_off(const psci_power_state_t *target_state) static int32_t versal_validate_power_state(uint32_t power_state, psci_power_state_t *req_state) { + int32_t ret = PSCI_E_SUCCESS; VERBOSE("%s: power_state: 0x%x\n", __func__, power_state); uint32_t pstate = psci_get_pstate_type(power_state); @@ -261,10 +266,10 @@ static int32_t versal_validate_power_state(uint32_t power_state, /* We expect the 'state id' to be zero */ if (psci_get_pstate_id(power_state) != 0U) { - return PSCI_E_INVALID_PARAMS; + ret = PSCI_E_INVALID_PARAMS; } - return PSCI_E_SUCCESS; + return ret; } /** diff --git a/plat/xilinx/versal/plat_versal.c b/plat/xilinx/versal/plat_versal.c index ba17b1d9f..6e0b2d65a 100644 --- a/plat/xilinx/versal/plat_versal.c +++ b/plat/xilinx/versal/plat_versal.c @@ -10,13 +10,12 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr) { - if ((mpidr & MPIDR_CLUSTER_MASK) != 0U) { - return -1; + int32_t ret = -1; + + if (((mpidr & MPIDR_CLUSTER_MASK) == 0U) && + ((mpidr & MPIDR_CPU_MASK) < PLATFORM_CORE_COUNT)) { + ret = versal_calc_core_pos(mpidr); } - if ((mpidr & MPIDR_CPU_MASK) >= PLATFORM_CORE_COUNT) { - return -1; - } - - return (int32_t)versal_calc_core_pos(mpidr); + return ret; } diff --git a/plat/xilinx/versal_net/aarch64/versal_net_common.c b/plat/xilinx/versal_net/aarch64/versal_net_common.c index 0dd019461..736581842 100644 --- a/plat/xilinx/versal_net/aarch64/versal_net_common.c +++ b/plat/xilinx/versal_net/aarch64/versal_net_common.c @@ -42,20 +42,28 @@ const mmap_region_t *plat_get_mmap(void) /* For saving cpu clock for certain platform */ uint32_t cpu_clock; -char *board_name_decode(void) +const char *board_name_decode(void) { + const char *platform; + switch (platform_id) { case VERSAL_NET_SPP: - return "IPP"; + platform = "IPP"; + break; case VERSAL_NET_EMU: - return "EMU"; + platform = "EMU"; + break; case VERSAL_NET_SILICON: - return "Silicon"; + platform = "Silicon"; + break; case VERSAL_NET_QEMU: - return "QEMU"; + platform = "QEMU"; + break; default: - return "Unknown"; + platform = "Unknown"; } + + return platform; } void board_detection(void) diff --git a/plat/xilinx/versal_net/bl31_versal_net_setup.c b/plat/xilinx/versal_net/bl31_versal_net_setup.c index b1fdd9c24..d131a9260 100644 --- a/plat/xilinx/versal_net/bl31_versal_net_setup.c +++ b/plat/xilinx/versal_net/bl31_versal_net_setup.c @@ -178,16 +178,19 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler) { static uint32_t index; uint32_t i; + int32_t ret = 0; /* Validate 'handler' and 'id' parameters */ if ((handler == NULL) || (index >= MAX_INTR_EL3)) { - return -EINVAL; + ret = -EINVAL; + goto exit_label; } /* Check if a handler has already been registered */ for (i = 0; i < index; i++) { if (id == type_el3_interrupt_table[i].id) { - return -EALREADY; + ret = -EALREADY; + goto exit_label; } } @@ -196,7 +199,8 @@ int request_intr_type_el3(uint32_t id, interrupt_type_handler_t handler) index++; - return 0; +exit_label: + return ret; } static uint64_t rdo_el3_interrupt_handler(uint32_t id, uint32_t flags, diff --git a/plat/xilinx/versal_net/include/plat_private.h b/plat/xilinx/versal_net/include/plat_private.h index 0b82ca730..8b4020e1d 100644 --- a/plat/xilinx/versal_net/include/plat_private.h +++ b/plat/xilinx/versal_net/include/plat_private.h @@ -35,7 +35,7 @@ void 
plat_versal_net_gic_redistif_off(void); extern uint32_t cpu_clock, platform_id, platform_version; void board_detection(void); -char *board_name_decode(void); +const char *board_name_decode(void); uint64_t smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags); int32_t sip_svc_setup_init(void); diff --git a/plat/xilinx/versal_net/include/platform_def.h b/plat/xilinx/versal_net/include/platform_def.h index 8cb7deb1d..a7ff84ea5 100644 --- a/plat/xilinx/versal_net/include/platform_def.h +++ b/plat/xilinx/versal_net/include/platform_def.h @@ -25,6 +25,9 @@ #define PLATFORM_CORE_COUNT (PLATFORM_CLUSTER_COUNT * PLATFORM_CORE_COUNT_PER_CLUSTER) +#define E_INVALID_CORE_COUNT -1 +#define E_INVALID_CLUSTER_COUNT -3 + #define PLAT_MAX_PWR_LVL U(2) #define PLAT_MAX_RET_STATE U(1) #define PLAT_MAX_OFF_STATE U(2) diff --git a/plat/xilinx/versal_net/plat_psci_pm.c b/plat/xilinx/versal_net/plat_psci_pm.c index 9f955740d..a76832ee5 100644 --- a/plat/xilinx/versal_net/plat_psci_pm.c +++ b/plat/xilinx/versal_net/plat_psci_pm.c @@ -29,17 +29,18 @@ static int32_t versal_net_pwr_domain_on(u_register_t mpidr) { int32_t cpu_id = plat_core_pos_by_mpidr(mpidr); const struct pm_proc *proc; + int32_t ret = PSCI_E_INTERN_FAIL; VERBOSE("%s: mpidr: 0x%lx, cpuid: %x\n", __func__, mpidr, cpu_id); if (cpu_id == -1) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } proc = pm_get_proc(cpu_id); if (proc == NULL) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } (void)pm_req_wakeup(proc->node_id, (versal_net_sec_entry & 0xFFFFFFFFU) | 0x1U, @@ -48,7 +49,10 @@ static int32_t versal_net_pwr_domain_on(u_register_t mpidr) /* Clear power down request */ pm_client_wakeup(proc); - return PSCI_E_SUCCESS; + ret = PSCI_E_SUCCESS; + +exit_label: + return ret; } /** @@ -64,7 +68,7 @@ static void versal_net_pwr_domain_off(const psci_power_state_t *target_state) const struct pm_proc *proc = pm_get_proc(cpu_id); if (proc == NULL) { - return; + goto exit_label; } for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) { @@ -94,6 +98,9 @@ static void versal_net_pwr_domain_off(const psci_power_state_t *target_state) SECURE_FLAG); } } + +exit_label: + return; } /** @@ -148,7 +155,7 @@ static void versal_net_pwr_domain_suspend(const psci_power_state_t *target_state const struct pm_proc *proc = pm_get_proc(cpu_id); if (proc == NULL) { - return; + goto exit_label; } for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) { @@ -170,6 +177,9 @@ static void versal_net_pwr_domain_suspend(const psci_power_state_t *target_state SECURE_FLAG); /* TODO: disable coherency */ + +exit_label: + return; } static void versal_net_pwr_domain_on_finish(const psci_power_state_t *target_state) @@ -195,12 +205,13 @@ static void versal_net_pwr_domain_suspend_finish(const psci_power_state_t *targe const struct pm_proc *proc = pm_get_proc(cpu_id); if (proc == NULL) { - return; + goto exit_label; } - for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) { VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", __func__, i, target_state->pwr_domain_state[i]); + } /* Clear the APU power control register for this cpu */ pm_client_wakeup(proc); @@ -213,6 +224,9 @@ static void versal_net_pwr_domain_suspend_finish(const psci_power_state_t *targe } plat_arm_gic_cpuif_enable(); + +exit_label: + return; } /** @@ -243,6 +257,8 @@ static void __dead2 versal_net_system_off(void) static int32_t versal_net_validate_power_state(unsigned int power_state, psci_power_state_t *req_state) { + 
int32_t ret = PSCI_E_INVALID_PARAMS; + VERBOSE("%s: power_state: 0x%x\n", __func__, power_state); uint32_t pstate = psci_get_pstate_type(power_state); @@ -257,11 +273,11 @@ static int32_t versal_net_validate_power_state(unsigned int power_state, } /* We expect the 'state id' to be zero */ - if (psci_get_pstate_id(power_state) != 0U) { - return PSCI_E_INVALID_PARAMS; + if (psci_get_pstate_id(power_state) == 0U) { + ret = PSCI_E_SUCCESS; } - return PSCI_E_SUCCESS; + return ret; } /** @@ -274,8 +290,9 @@ static void versal_net_get_sys_suspend_power_state(psci_power_state_t *req_state { uint64_t i; - for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) + for (i = MPIDR_AFFLVL0; i <= PLAT_MAX_PWR_LVL; i++) { req_state->pwr_domain_state[i] = PLAT_MAX_OFF_STATE; + } } static const struct plat_psci_ops versal_net_nopmc_psci_ops = { diff --git a/plat/xilinx/versal_net/plat_topology.c b/plat/xilinx/versal_net/plat_topology.c index 4e2d36ee4..ef5c42661 100644 --- a/plat/xilinx/versal_net/plat_topology.c +++ b/plat/xilinx/versal_net/plat_topology.c @@ -41,6 +41,7 @@ const uint8_t *plat_get_power_domain_tree_desc(void) int32_t plat_core_pos_by_mpidr(u_register_t mpidr) { uint32_t cluster_id, cpu_id; + int32_t ret; mpidr &= MPIDR_AFFINITY_MASK; @@ -48,7 +49,8 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr) cpu_id = (uint32_t)MPIDR_AFFLVL1_VAL(mpidr); if (cluster_id >= PLATFORM_CLUSTER_COUNT) { - return -3; + ret = E_INVALID_CLUSTER_COUNT; + goto exit_label; } /* @@ -56,8 +58,11 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr) * one of the two clusters present on the platform. */ if (cpu_id >= PLATFORM_CORE_COUNT_PER_CLUSTER) { - return -1; + ret = E_INVALID_CORE_COUNT; + } else { + ret = (int32_t)(cpu_id + (cluster_id * PLATFORM_CORE_COUNT_PER_CLUSTER)); } - return (int32_t)(cpu_id + (cluster_id * PLATFORM_CORE_COUNT_PER_CLUSTER)); +exit_label: + return ret; } diff --git a/plat/xilinx/versal_net/sip_svc_setup.c b/plat/xilinx/versal_net/sip_svc_setup.c index bf06e2c63..21657ab96 100644 --- a/plat/xilinx/versal_net/sip_svc_setup.c +++ b/plat/xilinx/versal_net/sip_svc_setup.c @@ -37,7 +37,7 @@ /* SiP Service UUID */ DEFINE_SVC_UUID2(versal_net_sip_uuid, - 0x80d4c25a, 0xebaf, 0x11eb, 0x94, 0x68, + 0x80d4c25au, 0xebaf, 0x11eb, 0x94, 0x68, 0x0b, 0x4e, 0x3b, 0x8f, 0xc3, 0x60); /** diff --git a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c index 6c31a8a4a..412238dde 100644 --- a/plat/xilinx/zynqmp/aarch64/zynqmp_common.c +++ b/plat/xilinx/zynqmp/aarch64/zynqmp_common.c @@ -56,12 +56,15 @@ static uint32_t zynqmp_get_silicon_ver(void) uint32_t get_uart_clk(void) { unsigned int ver = zynqmp_get_silicon_ver(); + uint32_t uart_clk = 0U; if (ver == ZYNQMP_CSU_VERSION_QEMU) { - return 133000000; + uart_clk = 133000000U; } else { - return 100000000; + uart_clk = 100000000U; } + + return uart_clk; } #if LOG_LEVEL >= LOG_LEVEL_NOTICE @@ -311,14 +314,17 @@ static char *zynqmp_print_silicon_idcode(void) int32_t plat_is_smccc_feature_available(u_register_t fid) { + int32_t ret = SMC_ARCH_CALL_NOT_SUPPORTED; + switch (fid) { case SMCCC_ARCH_SOC_ID: - return SMC_ARCH_CALL_SUCCESS; + ret = SMC_ARCH_CALL_SUCCESS; + break; default: - return SMC_ARCH_CALL_NOT_SUPPORTED; + break; } - return SMC_ARCH_CALL_NOT_SUPPORTED; + return ret; } int32_t plat_get_soc_version(void) @@ -397,10 +403,13 @@ void zynqmp_config_setup(void) uint32_t plat_get_syscnt_freq2(void) { uint32_t ver = zynqmp_get_silicon_ver(); + uint32_t ret = 0U; if (ver == ZYNQMP_CSU_VERSION_QEMU) { - return 65000000; + 
ret = 65000000U; } else { - return mmio_read_32((uint64_t)IOU_SCNTRS_BASEFREQ); + ret = mmio_read_32((uint64_t)IOU_SCNTRS_BASEFREQ); } + + return ret; } diff --git a/plat/xilinx/zynqmp/plat_psci.c b/plat/xilinx/zynqmp/plat_psci.c index a61935992..3fae40778 100644 --- a/plat/xilinx/zynqmp/plat_psci.c +++ b/plat/xilinx/zynqmp/plat_psci.c @@ -36,22 +36,23 @@ static int32_t zynqmp_pwr_domain_on(u_register_t mpidr) const struct pm_proc *proc; uint32_t buff[3]; enum pm_ret_status ret; + int32_t result = PSCI_E_INTERN_FAIL; VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr); if (cpu_id == -1) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } proc = pm_get_proc(cpu_id); if (proc == NULL) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } /* Check the APU proc status before wakeup */ ret = pm_get_node_status(proc->node_id, buff); if ((ret != PM_RET_SUCCESS) || (buff[0] == PM_PROC_STATE_SUSPENDING)) { - return PSCI_E_INTERN_FAIL; + goto exit_label; } /* Clear power down request */ @@ -60,7 +61,10 @@ static int32_t zynqmp_pwr_domain_on(u_register_t mpidr) /* Send request to PMU to wake up selected APU CPU core */ (void)pm_req_wakeup(proc->node_id, 1, zynqmp_sec_entry, REQ_ACK_BLOCKING); - return PSCI_E_SUCCESS; + result = PSCI_E_SUCCESS; + +exit_label: + return result; } static void zynqmp_pwr_domain_off(const psci_power_state_t *target_state) @@ -101,9 +105,10 @@ static void zynqmp_pwr_domain_suspend(const psci_power_state_t *target_state) return; } - for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) + for (size_t i = 0; i <= PLAT_MAX_PWR_LVL; i++) { VERBOSE("%s: target_state->pwr_domain_state[%lu]=%x\n", __func__, i, target_state->pwr_domain_state[i]); + } state = (target_state->pwr_domain_state[1] > PLAT_MAX_RET_STATE) ? PM_STATE_SUSPEND_TO_RAM : PM_STATE_CPU_IDLE; @@ -194,6 +199,7 @@ static int32_t zynqmp_validate_power_state(uint32_t power_state, VERBOSE("%s: power_state: 0x%x\n", __func__, power_state); uint32_t pstate = psci_get_pstate_type(power_state); + int32_t result = PSCI_E_INVALID_PARAMS; assert(req_state); @@ -204,11 +210,11 @@ static int32_t zynqmp_validate_power_state(uint32_t power_state, req_state->pwr_domain_state[MPIDR_AFFLVL0] = PLAT_MAX_OFF_STATE; } /* We expect the 'state id' to be zero */ - if (psci_get_pstate_id(power_state) != 0U) { - return PSCI_E_INVALID_PARAMS; + if (psci_get_pstate_id(power_state) == 0U) { + result = PSCI_E_SUCCESS; } - return PSCI_E_SUCCESS; + return result; } static void zynqmp_get_sys_suspend_power_state(psci_power_state_t *req_state) diff --git a/plat/xilinx/zynqmp/plat_zynqmp.c b/plat/xilinx/zynqmp/plat_zynqmp.c index e7c03786b..3a4c1720a 100644 --- a/plat/xilinx/zynqmp/plat_zynqmp.c +++ b/plat/xilinx/zynqmp/plat_zynqmp.c @@ -10,13 +10,12 @@ int32_t plat_core_pos_by_mpidr(u_register_t mpidr) { - if ((mpidr & MPIDR_CLUSTER_MASK) != 0U) { - return -1; + int32_t core_pos = -1; + + if (((mpidr & MPIDR_CLUSTER_MASK) == 0U) && + ((mpidr & MPIDR_CPU_MASK) < PLATFORM_CORE_COUNT)) { + core_pos = (int32_t)zynqmp_calc_core_pos(mpidr); } - if ((mpidr & MPIDR_CPU_MASK) >= PLATFORM_CORE_COUNT) { - return -1; - } - - return (int32_t)zynqmp_calc_core_pos(mpidr); + return core_pos; } diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_clock.c b/plat/xilinx/zynqmp/pm_service/pm_api_clock.c index dbc5f1333..aad3114bc 100644 --- a/plat/xilinx/zynqmp/pm_service/pm_api_clock.c +++ b/plat/xilinx/zynqmp/pm_service/pm_api_clock.c @@ -2405,12 +2405,16 @@ static uint32_t pm_clk_invalid_list[] = {CLK_USB0, CLK_USB1, CLK_CSU_SPB, static bool pm_clock_valid(uint32_t clock_id) { 
unsigned int i; + bool valid = true; - for (i = 0U; i < ARRAY_SIZE(pm_clk_invalid_list); i++) - if (pm_clk_invalid_list[i] == clock_id) - return 0; + for (i = 0U; i < ARRAY_SIZE(pm_clk_invalid_list); i++) { + if (pm_clk_invalid_list[i] == clock_id) { + valid = false; + break; + } + } - return 1; + return valid; } /** @@ -2492,13 +2496,15 @@ enum pm_ret_status pm_api_clock_get_topology(uint32_t clock_id, uint8_t num_nodes; uint32_t i; uint16_t typeflags; + enum pm_ret_status status = PM_RET_ERROR_ARGS; if (!pm_clock_valid(clock_id)) { - return PM_RET_ERROR_ARGS; + goto exit_label; } if (pm_clock_type(clock_id) != CLK_TYPE_OUTPUT) { - return PM_RET_ERROR_NOTSUPPORTED; + status = PM_RET_ERROR_NOTSUPPORTED; + goto exit_label; } (void)memset(topology, 0, CLK_TOPOLOGY_PAYLOAD_LEN); @@ -2507,7 +2513,8 @@ enum pm_ret_status pm_api_clock_get_topology(uint32_t clock_id, /* Skip parent till index */ if (index >= num_nodes) { - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + goto exit_label; } for (i = 0; i < 3U; i++) { @@ -2525,7 +2532,10 @@ enum pm_ret_status pm_api_clock_get_topology(uint32_t clock_id, (CLK_TYPEFLAGS_BITS - CLK_TYPEFLAGS2_SHIFT)); } - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + +exit_label: + return status; } /** @@ -2548,13 +2558,15 @@ enum pm_ret_status pm_api_clock_get_fixedfactor_params(uint32_t clock_id, const struct pm_clock_node *clock_nodes; uint8_t num_nodes; uint32_t type, i; + enum pm_ret_status status = PM_RET_ERROR_ARGS; if (!pm_clock_valid(clock_id)) { - return PM_RET_ERROR_ARGS; + goto exit_label; } if (pm_clock_type(clock_id) != CLK_TYPE_OUTPUT) { - return PM_RET_ERROR_NOTSUPPORTED; + status = PM_RET_ERROR_NOTSUPPORTED; + goto exit_label; } clock_nodes = *clocks[clock_id].nodes; @@ -2570,11 +2582,12 @@ enum pm_ret_status pm_api_clock_get_fixedfactor_params(uint32_t clock_id, } /* Clock is not fixed clock */ - if (i == num_nodes) { - return PM_RET_ERROR_ARGS; + if (i != num_nodes) { + status = PM_RET_SUCCESS; } - return PM_RET_SUCCESS; +exit_label: + return status; } /** @@ -2601,18 +2614,20 @@ enum pm_ret_status pm_api_clock_get_parents(uint32_t clock_id, { uint32_t i; const int32_t *clk_parents; + enum pm_ret_status status = PM_RET_ERROR_ARGS; if (!pm_clock_valid(clock_id)) { - return PM_RET_ERROR_ARGS; + goto exit_label; } if (pm_clock_type(clock_id) != CLK_TYPE_OUTPUT) { - return PM_RET_ERROR_NOTSUPPORTED; + status = PM_RET_ERROR_NOTSUPPORTED; + goto exit_label; } clk_parents = *clocks[clock_id].parents; if (clk_parents == NULL) { - return PM_RET_ERROR_ARGS; + goto exit_label; } (void)memset(parents, 0, CLK_PARENTS_PAYLOAD_LEN); @@ -2620,7 +2635,8 @@ enum pm_ret_status pm_api_clock_get_parents(uint32_t clock_id, /* Skip parent till index */ for (i = 0; i < index; i++) { if (clk_parents[i] == CLK_NA_PARENT) { - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + goto exit_label; } } @@ -2631,7 +2647,10 @@ enum pm_ret_status pm_api_clock_get_parents(uint32_t clock_id, } } - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + +exit_label: + return status; } /** @@ -2648,8 +2667,10 @@ enum pm_ret_status pm_api_clock_get_parents(uint32_t clock_id, enum pm_ret_status pm_api_clock_get_attributes(uint32_t clock_id, uint32_t *attr) { + enum pm_ret_status status = PM_RET_ERROR_ARGS; + if (clock_id >= (uint32_t)CLK_MAX) { - return PM_RET_ERROR_ARGS; + goto exit_label; } /* Clock valid bit */ @@ -2658,7 +2679,10 @@ enum pm_ret_status pm_api_clock_get_attributes(uint32_t clock_id, /* Clock type (Output/External) */ *attr |= (pm_clock_type(clock_id) << 
CLK_TYPE_SHIFT); - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + +exit_label: + return status; } /** @@ -2678,9 +2702,10 @@ enum pm_ret_status pm_api_clock_get_max_divisor(enum clock_id clock_id, { uint32_t i; const struct pm_clock_node *nodes; + enum pm_ret_status status = PM_RET_ERROR_ARGS; if (clock_id >= CLK_MAX_OUTPUT_CLK) { - return PM_RET_ERROR_ARGS; + goto exit_label; } nodes = *clocks[clock_id].nodes; @@ -2693,11 +2718,13 @@ enum pm_ret_status pm_api_clock_get_max_divisor(enum clock_id clock_id, } else { *max_div = (uint32_t)BIT(nodes[i].width) - (uint32_t)1U; } - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + break; } } - return PM_RET_ERROR_ARGS; +exit_label: + return status; } /** @@ -2771,14 +2798,16 @@ static struct pm_pll pm_plls[] = { struct pm_pll *pm_clock_get_pll(enum clock_id clock_id) { uint32_t i; + struct pm_pll *pll = NULL; for (i = 0; i < ARRAY_SIZE(pm_plls); i++) { if (pm_plls[i].cid == clock_id) { - return &pm_plls[i]; + pll = &pm_plls[i]; + break; } } - return NULL; + return pll; } /** @@ -2793,13 +2822,14 @@ enum pm_ret_status pm_clock_get_pll_node_id(enum clock_id clock_id, enum pm_node_id *node_id) { const struct pm_pll *pll = pm_clock_get_pll(clock_id); + enum pm_ret_status status = PM_RET_ERROR_ARGS; if (pll != NULL) { *node_id = pll->nid; - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; } - return PM_RET_ERROR_ARGS; + return status; } /** @@ -2813,17 +2843,19 @@ enum pm_ret_status pm_clock_get_pll_node_id(enum clock_id clock_id, struct pm_pll *pm_clock_get_pll_by_related_clk(enum clock_id clock_id) { uint32_t i; + struct pm_pll *pll = NULL; for (i = 0; i < ARRAY_SIZE(pm_plls); i++) { if ((pm_plls[i].pre_src == clock_id) || (pm_plls[i].post_src == clock_id) || (pm_plls[i].div2 == clock_id) || (pm_plls[i].bypass == clock_id)) { - return &pm_plls[i]; + pll = &pm_plls[i]; + break; } } - return NULL; + return pll; } /** @@ -2838,16 +2870,18 @@ struct pm_pll *pm_clock_get_pll_by_related_clk(enum clock_id clock_id) */ enum pm_ret_status pm_clock_pll_enable(struct pm_pll *pll) { - if (pll == NULL) { - return PM_RET_ERROR_ARGS; + enum pm_ret_status status = PM_RET_ERROR_ARGS; + + if (pll != NULL) { + /* Set the PLL mode according to the buffered mode value */ + if (pll->mode == PLL_FRAC_MODE) { + status = pm_pll_set_mode(pll->nid, PM_PLL_MODE_FRACTIONAL); + } else { + status = pm_pll_set_mode(pll->nid, PM_PLL_MODE_INTEGER); + } } - /* Set the PLL mode according to the buffered mode value */ - if (pll->mode == PLL_FRAC_MODE) { - return pm_pll_set_mode(pll->nid, PM_PLL_MODE_FRACTIONAL); - } - - return pm_pll_set_mode(pll->nid, PM_PLL_MODE_INTEGER); + return status; } /** @@ -2862,11 +2896,13 @@ enum pm_ret_status pm_clock_pll_enable(struct pm_pll *pll) */ enum pm_ret_status pm_clock_pll_disable(struct pm_pll *pll) { - if (pll == NULL) { - return PM_RET_ERROR_ARGS; + enum pm_ret_status status = PM_RET_ERROR_ARGS; + + if (pll != NULL) { + status = pm_pll_set_mode(pll->nid, PM_PLL_MODE_RESET); } - return pm_pll_set_mode(pll->nid, PM_PLL_MODE_RESET); + return status; } /** @@ -2883,16 +2919,16 @@ enum pm_ret_status pm_clock_pll_disable(struct pm_pll *pll) enum pm_ret_status pm_clock_pll_get_state(struct pm_pll *pll, uint32_t *state) { - enum pm_ret_status status; + enum pm_ret_status status = PM_RET_ERROR_ARGS; enum pm_pll_mode mode; if ((pll == NULL) || (state == NULL)) { - return PM_RET_ERROR_ARGS; + goto exit_label; } status = pm_pll_get_mode(pll->nid, &mode); if (status != PM_RET_SUCCESS) { - return status; + goto exit_label; } if (mode == 
PM_PLL_MODE_RESET) { @@ -2901,7 +2937,10 @@ enum pm_ret_status pm_clock_pll_get_state(struct pm_pll *pll, *state = 1; } - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; + +exit_label: + return status; } /** @@ -2921,23 +2960,25 @@ enum pm_ret_status pm_clock_pll_set_parent(struct pm_pll *pll, enum clock_id clock_id, uint32_t parent_index) { + enum pm_ret_status status = PM_RET_ERROR_ARGS; + if (pll == NULL) { - return PM_RET_ERROR_ARGS; + goto exit_label; } if (pll->pre_src == clock_id) { - return pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_PRE_SRC, - parent_index); + status = pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_PRE_SRC, parent_index); + goto exit_label; } if (pll->post_src == clock_id) { - return pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_POST_SRC, - parent_index); + status = pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_POST_SRC, parent_index); + goto exit_label; } if (pll->div2 == clock_id) { - return pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_DIV2, - parent_index); + status = pm_pll_set_parameter(pll->nid, PM_PLL_PARAM_DIV2, parent_index); } - return PM_RET_ERROR_ARGS; +exit_label: + return status; } /** @@ -2955,27 +2996,33 @@ enum pm_ret_status pm_clock_pll_get_parent(struct pm_pll *pll, enum clock_id clock_id, uint32_t *parent_index) { + enum pm_ret_status status = PM_RET_ERROR_ARGS; + if (pll == NULL) { - return PM_RET_ERROR_ARGS; + goto exit_label; } if (pll->pre_src == clock_id) { - return pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_PRE_SRC, - parent_index); + status = pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_PRE_SRC, + parent_index); + goto exit_label; } if (pll->post_src == clock_id) { - return pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_POST_SRC, - parent_index); + status = pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_POST_SRC, + parent_index); + goto exit_label; } if (pll->div2 == clock_id) { - return pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_DIV2, - parent_index); + status = pm_pll_get_parameter(pll->nid, PM_PLL_PARAM_DIV2, + parent_index); + goto exit_label; } if (pll->bypass == clock_id) { *parent_index = 0; - return PM_RET_SUCCESS; + status = PM_RET_SUCCESS; } - return PM_RET_ERROR_ARGS; +exit_label: + return status; } /** @@ -2992,13 +3039,14 @@ enum pm_ret_status pm_clock_set_pll_mode(enum clock_id clock_id, uint32_t mode) { struct pm_pll *pll = pm_clock_get_pll(clock_id); + enum pm_ret_status status = PM_RET_ERROR_ARGS; - if ((pll == NULL) || ((mode != PLL_FRAC_MODE) && (mode != PLL_INT_MODE))) { - return PM_RET_ERROR_ARGS; + if (!((pll == NULL) || ((mode != PLL_FRAC_MODE) && (mode != PLL_INT_MODE)))) { + pll->mode = (uint8_t)mode; + status = PM_RET_SUCCESS; } - pll->mode = (uint8_t)mode; - return PM_RET_SUCCESS; + return status; } /** @@ -3015,13 +3063,14 @@ enum pm_ret_status pm_clock_get_pll_mode(enum clock_id clock_id, uint32_t *mode) { const struct pm_pll *pll = pm_clock_get_pll(clock_id); + enum pm_ret_status status = PM_RET_ERROR_ARGS; - if ((pll == NULL) || (mode == NULL)) { - return PM_RET_ERROR_ARGS; + if ((pll != NULL) && (mode != NULL)) { + *mode = pll->mode; + status = PM_RET_SUCCESS; } - *mode = pll->mode; - return PM_RET_SUCCESS; + return status; } /** @@ -3033,15 +3082,17 @@ enum pm_ret_status pm_clock_get_pll_mode(enum clock_id clock_id, */ enum pm_ret_status pm_clock_id_is_valid(uint32_t clock_id) { - if (!pm_clock_valid(clock_id)) { - return PM_RET_ERROR_ARGS; + enum pm_ret_status status = PM_RET_ERROR_ARGS; + + if (pm_clock_valid(clock_id)) { + if (pm_clock_type(clock_id) != CLK_TYPE_OUTPUT) { + status = PM_RET_ERROR_NOTSUPPORTED; + } 
else { + status = PM_RET_SUCCESS; + } } - if (pm_clock_type(clock_id) != CLK_TYPE_OUTPUT) { - return PM_RET_ERROR_NOTSUPPORTED; - } - - return PM_RET_SUCCESS; + return status; } /** @@ -3056,23 +3107,29 @@ uint8_t pm_clock_has_div(uint32_t clock_id, enum pm_clock_div_id div_id) { uint32_t i; const struct pm_clock_node *nodes; + uint8_t status = 0U; if (clock_id >= (uint32_t)CLK_MAX_OUTPUT_CLK) { - return 0; + goto exit_label; } nodes = *clocks[clock_id].nodes; for (i = 0; i < clocks[clock_id].num_nodes; i++) { if (nodes[i].type == TYPE_DIV1) { - if (div_id == PM_CLOCK_DIV0_ID) - return 1; + if (div_id == PM_CLOCK_DIV0_ID) { + status = 1U; + break; + } } else if (nodes[i].type == TYPE_DIV2) { - if (div_id == PM_CLOCK_DIV1_ID) - return 1; + if (div_id == PM_CLOCK_DIV1_ID) { + status = 1U; + break; + } } else { /* To fix the misra 15.7 warning */ } } - return 0; +exit_label: + return status; } diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_ioctl.c b/plat/xilinx/zynqmp/pm_service/pm_api_ioctl.c index a8404bad0..fd3992cad 100644 --- a/plat/xilinx/zynqmp/pm_service/pm_api_ioctl.c +++ b/plat/xilinx/zynqmp/pm_service/pm_api_ioctl.c @@ -61,9 +61,11 @@ static enum pm_ret_status pm_ioctl_get_rpu_oper_mode(uint32_t *mode) static enum pm_ret_status pm_ioctl_set_rpu_oper_mode(uint32_t mode) { uint32_t val; + enum pm_ret_status status = PM_RET_SUCCESS; if ((mmio_read_32(CRL_APB_RST_LPD_TOP) & CRL_APB_RPU_AMBA_RESET) != 0U) { - return PM_RET_ERROR_ACCESS; + status = PM_RET_ERROR_ACCESS; + goto exit_label; } val = mmio_read_32(ZYNQMP_RPU_GLBL_CNTL); @@ -77,12 +79,14 @@ static enum pm_ret_status pm_ioctl_set_rpu_oper_mode(uint32_t mode) val |= ZYNQMP_TCM_COMB_MASK; val |= ZYNQMP_SLCLAMP_MASK; } else { - return PM_RET_ERROR_ARGS; + status = PM_RET_ERROR_ARGS; + goto exit_label; } mmio_write_32(ZYNQMP_RPU_GLBL_CNTL, val); - return PM_RET_SUCCESS; +exit_label: + return status; } /** @@ -136,6 +140,7 @@ static enum pm_ret_status pm_ioctl_config_boot_addr(enum pm_node_id nid, static enum pm_ret_status pm_ioctl_config_tcm_comb(uint32_t value) { uint32_t val; + enum pm_ret_status status = PM_RET_SUCCESS; val = mmio_read_32(ZYNQMP_RPU_GLBL_CNTL); @@ -144,12 +149,14 @@ static enum pm_ret_status pm_ioctl_config_tcm_comb(uint32_t value) } else if (value == PM_RPU_TCM_COMB) { val |= ZYNQMP_TCM_COMB_MASK; } else { - return PM_RET_ERROR_ARGS; + status = PM_RET_ERROR_ARGS; + goto exit_label; } mmio_write_32(ZYNQMP_RPU_GLBL_CNTL, val); - return PM_RET_SUCCESS; +exit_label: + return status; } /** @@ -165,12 +172,16 @@ static enum pm_ret_status pm_ioctl_config_tcm_comb(uint32_t value) static enum pm_ret_status pm_ioctl_set_tapdelay_bypass(uint32_t type, uint32_t value) { + enum pm_ret_status status = PM_RET_SUCCESS; + if ((((value != PM_TAPDELAY_BYPASS_ENABLE) && (value != PM_TAPDELAY_BYPASS_DISABLE)) || (type >= PM_TAPDELAY_MAX))) { - return PM_RET_ERROR_ARGS; + status = PM_RET_ERROR_ARGS; + } else { + status = pm_mmio_write(IOU_TAPDLY_BYPASS, TAP_DELAY_MASK, value << type); } - return pm_mmio_write(IOU_TAPDLY_BYPASS, TAP_DELAY_MASK, value << type); + return status; } /** @@ -372,11 +383,11 @@ static enum pm_ret_status pm_ioctl_set_pll_frac_data /* Get PLL node ID using PLL clock ID */ status = pm_clock_get_pll_node_id(pll, &pll_nid); - if (status != PM_RET_SUCCESS) { - return status; + if (status == PM_RET_SUCCESS) { + status = pm_pll_set_parameter(pll_nid, PM_PLL_PARAM_DATA, data); } - return pm_pll_set_parameter(pll_nid, PM_PLL_PARAM_DATA, data); + return status; } /** @@ -397,11 +408,11 @@ static enum pm_ret_status 
pm_ioctl_get_pll_frac_data /* Get PLL node ID using PLL clock ID */ status = pm_clock_get_pll_node_id(pll, &pll_nid); - if (status != PM_RET_SUCCESS) { - return status; + if (status == PM_RET_SUCCESS) { + status = pm_pll_get_parameter(pll_nid, PM_PLL_PARAM_DATA, data); } - return pm_pll_get_parameter(pll_nid, PM_PLL_PARAM_DATA, data); + return status; } /** @@ -418,12 +429,16 @@ static enum pm_ret_status pm_ioctl_get_pll_frac_data static enum pm_ret_status pm_ioctl_write_ggs(uint32_t index, uint32_t value) { + enum pm_ret_status ret_status = PM_RET_SUCCESS; + if (index >= GGS_NUM_REGS) { - return PM_RET_ERROR_ARGS; + ret_status = PM_RET_ERROR_ARGS; + } else { + ret_status = pm_mmio_write((uint64_t)GGS_BASEADDR + (index << 2), + 0xFFFFFFFFU, value); } - return pm_mmio_write((uint64_t)(GGS_BASEADDR + (index << 2)), - 0xFFFFFFFFU, value); + return ret_status; } /** @@ -440,11 +455,15 @@ static enum pm_ret_status pm_ioctl_write_ggs(uint32_t index, static enum pm_ret_status pm_ioctl_read_ggs(uint32_t index, uint32_t *value) { + enum pm_ret_status ret_status = PM_RET_SUCCESS; + if (index >= GGS_NUM_REGS) { - return PM_RET_ERROR_ARGS; + ret_status = PM_RET_ERROR_ARGS; + } else { + ret_status = pm_mmio_read((uint64_t)GGS_BASEADDR + (index << 2), value); } - return pm_mmio_read((uint64_t)(GGS_BASEADDR + (index << 2)), value); + return ret_status; } /** @@ -461,12 +480,16 @@ static enum pm_ret_status pm_ioctl_read_ggs(uint32_t index, static enum pm_ret_status pm_ioctl_write_pggs(uint32_t index, uint32_t value) { + enum pm_ret_status ret_status = PM_RET_SUCCESS; + if (index >= PGGS_NUM_REGS) { - return PM_RET_ERROR_ARGS; + ret_status = PM_RET_ERROR_ARGS; + } else { + ret_status = pm_mmio_write((uint64_t)PGGS_BASEADDR + (index << 2), + 0xFFFFFFFFU, value); } - return pm_mmio_write((uint64_t)(PGGS_BASEADDR + (index << 2)), - 0xFFFFFFFFU, value); + return ret_status; } /** @@ -481,6 +504,7 @@ static enum pm_ret_status pm_ioctl_afi(uint32_t index, uint32_t value) { uint32_t mask; + enum pm_ret_status status = PM_RET_ERROR_ARGS; const uint32_t regarr[] = {0xFD360000U, 0xFD360014U, 0xFD370000U, @@ -499,17 +523,16 @@ static enum pm_ret_status pm_ioctl_afi(uint32_t index, 0xFF419000U, }; - if (index >= ARRAY_SIZE(regarr)) { - return PM_RET_ERROR_ARGS; + if (index < ARRAY_SIZE(regarr)) { + if (index <= AFIFM6_WRCTRL) { + mask = FABRIC_WIDTH; + } else { + mask = 0xf00; + } + status = pm_mmio_write(regarr[index], mask, value); } - if (index <= AFIFM6_WRCTRL) { - mask = FABRIC_WIDTH; - } else { - mask = 0xf00; - } - - return pm_mmio_write(regarr[index], mask, value); + return status; } /** @@ -526,11 +549,15 @@ static enum pm_ret_status pm_ioctl_afi(uint32_t index, static enum pm_ret_status pm_ioctl_read_pggs(uint32_t index, uint32_t *value) { + enum pm_ret_status status = 0; + if (index >= PGGS_NUM_REGS) { - return PM_RET_ERROR_ARGS; + status = PM_RET_ERROR_ARGS; + } else { + status = pm_mmio_read((uint64_t)PGGS_BASEADDR + (index << 2), value); } - return pm_mmio_read((uint64_t)(PGGS_BASEADDR + (index << 2)), value); + return status; } /** @@ -548,7 +575,7 @@ static enum pm_ret_status pm_ioctl_ulpi_reset(void) ret = pm_mmio_write(CRL_APB_BOOT_PIN_CTRL, CRL_APB_BOOT_PIN_MASK, ZYNQMP_ULPI_RESET_VAL_HIGH); if (ret != PM_RET_SUCCESS) { - return ret; + goto exit_label; } /* Drive ULPI assert for atleast 1ms */ @@ -557,7 +584,7 @@ static enum pm_ret_status pm_ioctl_ulpi_reset(void) ret = pm_mmio_write(CRL_APB_BOOT_PIN_CTRL, CRL_APB_BOOT_PIN_MASK, ZYNQMP_ULPI_RESET_VAL_LOW); if (ret != PM_RET_SUCCESS) { - return ret; 
+		goto exit_label;
 	}
 
 	/* Drive ULPI de-assert for atleast 1ms */
@@ -566,6 +593,7 @@ static enum pm_ret_status pm_ioctl_ulpi_reset(void)
 	ret = pm_mmio_write(CRL_APB_BOOT_PIN_CTRL, CRL_APB_BOOT_PIN_MASK,
 			    ZYNQMP_ULPI_RESET_VAL_HIGH);
 
+exit_label:
 	return ret;
 }
 
@@ -703,12 +731,13 @@ enum pm_ret_status tfa_ioctl_bitmask(uint32_t *bit_mask)
 		IOCTL_AFI,
 	};
 	uint8_t i, ioctl_id;
-	enum pm_ret_status ret;
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	for (i = 0U; i < ARRAY_SIZE(supported_ids); i++) {
 		ioctl_id = supported_ids[i];
 		if (ioctl_id >= 64U) {
-			return PM_RET_ERROR_NOTSUPPORTED;
+			ret = PM_RET_ERROR_NOTSUPPORTED;
+			break;
 		}
 		ret = check_api_dependency(ioctl_id);
 		if (ret == PM_RET_SUCCESS) {
@@ -716,5 +745,5 @@ enum pm_ret_status tfa_ioctl_bitmask(uint32_t *bit_mask)
 		}
 	}
 
-	return PM_RET_SUCCESS;
+	return ret;
 }
diff --git a/plat/xilinx/zynqmp/pm_service/pm_api_pinctrl.c b/plat/xilinx/zynqmp/pm_service/pm_api_pinctrl.c
index 763d9fa41..5ffd9efa4 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_api_pinctrl.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_api_pinctrl.c
@@ -1991,13 +1991,16 @@ enum pm_ret_status pm_api_pinctrl_get_num_functions(uint32_t *nfuncs)
 enum pm_ret_status pm_api_pinctrl_get_num_func_groups(uint32_t fid,
						       uint32_t *ngroups)
 {
+	enum pm_ret_status status = PM_RET_SUCCESS;
+
 	if (fid >= (uint32_t)MAX_FUNCTION) {
-		return PM_RET_ERROR_ARGS;
+		status = PM_RET_ERROR_ARGS;
+	} else {
+
+		*ngroups = pinctrl_functions[fid].group_size;
 	}
 
-	*ngroups = pinctrl_functions[fid].group_size;
-
-	return PM_RET_SUCCESS;
+	return status;
 }
 
 /**
@@ -2044,9 +2047,11 @@ enum pm_ret_status pm_api_pinctrl_get_function_groups(uint32_t fid,
 	uint16_t grps;
 	uint16_t end_of_grp_offset;
 	uint16_t i;
+	enum pm_ret_status status = PM_RET_SUCCESS;
 
 	if (fid >= (uint32_t)MAX_FUNCTION) {
-		return PM_RET_ERROR_ARGS;
+		status = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	(void)memset(groups, END_OF_GROUPS, GROUPS_PAYLOAD_LEN);
@@ -2061,7 +2066,8 @@ enum pm_ret_status pm_api_pinctrl_get_function_groups(uint32_t fid,
 		groups[i] = (uint16_t)(grps + index + i);
 	}
 
-	return PM_RET_SUCCESS;
+exit_label:
+	return status;
 }
 
 /**
@@ -2089,22 +2095,26 @@ enum pm_ret_status pm_api_pinctrl_get_pin_groups(uint32_t pin,
 {
 	uint32_t i;
 	const uint16_t *grps;
+	enum pm_ret_status status = PM_RET_SUCCESS;
 
 	if (pin >= (uint32_t)MAX_PIN) {
-		return PM_RET_ERROR_ARGS;
+		status = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	(void)memset(groups, END_OF_GROUPS, GROUPS_PAYLOAD_LEN);
 
 	grps = *zynqmp_pin_groups[pin].groups;
 	if (grps == NULL) {
-		return PM_RET_SUCCESS;
+		status = PM_RET_SUCCESS;
+		goto exit_label;
 	}
 
 	/* Skip groups till index */
 	for (i = 0; i < index; i++) {
 		if (grps[i] == (uint16_t)END_OF_GROUPS) {
-			return PM_RET_SUCCESS;
+			status = PM_RET_SUCCESS;
+			goto exit_label;
 		}
 	}
 
@@ -2115,5 +2125,6 @@ enum pm_ret_status pm_api_pinctrl_get_pin_groups(uint32_t pin,
 		}
 	}
 
-	return PM_RET_SUCCESS;
+exit_label:
+	return status;
 }
diff --git a/plat/xilinx/zynqmp/pm_service/pm_client.c b/plat/xilinx/zynqmp/pm_service/pm_client.c
index 9882e3016..cadde9abc 100644
--- a/plat/xilinx/zynqmp/pm_service/pm_client.c
+++ b/plat/xilinx/zynqmp/pm_service/pm_client.c
@@ -238,11 +238,13 @@ static void pm_client_set_wakeup_sources(void)
  */
 const struct pm_proc *pm_get_proc(uint32_t cpuid)
 {
+	const struct pm_proc *ret = NULL;
+
 	if (cpuid < ARRAY_SIZE(pm_procs_all)) {
-		return &pm_procs_all[cpuid];
+		ret = &pm_procs_all[cpuid];
 	}
 
-	return NULL;
+	return ret;
 }
 
 /**
@@ -254,12 +256,16 @@ const struct pm_proc *pm_get_proc(uint32_t cpuid)
  */
 static uint32_t pm_get_cpuid(enum pm_node_id nid)
 {
+	uint32_t ret = UNDEFINED_CPUID;
+
 	for (size_t i = 0; i < ARRAY_SIZE(pm_procs_all); i++) {
 		if (pm_procs_all[i].node_id == nid) {
-			return i;
+			ret = i;
+			break;
 		}
 	}
-	return UNDEFINED_CPUID;
+
+	return ret;
 }
 
 const struct pm_proc *primary_proc = &pm_procs_all[0];
@@ -321,28 +327,30 @@ void pm_client_abort_suspend(void)
 void pm_client_wakeup(const struct pm_proc *proc)
 {
 	uint32_t cpuid = pm_get_cpuid(proc->node_id);
+	uint32_t val;
 
-	if (cpuid == UNDEFINED_CPUID) {
-		return;
+	if (cpuid != UNDEFINED_CPUID) {
+		bakery_lock_get(&pm_client_secure_lock);
+
+		/* clear powerdown bit for affected cpu */
+		val = mmio_read_32(APU_PWRCTL);
+
+		val &= ~(proc->pwrdn_mask);
+		mmio_write_32(APU_PWRCTL, val);
+
+		bakery_lock_release(&pm_client_secure_lock);
 	}
-
-	bakery_lock_get(&pm_client_secure_lock);
-
-	/* clear powerdown bit for affected cpu */
-	uint32_t val = mmio_read_32(APU_PWRCTL);
-	val &= ~(proc->pwrdn_mask);
-	mmio_write_32(APU_PWRCTL, val);
-
-	bakery_lock_release(&pm_client_secure_lock);
 }
 
 enum pm_ret_status pm_set_suspend_mode(uint32_t mode)
 {
-	if ((mode != PM_SUSPEND_MODE_STD) &&
-	    (mode != PM_SUSPEND_MODE_POWER_OFF)) {
-		return PM_RET_ERROR_ARGS;
+	enum pm_ret_status suspend_mode_status = PM_RET_ERROR_ARGS;
+
+	if ((mode == PM_SUSPEND_MODE_STD) ||
+	    (mode == PM_SUSPEND_MODE_POWER_OFF)) {
+		suspend_mode = mode;
+		suspend_mode_status = PM_RET_SUCCESS;
 	}
 
-	suspend_mode = mode;
-	return PM_RET_SUCCESS;
+	return suspend_mode_status;
 }
diff --git a/plat/xilinx/zynqmp/pm_service/zynqmp_pm_api_sys.c b/plat/xilinx/zynqmp/pm_service/zynqmp_pm_api_sys.c
index 215bf30f5..81f681f2e 100644
--- a/plat/xilinx/zynqmp/pm_service/zynqmp_pm_api_sys.c
+++ b/plat/xilinx/zynqmp/pm_service/zynqmp_pm_api_sys.c
@@ -305,14 +305,17 @@ enum pm_ret_status pm_req_suspend(enum pm_node_id target,
				  uint32_t latency, uint32_t state)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD5(payload, PM_REQ_SUSPEND, target, ack, latency, state);
 
 	if (ack == REQ_ACK_BLOCKING) {
-		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+		ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	} else {
-		return pm_ipi_send(primary_proc, payload);
+		ret = pm_ipi_send(primary_proc, payload);
 	}
+
+	return ret;
 }
 
 /**
@@ -339,7 +342,7 @@ enum pm_ret_status pm_req_wakeup(enum pm_node_id target,
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
 	uint64_t encoded_address;
-
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 	/* encode set Address into 1st bit of address */
 	encoded_address = address;
@@ -350,10 +353,12 @@ enum pm_ret_status pm_req_wakeup(enum pm_node_id target,
			 encoded_address >> 32, ack);
 
 	if (ack == REQ_ACK_BLOCKING) {
-		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+		ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	} else {
-		return pm_ipi_send(primary_proc, payload);
+		ret = pm_ipi_send(primary_proc, payload);
 	}
+
+	return ret;
 }
 
 /**
@@ -369,15 +374,18 @@ enum pm_ret_status pm_force_powerdown(enum pm_node_id target,
				      enum pm_request_ack ack)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD3(payload, PM_FORCE_POWERDOWN, target, ack);
 
 	if (ack == REQ_ACK_BLOCKING) {
-		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+		ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	} else {
-		return pm_ipi_send(primary_proc, payload);
+		ret = pm_ipi_send(primary_proc, payload);
 	}
+
+	return ret;
 }
 
 /**
@@ -439,15 +447,17 @@ enum pm_ret_status pm_set_wakeup_source(enum pm_node_id target,
 enum pm_ret_status pm_system_shutdown(uint32_t type, uint32_t subtype)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	if (type == (uint32_t)PMF_SHUTDOWN_TYPE_SETSCOPE_ONLY) {
 		/* Setting scope for subsequent PSCI reboot or shutdown */
 		pm_shutdown_scope = subtype;
-		return PM_RET_SUCCESS;
+	} else {
+		PM_PACK_PAYLOAD3(payload, PM_SYSTEM_SHUTDOWN, type, subtype);
+		ret = pm_ipi_send_non_blocking(primary_proc, payload);
 	}
 
-	PM_PACK_PAYLOAD3(payload, PM_SYSTEM_SHUTDOWN, type, subtype);
-	return pm_ipi_send_non_blocking(primary_proc, payload);
+	return ret;
 }
 
 /* APIs for managing PM slaves: */
@@ -468,14 +478,17 @@ enum pm_ret_status pm_req_node(enum pm_node_id nid,
			       enum pm_request_ack ack)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	PM_PACK_PAYLOAD5(payload, PM_REQ_NODE, nid, capabilities, qos, ack);
 
 	if (ack == REQ_ACK_BLOCKING) {
-		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+		ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	} else {
-		return pm_ipi_send(primary_proc, payload);
+		ret = pm_ipi_send(primary_proc, payload);
 	}
+
+	return ret;
 }
 
 /**
@@ -496,15 +509,18 @@ enum pm_ret_status pm_set_requirement(enum pm_node_id nid,
				      enum pm_request_ack ack)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	PM_PACK_PAYLOAD5(payload, PM_SET_REQUIREMENT, nid, capabilities, qos,
			 ack);
 
 	if (ack == REQ_ACK_BLOCKING) {
-		return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+		ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
 	} else {
-		return pm_ipi_send(primary_proc, payload);
+		ret = pm_ipi_send(primary_proc, payload);
 	}
+
+	return ret;
 }
 
 /* Miscellaneous API functions */
@@ -709,13 +725,16 @@ enum pm_ret_status pm_aes_engine(uint32_t address_high,
 enum pm_ret_status pm_get_callbackdata(uint32_t *data, size_t count)
 {
 	enum pm_ret_status ret = PM_RET_SUCCESS;
+
 	/* Return if interrupt is not from PMU */
 	if ((pm_ipi_irq_status(primary_proc) == 0U)) {
-		return ret;
+		goto exit_label;
 	}
 
 	ret = pm_ipi_buff_read_callb(data, count);
 	pm_ipi_irq_clear(primary_proc);
+
+exit_label:
 	return ret;
 }
 
@@ -770,7 +789,7 @@ enum pm_ret_status check_api_dependency(uint8_t id)
 {
 	uint8_t i;
 	uint32_t version_type;
-	enum pm_ret_status ret;
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	for (i = 0U; i < ARRAY_SIZE(api_dep_table); i++) {
 		if (api_dep_table[i].id == id) {
@@ -780,18 +799,20 @@ enum pm_ret_status check_api_dependency(uint8_t id)
 			ret = fw_api_version(api_dep_table[i].api_id,
					     &version_type, 1);
-			if (ret != (uint32_t)PM_RET_SUCCESS) {
-				return ret;
+			if (ret != PM_RET_SUCCESS) {
+				goto exit_label;
 			}
 
 			/* Check if fw version matches TF-A expected version */
 			if (version_type != tfa_expected_ver_id[api_dep_table[i].api_id]) {
-				return PM_RET_ERROR_NOTSUPPORTED;
+				ret = PM_RET_ERROR_NOTSUPPORTED;
+				goto exit_label;
 			}
 		}
 	}
 
-	return PM_RET_SUCCESS;
+exit_label:
+	return ret;
 }
 
 /**
@@ -806,20 +827,26 @@ enum pm_ret_status check_api_dependency(uint8_t id)
 static enum pm_ret_status feature_check_tfa(uint32_t api_id, uint32_t *version,
					    uint32_t *bit_mask)
 {
+	enum pm_ret_status ret = PM_RET_ERROR_NO_FEATURE;
+
 	switch (api_id) {
 	case PM_QUERY_DATA:
 		*version = TFA_API_QUERY_DATA_VERSION;
 		bit_mask[0] = (uint32_t)(PM_QUERY_FEATURE_BITMASK);
 		bit_mask[1] = (uint32_t)(PM_QUERY_FEATURE_BITMASK >> 32);
-		return PM_RET_SUCCESS;
+		ret = PM_RET_SUCCESS;
+		break;
 	case PM_GET_CALLBACK_DATA:
 	case PM_GET_TRUSTZONE_VERSION:
 	case PM_SET_SUSPEND_MODE:
 		*version = TFA_API_BASE_VERSION;
-		return PM_RET_SUCCESS;
+		ret = PM_RET_SUCCESS;
+		break;
 	default:
-		return PM_RET_ERROR_NO_FEATURE;
+		break;
 	}
+
+	return ret;
 }
 
 /**
@@ -834,6 +861,8 @@ static enum pm_ret_status feature_check_tfa(uint32_t api_id, uint32_t *version,
 static enum pm_ret_status get_tfa_version_for_partial_apis(uint32_t api_id,
							   uint32_t *version)
 {
+	enum pm_ret_status ret = PM_RET_ERROR_ARGS;
+
 	switch (api_id) {
 	case PM_SELF_SUSPEND:
 	case PM_REQ_WAKEUP:
@@ -854,13 +883,17 @@ static enum pm_ret_status get_tfa_version_for_partial_apis(uint32_t api_id,
 	case PM_PLL_GET_MODE:
 	case PM_REGISTER_ACCESS:
 		*version = TFA_API_BASE_VERSION;
-		return PM_RET_SUCCESS;
+		ret = PM_RET_SUCCESS;
+		break;
 	case PM_FEATURE_CHECK:
 		*version = FW_API_VERSION_2;
-		return PM_RET_SUCCESS;
+		ret = PM_RET_SUCCESS;
+		break;
 	default:
-		return PM_RET_ERROR_ARGS;
+		break;
 	}
+
+	return ret;
 }
 
 /**
@@ -876,6 +909,7 @@ static enum pm_ret_status feature_check_partial(uint32_t api_id,
						uint32_t *version)
 {
 	uint32_t status;
+	uint32_t ret = PM_RET_ERROR_NO_FEATURE;
 
 	switch (api_id) {
 	case PM_SELF_SUSPEND:
@@ -898,13 +932,17 @@ static enum pm_ret_status feature_check_partial(uint32_t api_id,
 	case PM_REGISTER_ACCESS:
 	case PM_FEATURE_CHECK:
 		status = check_api_dependency(api_id);
-		if (status != (uint32_t)PM_RET_SUCCESS) {
-			return status;
+		if (status != PM_RET_SUCCESS) {
+			ret = status;
+		} else {
+			ret = get_tfa_version_for_partial_apis(api_id, version);
 		}
-		return get_tfa_version_for_partial_apis(api_id, version);
+		break;
 	default:
-		return PM_RET_ERROR_NO_FEATURE;
+		break;
 	}
+
+	return ret;
 }
 
 /**
@@ -921,18 +959,18 @@ enum pm_ret_status pm_feature_check(uint32_t api_id, uint32_t *version,
				    uint32_t *bit_mask, uint8_t len)
 {
 	uint32_t ret_payload[RET_PAYLOAD_ARG_CNT] = {0U};
-	uint32_t status;
+	enum pm_ret_status status;
 
 	/* Get API version implemented in TF-A */
 	status = feature_check_tfa(api_id, version, bit_mask);
-	if (status != (uint32_t)PM_RET_ERROR_NO_FEATURE) {
-		return status;
+	if (status != PM_RET_ERROR_NO_FEATURE) {
+		goto exit_label;
 	}
 
 	/* Get API version implemented by firmware and TF-A both */
 	status = feature_check_partial(api_id, version);
-	if (status != (uint32_t)PM_RET_ERROR_NO_FEATURE) {
-		return status;
+	if (status != PM_RET_ERROR_NO_FEATURE) {
+		goto exit_label;
 	}
 
 	/* Get API version implemented by firmware */
@@ -941,7 +979,7 @@ enum pm_ret_status pm_feature_check(uint32_t api_id, uint32_t *version,
	 * firmware but implemented in TF-A */
 	if ((api_id != (uint32_t)PM_IOCTL) && (status != PM_RET_SUCCESS)) {
-		return status;
+		goto exit_label;
 	}
 
 	*version = ret_payload[0];
@@ -949,7 +987,8 @@ enum pm_ret_status pm_feature_check(uint32_t api_id, uint32_t *version,
 	/* Update IOCTL bit mask which are implemented in TF-A */
 	if ((api_id == (uint32_t)PM_IOCTL) || (api_id == (uint32_t)PM_GET_OP_CHARACTERISTIC)) {
 		if (len < 2U) {
-			return PM_RET_ERROR_ARGS;
+			status = PM_RET_ERROR_ARGS;
+			goto exit_label;
 		}
 		bit_mask[0] = ret_payload[1];
 		bit_mask[1] = ret_payload[2];
@@ -961,6 +1000,7 @@ enum pm_ret_status pm_feature_check(uint32_t api_id, uint32_t *version,
 		/* Requires for MISRA */
 	}
 
+exit_label:
 	return status;
 }
 
@@ -1112,7 +1152,7 @@ static enum pm_ret_status pm_clock_gate(uint32_t clock_id,
 	/* Check if clock ID is valid and return an error if it is not */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	if (enable != 0U) {
@@ -1130,6 +1170,7 @@ static enum pm_ret_status pm_clock_gate(uint32_t clock_id,
 		status = PM_RET_SUCCESS;
 	}
 
+exit_label:
 	return status;
 }
 
@@ -1147,15 +1188,19 @@ static enum pm_ret_status pm_clock_gate(uint32_t clock_id,
 enum pm_ret_status pm_clock_enable(uint32_t clock_id)
 {
 	struct pm_pll *pll;
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* First try to handle it as a PLL */
 	pll = pm_clock_get_pll(clock_id);
 	if (pll != NULL) {
-		return pm_clock_pll_enable(pll);
+		ret = pm_clock_pll_enable(pll);
+	} else {
+
+		/* It's an on-chip clock, PMU should configure clock's gate */
+		ret = pm_clock_gate(clock_id, 1);
 	}
 
-	/* It's an on-chip clock, PMU should configure clock's gate */
-	return pm_clock_gate(clock_id, 1);
+	return ret;
 }
 
 /**
@@ -1172,15 +1217,19 @@ enum pm_ret_status pm_clock_enable(uint32_t clock_id)
 enum pm_ret_status pm_clock_disable(uint32_t clock_id)
 {
 	struct pm_pll *pll;
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* First try to handle it as a PLL */
 	pll = pm_clock_get_pll(clock_id);
 	if (pll != NULL) {
-		return pm_clock_pll_disable(pll);
+		ret = pm_clock_pll_disable(pll);
+	} else {
+
+		/* It's an on-chip clock, PMU should configure clock's gate */
+		ret = pm_clock_gate(clock_id, 0);
 	}
 
-	/* It's an on-chip clock, PMU should configure clock's gate */
-	return pm_clock_gate(clock_id, 0);
+	return ret;
 }
 
 /**
@@ -1204,17 +1253,21 @@ enum pm_ret_status pm_clock_getstate(uint32_t clock_id,
 	/* First try to handle it as a PLL */
 	pll = pm_clock_get_pll(clock_id);
 	if (pll != NULL) {
-		return pm_clock_pll_get_state(pll, state);
+		status = pm_clock_pll_get_state(pll, state);
+		goto exit_label;
 	}
 
 	/* Check if clock ID is a valid on-chip clock */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD2(payload, PM_CLOCK_GETSTATE, clock_id);
-	return pm_ipi_send_sync(primary_proc, payload, state, 1);
+	status = pm_ipi_send_sync(primary_proc, payload, state, 1);
+
+exit_label:
+	return status;
 }
 
 /**
@@ -1242,13 +1295,14 @@ enum pm_ret_status pm_clock_setdivider(uint32_t clock_id,
 	/* Get PLL node ID using PLL clock ID */
 	status = pm_clock_get_pll_node_id(clock_id, &nid);
 	if (status == PM_RET_SUCCESS) {
-		return pm_pll_set_parameter(nid, PM_PLL_PARAM_FBDIV, divider);
+		status = pm_pll_set_parameter(nid, PM_PLL_PARAM_FBDIV, divider);
+		goto exit_label;
 	}
 
 	/* Check if clock ID is a valid on-chip clock */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	if (div0 == (divider & div0)) {
@@ -1258,12 +1312,16 @@ enum pm_ret_status pm_clock_setdivider(uint32_t clock_id,
 		div_id = PM_CLOCK_DIV1_ID;
 		val = (divider & ~div1) >> 16;
 	} else {
-		return PM_RET_ERROR_ARGS;
+		status = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD4(payload, PM_CLOCK_SETDIVIDER, clock_id, div_id, val);
-	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	status = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+
+exit_label:
+	return status;
 }
 
 /**
@@ -1280,7 +1338,7 @@ enum pm_ret_status pm_clock_setdivider(uint32_t clock_id,
 enum pm_ret_status pm_clock_getdivider(uint32_t clock_id,
				       uint32_t *divider)
 {
-	enum pm_ret_status status;
+	enum pm_ret_status status = PM_RET_SUCCESS;
 	enum pm_node_id nid;
 	uint32_t payload[PAYLOAD_ARG_CNT];
 	uint32_t val;
@@ -1288,22 +1346,23 @@ enum pm_ret_status pm_clock_getdivider(uint32_t clock_id,
 	/* Get PLL node ID using PLL clock ID */
 	status = pm_clock_get_pll_node_id(clock_id, &nid);
 	if (status == PM_RET_SUCCESS) {
-		return pm_pll_get_parameter(nid, PM_PLL_PARAM_FBDIV, divider);
+		status = pm_pll_get_parameter(nid, PM_PLL_PARAM_FBDIV, divider);
+		goto exit_label;
 	}
 
 	/* Check if clock ID is a valid on-chip clock */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	if ((pm_clock_has_div(clock_id, PM_CLOCK_DIV0_ID)) != 0U) {
 		/* Send request to the PMU to get div0 */
 		PM_PACK_PAYLOAD3(payload, PM_CLOCK_GETDIVIDER, clock_id,
-				PM_CLOCK_DIV0_ID);
+				 PM_CLOCK_DIV0_ID);
 		status = pm_ipi_send_sync(primary_proc, payload, &val, 1);
 		if (status != PM_RET_SUCCESS) {
-			return status;
+			goto exit_label;
 		}
 		*divider = val;
 	}
@@ -1311,14 +1370,14 @@ enum pm_ret_status pm_clock_getdivider(uint32_t clock_id,
 	if ((pm_clock_has_div(clock_id, PM_CLOCK_DIV1_ID)) != 0U) {
 		/* Send request to the PMU to get div1 */
 		PM_PACK_PAYLOAD3(payload, PM_CLOCK_GETDIVIDER, clock_id,
-				PM_CLOCK_DIV1_ID);
+				 PM_CLOCK_DIV1_ID);
 		status = pm_ipi_send_sync(primary_proc, payload, &val, 1);
 		if (status != PM_RET_SUCCESS) {
-			return status;
+			goto exit_label;
 		}
 		*divider |= val << 16;
 	}
-
+exit_label:
 	return status;
 }
 
 /**
@@ -1342,18 +1401,22 @@ enum pm_ret_status pm_clock_setparent(uint32_t clock_id,
 	/* First try to handle it as a PLL */
 	pll = pm_clock_get_pll_by_related_clk(clock_id);
 	if (pll != NULL) {
-		return pm_clock_pll_set_parent(pll, clock_id, parent_index);
+		status = pm_clock_pll_set_parent(pll, clock_id, parent_index);
+		goto exit_label;
 	}
 
 	/* Check if clock ID is a valid on-chip clock */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD3(payload, PM_CLOCK_SETPARENT, clock_id, parent_index);
-	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	status = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+
+exit_label:
+	return status;
 }
 
 /**
@@ -1377,18 +1440,22 @@ enum pm_ret_status pm_clock_getparent(uint32_t clock_id,
 	/* First try to handle it as a PLL */
 	pll = pm_clock_get_pll_by_related_clk(clock_id);
 	if (pll != NULL) {
-		return pm_clock_pll_get_parent(pll, clock_id, parent_index);
+		status = pm_clock_pll_get_parent(pll, clock_id, parent_index);
+		goto exit_label;
 	}
 
 	/* Check if clock ID is a valid on-chip clock */
 	status = pm_clock_id_is_valid(clock_id);
 	if (status != PM_RET_SUCCESS) {
-		return status;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD2(payload, PM_CLOCK_GETPARENT, clock_id);
-	return pm_ipi_send_sync(primary_proc, payload, parent_index, 1);
+	status = pm_ipi_send_sync(primary_proc, payload, parent_index, 1);
+
+exit_label:
+	return status;
 }
 
 /**
@@ -1655,20 +1722,26 @@ enum pm_ret_status pm_pll_set_parameter(enum pm_node_id nid,
					uint32_t value)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = 0;
 
 	/* Check if given node ID is a PLL node */
 	if ((nid < NODE_APLL) || (nid > NODE_IOPLL)) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Check if parameter ID is valid and return an error if it's not */
 	if (param_id >= PM_PLL_PARAM_MAX) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD4(payload, PM_PLL_SET_PARAMETER, nid, param_id, value);
-	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+
+exit_label:
+	return ret;
 }
 
 /**
@@ -1686,20 +1759,26 @@ enum pm_ret_status pm_pll_get_parameter(enum pm_node_id nid,
					uint32_t *value)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* Check if given node ID is a PLL node */
 	if ((nid < NODE_APLL) || (nid > NODE_IOPLL)) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Check if parameter ID is valid and return an error if it's not */
 	if (param_id >= PM_PLL_PARAM_MAX) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD3(payload, PM_PLL_GET_PARAMETER, nid, param_id);
-	return pm_ipi_send_sync(primary_proc, payload, value, 1);
+	ret = pm_ipi_send_sync(primary_proc, payload, value, 1);
+
+exit_label:
+	return ret;
 }
 
 /**
@@ -1719,20 +1798,26 @@ enum pm_ret_status pm_pll_get_parameter(enum pm_node_id nid,
 enum pm_ret_status pm_pll_set_mode(enum pm_node_id nid, enum pm_pll_mode mode)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* Check if given node ID is a PLL node */
 	if ((nid < NODE_APLL) || (nid > NODE_IOPLL)) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Check if PLL mode is valid */
 	if (mode >= PM_PLL_MODE_MAX) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+		goto exit_label;
 	}
 
 	/* Send request to the PMU */
 	PM_PACK_PAYLOAD3(payload, PM_PLL_SET_MODE, nid, mode);
-	return pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+	ret = pm_ipi_send_sync(primary_proc, payload, NULL, 0);
+
+exit_label:
+	return ret;
 }
 
 /**
@@ -1747,15 +1832,18 @@ enum pm_ret_status pm_pll_set_mode(enum pm_node_id nid, enum pm_pll_mode mode)
 enum pm_ret_status pm_pll_get_mode(enum pm_node_id nid, enum pm_pll_mode *mode)
 {
 	uint32_t payload[PAYLOAD_ARG_CNT];
+	enum pm_ret_status ret = PM_RET_SUCCESS;
 
 	/* Check if given node ID is a PLL node */
 	if ((nid < NODE_APLL) || (nid > NODE_IOPLL)) {
-		return PM_RET_ERROR_ARGS;
+		ret = PM_RET_ERROR_ARGS;
+	} else {
+		/* Send request to the PMU */
+		PM_PACK_PAYLOAD2(payload, PM_PLL_GET_MODE, nid);
+		ret = pm_ipi_send_sync(primary_proc, payload, mode, 1);
 	}
 
-	/* Send request to the PMU */
-	PM_PACK_PAYLOAD2(payload, PM_PLL_GET_MODE, nid);
-	return pm_ipi_send_sync(primary_proc, payload, mode, 1);
+	return ret;
 }
 
 /**
@@ -1783,7 +1871,8 @@ enum pm_ret_status pm_register_access(uint32_t register_access_id,
	    ((CSUDMA_BASE & address) != CSUDMA_BASE) &&
	    ((RSA_CORE_BASE & address) != RSA_CORE_BASE) &&
	    ((PMU_GLOBAL_BASE & address) != PMU_GLOBAL_BASE)) {
-		return PM_RET_ERROR_ACCESS;
+		ret = PM_RET_ERROR_ACCESS;
+		goto exit_label;
 	}
 
 	switch (register_access_id) {
@@ -1798,6 +1887,8 @@ enum pm_ret_status pm_register_access(uint32_t register_access_id,
 		WARN("Unimplemented register_access call\n\r");
 		break;
 	}
+
+exit_label:
 	return ret;
 }
diff --git a/plat/xilinx/zynqmp/pm_service/zynqmp_pm_svc_main.c b/plat/xilinx/zynqmp/pm_service/zynqmp_pm_svc_main.c
index 21435c62a..81db58b76 100644
--- a/plat/xilinx/zynqmp/pm_service/zynqmp_pm_svc_main.c
+++ b/plat/xilinx/zynqmp/pm_service/zynqmp_pm_svc_main.c
@@ -220,6 +220,7 @@ err:
 int32_t pm_setup(void)
 {
 	enum pm_ret_status err;
+	int32_t ret = -EINVAL;
 
 	pm_ipi_init(primary_proc);
 
@@ -227,17 +228,17 @@ int32_t pm_setup(void)
 	if (err != PM_RET_SUCCESS) {
 		ERROR("BL31: Failed to read Platform Management API version. "
		      "Return: %d\n", err);
-		return -EINVAL;
+		goto exit_label;
 	}
 
 	if (pm_ctx.api_version < PM_VERSION) {
 		ERROR("BL31: Platform Management API version error. Expected: "
		      "v%d.%d - Found: v%d.%d\n", PM_VERSION_MAJOR, PM_VERSION_MINOR,
		      pm_ctx.api_version >> 16, pm_ctx.api_version & 0xFFFFU);
-		return -EINVAL;
+		goto exit_label;
 	}
 
-	int32_t status = 0, ret = 0;
+	int32_t status = 0;
 #if ZYNQMP_WDT_RESTART
 	status = pm_wdt_restart_setup();
 	if (status)
@@ -255,6 +256,7 @@ int32_t pm_setup(void)
 
 	pm_up = (status == 0);
 
+exit_label:
 	return ret;
 }