diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.c
new file mode 100644
index 000000000..4537c8800
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.c
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include <drivers/delay_timer.h>
+#include <lib/spinlock.h>
+#include <plat/common/platform.h>
+#include <platform_def.h>
+
+#include <lib/mtk_init/mtk_init.h>
+#include <lib/pm/mtk_pm.h>
+#include <lpm/mt_lp_rm.h>
+#include "mt_cpu_pm.h"
+#include "mt_cpu_pm_cpc.h"
+#include "mt_cpu_pm_mbox.h"
+#include "mt_cpu_pm_smc.h"
+#include "mt_lp_irqremain.h"
+#include "mt_ppu.h"
+#include "mt_smp.h"
+#include <mtk_mmap_pool.h>
+#include <pwr_topology.h>
+
+/*
+ * The locker must use the bakery locker when cache turn off.
+ * Using spin_lock will has better performance.
+ */
+#ifdef MT_CPU_PM_USING_BAKERY_LOCK
+DEFINE_BAKERY_LOCK(mt_cpu_pm_lock);
+#define plat_cpu_pm_lock_init() bakery_lock_init(&mt_cpu_pm_lock)
+#define plat_cpu_pm_lock() bakery_lock_get(&mt_cpu_pm_lock)
+#define plat_cpu_pm_unlock() bakery_lock_release(&mt_cpu_pm_lock)
+#else
+spinlock_t mt_cpu_pm_lock;
+#define plat_cpu_pm_lock_init()
+#define plat_cpu_pm_lock() spin_lock(&mt_cpu_pm_lock)
+#define plat_cpu_pm_unlock() spin_unlock(&mt_cpu_pm_lock)
+#endif /* MT_CPU_PM_USING_BAKERY_LOCK */
+
+#define cpu_pm_unlikely(x) __builtin_expect(!!(x), 0)
+
+enum mt_pwr_node {
+	MT_PWR_SYSTEM_MCUSYS = 0,
+	MT_PWR_SYSTEM_VCORE,
+	MT_PWR_MAX
+};
+
+#define CPU_PM_DEPD_MASK		0x0000000F
+#define CPU_PM_DEPD_INIT		BIT(0)
+#define CPU_PM_DEPD_READY		BIT(1)
+#define CPU_PM_PLAT_READY		BIT(2)
+
+#define CPU_PM_AFFLV_CLUSTER_ABORT	BIT(0)
+#define CPU_PM_AFFLV_MCUSYS_ABORT	BIT(4)
+
+enum cpupm_pwr_req_def {
+	CPUPM_PWR_REQ_CLUSTER,
+	CPUPM_PWR_REQ_MCUSYS,
+	CPUPM_PWR_REQ_MAX
+};
+
+#ifdef CPU_PM_TINYSYS_SUPPORT
+#define CPU_PM_LP_READY		(CPU_PM_DEPD_INIT | \
+				 CPU_PM_DEPD_READY | \
+				 CPU_PM_PLAT_READY)
+#else
+#define CPU_PM_LP_READY		(CPU_PM_PLAT_READY)
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN && \
+	!CPU_PM_DOMAIN_CORE_ONLY
+static int mt_pwr_nodes[MT_PWR_MAX];
+static int plat_mt_lp_cpu_rc;
+static struct mt_cpu_pm_record cpu_pm_record;
+static uint64_t suspend_abort_reason;
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN &&
+	* !CPU_PM_DOMAIN_CORE_ONLY
+	*/
+
+static struct mtk_plat_dev_config plat_dev;
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+#define CPUPM_ARCH_TIME_MS(_ms)	((_ms) * 1000 * SYS_COUNTER_FREQ_IN_MHZ)
+#define CPUPM_BOOTUP_TIME_THR	CPUPM_ARCH_TIME_MS(CPUPM_READY_MS)
+
+static unsigned int cpu_pm_status;
+
+#ifdef CPU_PM_PWR_REQ
+unsigned int cpupm_pwr_reqs[CPUPM_PWR_REQ_MAX];
+#endif /* CPU_PM_PWR_REQ */
+
+#ifdef CPU_PM_SUSPEND_NOTIFY
+#define IS_CPU_SUPEND_SAVE(__cid) (cpu_stage[__cid].cpu_status & \
+			   (PER_CPU_STATUS_S2IDLE | PER_CPU_STATUS_HOTPLUG))
+
+/* make sure all available cores have passed by s2idle flow in kernel */
+#define IS_PLAT_ALL_ONLINE_CORES_S2IDLE(__st) ({ \
+	int _res = 0; \
+	if (cpu_pm_unlikely(cpu_stage[__st->info.cpuid].cpu_status \
+			& PER_CPU_STATUS_S2IDLE)) { \
+		unsigned int i;\
+		for (i = 0, _res = 1; i < PLATFORM_CORE_COUNT; ++i) \
+			if (!IS_CPU_SUPEND_SAVE(i)) { \
+				_res = 0; \
+				break; \
+			} \
+	} _res; })
+
+#else
+#define IS_PLAT_ALL_ONLINE_CORES_S2IDLE(__st) \
+	IS_PLAT_SUSPEND_ID(__st->pwr.state_id)
+
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+
+#ifdef CPU_PM_SUSPEND_NOTIFY
+static struct per_cpu_stage cpu_stage[PLATFORM_CORE_COUNT];
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+
+#if CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN
+#if CONFIG_MTK_PM_SUPPORT
+/*
+ * Common cluster resume: re-arm the cluster PPU for dynamic power
+ * control after the cluster comes back up.
+ */
+static void cpupm_cluster_resume_common(void)
+{
+	struct cluster_pwr_ctrl cls_pwr_ctrl;
+
+	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);
+
+#ifndef CPU_PM_ACP_FSM
+	/* Low-power target: memory retention when auto-off, else full off. */
+	mt_smp_ppu_pwr_set(&cls_pwr_ctrl.pwr, PPU_PWPR_DYNAMIC_MODE,
+		(plat_dev.auto_off) ? PPU_PWPR_MEM_RET : PPU_PWPR_OFF);
+#endif /* CPU_PM_ACP_FSM */
+}
+
+#ifdef CONFIG_MTK_CPU_ILDO
+
+#define read_cpupwrctlr()		read_cpupwrctlr_el3()
+#define write_cpupwrctlr(_v)		write_cpupwrctlr_el3(_v)
+#define mt_cpu_retention_enable(_ret_delay) \
+	write_cpupwrctlr((read_cpupwrctlr() & \
+			 (~(CPUPWRCTLR_EL3_WFI_RET_MASK << \
+			 CPUPWRCTLR_EL3_WFI_RET_SHIFT))) | \
+			 ((_ret_delay & CPUPWRCTLR_EL3_WFI_RET_MASK) << \
+			 CPUPWRCTLR_EL3_WFI_RET_SHIFT))
+#define mt_cpu_retention_disable() \
+	write_cpupwrctlr(read_cpupwrctlr() & \
+			 (~(CPUPWRCTLR_EL3_WFI_RET_MASK << \
+			 CPUPWRCTLR_EL3_WFI_RET_SHIFT)))
+
+static unsigned int cpu_retention_enable[PLATFORM_CORE_COUNT];
+
+/* Reset per-core retention settings; retention is opt-in per core. */
+static void cpupm_cpu_retention_init(void)
+{
+	unsigned int core;
+
+	for (core = 0U; core < PLATFORM_CORE_COUNT; core++)
+		cpu_retention_enable[core] = 0U;
+
+	INFO("[CPU_PM]: CPU_RET_MASK: 0x%x\n", CPU_PM_CPU_RET_MASK);
+}
+
+/* Program the WFI retention delay for the calling core. */
+static void cpupm_cpu_retention_set(unsigned int ret_delay)
+{
+	mt_cpu_retention_enable(ret_delay);
+}
+
+/*
+ * Wait for the brisket (CPU ILDO) init-done bit of @cpu, then apply the
+ * core's configured retention delay.
+ * Returns CPU_PM_RET_SET_SUCCESS on success, CPU_PM_RET_SET_FAIL on
+ * timeout or when retention is not enabled for this core.
+ */
+static unsigned int cpupm_cpu_ildo_state_valid(unsigned int cpu)
+{
+	unsigned int timeout = 0, ret_sta_reg = 0;
+
+	if (!cpu_retention_enable[cpu])
+		return CPU_PM_RET_SET_FAIL;
+
+	CPU_PM_ASSERT(cpu < PLATFORM_CORE_COUNT);
+	while (timeout < CPU_RET_TIMEOUT) {
+		/*
+		 * Fix: latch the status register so the timeout message
+		 * below reports a real value (ret_sta_reg was previously
+		 * read uninitialized - undefined behavior).
+		 */
+		ret_sta_reg = mmio_read_32(CPU_EB_RET_STA_REG);
+		if (ret_sta_reg & BIT(cpu)) {
+			cpupm_cpu_retention_set(cpu_retention_enable[cpu]);
+			return CPU_PM_RET_SET_SUCCESS;
+		}
+		udelay(1);
+		timeout++;
+	}
+
+	ERROR("[CPU_RET] wait brisket init timeout, sta:%x\n", ret_sta_reg);
+	return CPU_PM_RET_SET_FAIL;
+}
+
+/* OR together every core's retention setting: non-zero if any core set. */
+unsigned int cpupu_get_cpu_retention_control(void)
+{
+	unsigned int core;
+	unsigned int merged = 0U;
+
+	for (core = 0U; core < PLATFORM_CORE_COUNT; core++)
+		merged |= cpu_retention_enable[core];
+
+	return merged;
+}
+
+/*
+ * Enable (non-zero delay) or disable (zero) WFI retention for the
+ * calling core. Only allowed once CPU PM is fully ready and the core is
+ * covered by CPU_PM_CPU_RET_MASK.
+ * Returns CPU_PM_RET_SET_SUCCESS / CPU_PM_RET_SET_FAIL.
+ */
+unsigned int cpupm_cpu_retention_control(unsigned int enable)
+{
+	unsigned int ret = CPU_PM_RET_SET_FAIL;
+	unsigned int cpu = plat_my_core_pos();
+
+	if ((cpu_pm_status == CPU_PM_LP_READY) &&
+	    (CPU_PM_CPU_RET_MASK & BIT(cpu))) {
+		/* Only the low 3 bits encode the retention delay. */
+		enable &= 0x7;
+		/* (was masked twice - one mask is sufficient) */
+		cpu_retention_enable[cpu] = enable;
+		if (enable) {
+			ret = cpupm_cpu_ildo_state_valid(cpu);
+		} else {
+			mt_cpu_retention_disable();
+			ret = CPU_PM_RET_SET_SUCCESS;
+		}
+	}
+	return ret;
+}
+#else
+#define cpupm_cpu_ildo_state_valid(cpu)
+#endif /* CONFIG_MTK_CPU_ILDO */
+
+/*
+ * Common per-core resume: clear the CPC core-on hint for this core and,
+ * when CONFIG_MTK_CPU_ILDO is set, re-validate/re-apply retention
+ * (cpupm_cpu_ildo_state_valid expands to nothing otherwise).
+ */
+static void cpupm_cpu_resume_common(const struct mtk_cpupm_pwrstate *state)
+{
+	CPU_PM_ASSERT(state);
+	mtk_cpc_core_on_hint_clr(state->info.cpuid);
+	cpupm_cpu_ildo_state_valid(state->info.cpuid);
+}
+#endif /* CONFIG_MTK_PM_SUPPORT */
+#endif /* CONFIG_MTK_CPU_SUSPEND_EN || CONFIG_MTK_SMP_EN */
+
+#define RVBARADDR_ONKEEPON_SEL		(MCUCFG_BASE + 0x388)
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
+/*
+ * Power-domain coordination callback for the SMP on/off path: forwards
+ * the computed group hint to MCUPM when tinysys support is enabled.
+ * cur_afflv/state are unused here but part of the callback signature.
+ */
+static int cpupm_cpu_smp_afflv(unsigned int cur_afflv,
+			       const struct mtk_cpupm_pwrstate *state,
+			       const struct pwr_toplogy *topology)
+{
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	if (topology)
+		mtk_set_mcupm_group_hint(topology->group);
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+	return 0;
+}
+
+/* Program the boot address for @cpu and power the core on. */
+static int cpupm_cpu_pwr_on_prepare(unsigned int cpu, uintptr_t entry)
+{
+	struct cpu_pwr_ctrl pwr_ctrl = {};
+	int ret = MTK_CPUPM_E_OK;
+
+	/*
+	 * NOTE(review): when ONKEEPON_SEL already reads 0x1 this writes
+	 * 0x1 back again - presumably a keep-on workaround, but confirm
+	 * the intended value with the SPM spec.
+	 */
+	if (mmio_read_32(RVBARADDR_ONKEEPON_SEL) == 0x1) {
+		ERROR("ONKEEPON_SEL=%x, CPC_FLOW_CTRL_CFG=%x\n",
+		      mmio_read_32(RVBARADDR_ONKEEPON_SEL),
+		      mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG));
+		mmio_write_32(RVBARADDR_ONKEEPON_SEL, 0x1);
+	}
+
+	PER_CPU_PWR_CTRL(pwr_ctrl, cpu);
+	mt_smp_core_bootup_address_set(0, cpu, &pwr_ctrl, entry);
+	mt_smp_core_init_arch(0, cpu, 1, &pwr_ctrl);
+	ret = mt_smp_power_core_on(cpu, &pwr_ctrl);
+
+	return ret;
+}
+
+/*
+ * SMP (hotplug) core-on hook: run the common resume path, clear the
+ * hotplug flag and run power-domain coordination for the SMP-on event.
+ */
+void cpupm_cpu_resume_smp(const struct mtk_cpupm_pwrstate *state)
+{
+	CPU_PM_ASSERT(state);
+
+	cpupm_cpu_resume_common(state);
+#ifdef CPU_PM_SUSPEND_NOTIFY
+	cpu_stage[state->info.cpuid].cpu_status &= ~PER_CPU_STATUS_HOTPLUG;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+	pwr_domain_coordination(PWR_DOMAIN_SMP_ON,
+				0,
+				state,
+				cpupm_cpu_smp_afflv);
+}
+
+/*
+ * SMP (hotplug) core-off hook: power the core off, mark it hotplugged
+ * and run power-domain coordination for the SMP-off event.
+ */
+void cpupm_cpu_suspend_smp(const struct mtk_cpupm_pwrstate *state)
+{
+	struct cpu_pwr_ctrl pwr_ctrl = {};
+
+	CPU_PM_ASSERT(state);
+
+	PER_CPU_PWR_CTRL(pwr_ctrl, state->info.cpuid);
+	mt_smp_power_core_off(state->info.cpuid, &pwr_ctrl);
+
+#ifdef CPU_PM_SUSPEND_NOTIFY
+	cpu_stage[state->info.cpuid].cpu_status |= PER_CPU_STATUS_HOTPLUG;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+	pwr_domain_coordination(PWR_DOMAIN_SMP_OFF,
+				0,
+				state,
+				cpupm_cpu_smp_afflv);
+}
+
+/*
+ * One-time SMP bring-up for the boot core: sanitize the CPC reset
+ * power-on setting, configure the cluster PPU operating mode, then run
+ * the normal power-on-prepare/resume path for @cpu.
+ */
+static void cpupm_smp_init(unsigned int cpu, uintptr_t sec_entrypoint)
+{
+	unsigned int reg;
+	struct mtk_cpupm_pwrstate state = {
+		.info = {
+			.cpuid = cpu,
+			.mode = MTK_CPU_PM_SMP,
+		},
+		.pwr = {
+			.afflv = 0,
+			.state_id = 0,
+		},
+	};
+
+	struct cluster_pwr_ctrl cls_pwr_ctrl;
+
+	reg = mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG);
+	if (reg & CPC_MCUSYS_CPC_RESET_PWR_ON_EN) {
+		INFO("[%s:%d][CPU_PM] reset pwr on is enabled and clear it!\n",
+		     __func__, __LINE__);
+		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
+				CPC_MCUSYS_CPC_RESET_PWR_ON_EN);
+	}
+
+	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);
+
+	mt_smp_ppu_op_set(&cls_pwr_ctrl.pwr,
+			  PPU_PWPR_OP_DYNAMIC_MODE,
+			  PPU_PWPR_OP_ONE_SLICE_SF_ONLY);
+	cpupm_cluster_resume_common();
+	cpupm_cpu_pwr_on_prepare(cpu, sec_entrypoint);
+	cpupm_cpu_resume_smp(&state);
+}
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+
+#if !CPU_PM_DOMAIN_CORE_ONLY
+static unsigned int plat_prev_stateid;
+/*
+ * Last-core suspend preparation: decide the platform state id, find a
+ * matching low-power-resource constraint and arm CPC for mcusys off.
+ * Records the abort reason on every failure path.
+ * Returns MTK_CPUPM_E_OK when mcusys off may proceed.
+ */
+static int mcusys_prepare_suspend(unsigned int cur_afflv,
+				  const struct mtk_cpupm_pwrstate *state,
+				  const struct pwr_toplogy *topology)
+{
+	unsigned int stateid;
+
+	if (!state)
+		return MTK_CPUPM_E_FAIL;
+
+	stateid = state->pwr.state_id;
+
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	if (topology)
+		mtk_set_mcupm_group_hint(topology->group);
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+
+	/* Nothing more to do unless the whole mcusys is going down. */
+	if (!IS_PLAT_MCUSYSOFF_AFFLV(cur_afflv))
+		return MTK_CPUPM_E_OK;
+
+#ifdef CPU_PM_PWR_REQ
+	/* Outstanding cluster/mcusys power requests veto mcusys off. */
+	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER] ||
+	    cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS]) {
+		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_PWR_REQ;
+		goto mt_pwr_mcusysoff_break;
+	}
+#endif /* CPU_PM_PWR_REQ */
+
+	if (mtk_cpc_mcusys_off_prepare() != CPC_SUCCESS) {
+		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_LAST_CORE;
+		goto mt_pwr_mcusysoff_break;
+	}
+
+	/* Refine the state id from s2idle status and power-node votes. */
+	if (IS_PLAT_ALL_ONLINE_CORES_S2IDLE(state))
+		stateid = MT_PLAT_PWR_STATE_SUSPEND;
+	else if (mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS] != 0)
+		stateid = MT_PLAT_PWR_STATE_MCUSYS;
+	else if (mt_pwr_nodes[MT_PWR_SYSTEM_VCORE] != 0)
+		stateid = MT_PLAT_PWR_STATE_SYSTEM_VCORE;
+	else
+		stateid = MT_PLAT_PWR_STATE_MCUSYS;
+
+	plat_prev_stateid = stateid;
+	plat_mt_lp_cpu_rc =
+		mt_lp_rm_find_constraint(0, state->info.cpuid, stateid, NULL);
+
+	if (plat_mt_lp_cpu_rc < 0) {
+		suspend_abort_reason = MTK_PM_SUSPEND_ABORT_RC_INVALID;
+		goto mt_pwr_mcusysoff_reflect;
+	}
+
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	mtk_set_cpu_pm_preffered_cpu(state->info.cpuid);
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+	suspend_abort_reason = MTK_PM_SUSPEND_OK;
+	return MTK_CPUPM_E_OK;
+
+mt_pwr_mcusysoff_reflect:
+	mtk_cpc_mcusys_off_reflect();
+mt_pwr_mcusysoff_break:
+	/* A negative rc tells resume/suspend paths mcusys off was aborted. */
+	plat_mt_lp_cpu_rc = -1;
+
+	return MTK_CPUPM_E_FAIL;
+}
+
+#define RECORD_NAME_LEN		(16)
+#define RECORD_NAME_LEN_SMC	(8)
+/*
+ * Record the human-readable name of the power state being resumed from,
+ * packed into two 64-bit words (8 chars each, little-endian byte order)
+ * for later retrieval via SMC, and bump the mcusys-off counter.
+ */
+void mtk_cpu_pm_mcusys_record(const struct mtk_cpupm_pwrstate *state)
+{
+	unsigned int i = 0, j = 0;
+	unsigned int stateid = state->pwr.state_id;
+	char name[RECORD_NAME_LEN];
+	int ret = 0;
+	uint64_t tran = 0;
+
+	memset(name, 0, sizeof(name));
+	/* snprintf's size already reserves room for the NUL terminator. */
+	switch (stateid) {
+	case MT_PLAT_PWR_STATE_MCUSYS:
+	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
+		ret = snprintf(name, sizeof(name), "mcusys_off");
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_MEM:
+		ret = snprintf(name, sizeof(name), "system_mem");
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_PLL:
+		ret = snprintf(name, sizeof(name), "system_pll");
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_BUS:
+		ret = snprintf(name, sizeof(name), "system_bus");
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
+		ret = snprintf(name, sizeof(name), "system_vcore");
+		break;
+	case MT_PLAT_PWR_STATE_SUSPEND:
+		ret = snprintf(name, sizeof(name), "suspend");
+		break;
+	default:
+		ret = snprintf(name, sizeof(name), "Unknown_State");
+		break;
+	}
+
+	if (ret < 0) {
+		INFO("[%s]snprintf error%d\n", __func__, ret);
+		return;
+	}
+
+	/* Pack chars 0-7 into name[0] and chars 8-15 into name[1]. */
+	memset(cpu_pm_record.name, 0, sizeof(cpu_pm_record.name));
+	while ((i < RECORD_NAME_LEN) && (name[i] != '\0')) {
+		if (i == RECORD_NAME_LEN_SMC)
+			++j;
+		tran = (uint64_t)(name[i] & 0xFF);
+		cpu_pm_record.name[j] |= (tran <<
+			((i - (RECORD_NAME_LEN_SMC * j)) << 3));
+		/* (dropped a duplicate NUL check already done by the loop) */
+		i++;
+	}
+
+	cpu_pm_record.cnt++;
+}
+
+/* Return how many times the mcusys-off record has been updated. */
+uint64_t mtk_mcusys_off_record_cnt_get(void)
+{
+	return cpu_pm_record.cnt;
+}
+
+/*
+ * Return one 8-byte half of the recorded state name, alternating
+ * between the two halves on successive calls.
+ */
+uint64_t mtk_mcusys_off_record_name_get(void)
+{
+	static unsigned int idx;
+	uint64_t name_part = cpu_pm_record.name[idx];
+
+	idx ^= 1U;
+	return name_part;
+}
+
+/*
+ * First-core resume after (attempted) mcusys off: release the low-power
+ * constraint taken in mcusys_prepare_suspend, reflect CPC state, record
+ * the resumed state and clear this core's external wakeup request.
+ */
+static int mcusys_prepare_resume(unsigned int cur_afflv,
+				 const struct mtk_cpupm_pwrstate *state,
+				 const struct pwr_toplogy *topology)
+{
+	uint32_t cpu = plat_my_core_pos();
+
+	if (!state)
+		return MTK_CPUPM_E_FAIL;
+
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	if (topology)
+		mtk_set_mcupm_group_hint(topology->group);
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+
+	if (!IS_PLAT_MCUSYSOFF_AFFLV(cur_afflv))
+		return MTK_CPUPM_E_OK;
+
+#ifdef CPU_PM_PWR_REQ
+	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER] ||
+	    cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS])
+		return MTK_CPUPM_E_FAIL;
+#endif /* CPU_PM_PWR_REQ */
+
+	/* Negative rc means mcusys off was aborted - nothing to undo. */
+	if (plat_mt_lp_cpu_rc < 0)
+		return MTK_CPUPM_E_FAIL;
+
+	mt_lp_rm_reset_constraint(plat_mt_lp_cpu_rc,
+				  state->info.cpuid,
+				  plat_prev_stateid);
+	mtk_cpc_mcusys_off_reflect();
+
+	mtk_cpu_pm_mcusys_record(state);
+
+	/* Clear EXT_INT_WAKEUP_REQ of the first-on CPU */
+	mmio_write_32(SPM_EXT_INT_WAKEUP_REQ_CLR, BIT(cpu));
+	if (mmio_read_32(SPM_EXT_INT_WAKEUP_REQ)) {
+		NOTICE("EXT_INT_WAKEUP_REQ(%u) is not cleared. CPU: %lu\n",
+		       mmio_read_32(SPM_EXT_INT_WAKEUP_REQ),
+		       BIT(cpu));
+		CPU_PM_ASSERT(0);
+	}
+
+	return MTK_CPUPM_E_OK;
+}
+
+/* Return why the last suspend attempt aborted (MTK_PM_SUSPEND_OK if not). */
+uint64_t mtk_suspend_abort_reason_get(void)
+{
+	return suspend_abort_reason;
+}
+#endif /* CPU_PM_DOMAIN_CORE_ONLY */
+
+/*
+ * Build the power-state bitmap for a core going down: always includes
+ * the core domain; unless core-only mode is configured, also votes on
+ * the mcusys/vcore power nodes and runs power-domain coordination
+ * (which may add cluster/mcusys bits and trigger suspend preparation).
+ */
+static unsigned int cpupm_do_pstate_off(const mtk_pstate_type psci_state,
+					const struct mtk_cpupm_pwrstate *state)
+{
+	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;
+
+#ifdef CPU_PM_DOMAIN_CORE_ONLY
+	pstate &= ~(MT_CPUPM_PWR_DOMAIN_CLUSTER |
+		    MT_CPUPM_PWR_DOMAIN_PERCORE_DSU |
+		    MT_CPUPM_PWR_DOMAIN_MCUSYS);
+#else
+	if (!state || (state->pwr.afflv > PLAT_MAX_PWR_LVL)) {
+		CPU_PM_ASSERT(state);
+		CPU_PM_ASSERT(state->pwr.afflv <= PLAT_MAX_PWR_LVL);
+	}
+
+	/*
+	 * If all core afflv is higher than PLAT_MAX_RET_STATE
+	 * and state's id is MT_PLAT_PWR_STATE_MCUSYS
+	 */
+	switch (state->pwr.state_id) {
+	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
+		mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS]++;
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
+		mt_pwr_nodes[MT_PWR_SYSTEM_VCORE]++;
+		break;
+	default:
+		break;
+	}
+	pstate |= pwr_domain_coordination(PWR_DOMAIN_OFF,
+					  psci_state,
+					  state,
+					  mcusys_prepare_suspend);
+
+#endif /* CPU_PM_DOMAIN_CORE_ONLY */
+
+	return pstate;
+}
+
+/*
+ * Build the power-state bitmap for a core coming up: the mirror image
+ * of cpupm_do_pstate_off - removes the votes added at off time (the
+ * asserts catch an unbalanced vote count) and runs power-domain
+ * coordination for the on event.
+ */
+static unsigned int cpupm_do_pstate_on(const mtk_pstate_type psci_state,
+				       const struct mtk_cpupm_pwrstate *state)
+{
+	unsigned int pstate = MT_CPUPM_PWR_DOMAIN_CORE;
+
+#ifdef CPU_PM_DOMAIN_CORE_ONLY
+	pstate &= ~(MT_CPUPM_PWR_DOMAIN_CLUSTER |
+		    MT_CPUPM_PWR_DOMAIN_PERCORE_DSU |
+		    MT_CPUPM_PWR_DOMAIN_MCUSYS);
+#else
+	CPU_PM_ASSERT(state);
+
+	if (state->pwr.afflv > PLAT_MAX_PWR_LVL)
+		CPU_PM_ASSERT(0);
+
+	switch (state->pwr.state_id) {
+	case MT_PLAT_PWR_STATE_MCUSYS_BUCK:
+		mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS]--;
+		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_MCUSYS] >= 0);
+		break;
+	case MT_PLAT_PWR_STATE_SYSTEM_VCORE:
+		mt_pwr_nodes[MT_PWR_SYSTEM_VCORE]--;
+		CPU_PM_ASSERT(mt_pwr_nodes[MT_PWR_SYSTEM_VCORE] >= 0);
+		break;
+	default:
+		break;
+	}
+
+	pstate |= pwr_domain_coordination(PWR_DOMAIN_ON,
+					  psci_state,
+					  state,
+					  mcusys_prepare_resume);
+#endif /* CPU_PM_DOMAIN_CORE_ONLY */
+	return pstate;
+}
+
+/* Per-core resume hook registered with the PM framework. */
+static void cpupm_cpu_resume(const struct mtk_cpupm_pwrstate *state)
+{
+	cpupm_cpu_resume_common(state);
+}
+
+/* Cluster resume hook: re-arm the cluster PPU, then log CPC latency. */
+static void cpupm_cluster_resume(const struct mtk_cpupm_pwrstate *state)
+{
+	cpupm_cluster_resume_common();
+	mtk_cpu_pm_save_cpc_latency(DEV_TYPE_CPUSYS);
+}
+
+#if CPU_PM_PWR_REQ || CPU_PM_ACP_FSM
+/*
+ * Cluster suspend hook. With CPU_PM_PWR_REQ, an outstanding cluster
+ * power request forces the PPU to stay on. With CPU_PM_ACP_FSM, waits
+ * for the ACP power-control FSM to go idle before reprogramming the
+ * PPU, then polls until the PPU leaves the SF-only operating state.
+ */
+static void cpupm_cluster_suspend(const struct mtk_cpupm_pwrstate *state)
+{
+	struct cluster_pwr_ctrl cls_pwr_ctrl;
+
+	PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);
+
+#ifdef CPU_PM_PWR_REQ
+	if (cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER]) {
+		mt_smp_ppu_pwr_dynamic_set(&cls_pwr_ctrl.pwr,
+					   PPU_PWPR_ON);
+		return;
+	}
+#endif /* CPU_PM_PWR_REQ */
+#ifdef CPU_PM_ACP_FSM
+	unsigned int val, pwsr, timeout_cnt = 0;
+
+	/* Wait for the ACP power-control FSM to reach idle. */
+	do {
+		val = mmio_read_32(MCUSYS_ACP_UTB_FSM);
+		DO_ACP_FSM_WAIT_TIMEOUT(timeout_cnt);
+	} while ((val & ACP_PWR_CTRL_OP_STATUS) != ACP_PWR_CTRL_OP_ST_IDLE);
+
+	mt_smp_ppu_set(&cls_pwr_ctrl.pwr,
+		       PPU_PWPR_OP_DYNAMIC_MODE,
+		       DSU_PPU_PWPR_OP_MODE_DEF,
+		       PPU_PWPR_DYNAMIC_MODE,
+		       (plat_dev.auto_off) ?
+		       PPU_PWPR_MEM_RET :
+		       PPU_PWPR_OFF);
+
+	timeout_cnt = 0;
+	/* Poll PWSR until the PPU has left the SF-only state. */
+	do {
+		pwsr = mmio_read_32(cls_pwr_ctrl.pwr.ppu_pwsr);
+		DO_ACP_FSM_WAIT_TIMEOUT(timeout_cnt);
+	} while ((pwsr & PPU_PWSR_OP_STATUS) == PPU_OP_ST_SF_ONLY);
+#endif /* CPU_PM_ACP_FSM */
+}
+#endif /* CPU_PM_PWR_REQ || CPU_PM_ACP_FSM */
+
+/* Mcusys resume hook: release retained IRQs and log CPC latency. */
+static void cpupm_mcusys_resume(const struct mtk_cpupm_pwrstate *state)
+{
+#ifdef CPU_PM_IRQ_REMAIN_ENABLE
+	mt_lp_irqremain_release();
+#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
+	mtk_cpu_pm_save_cpc_latency(DEV_TYPE_MCUSYS);
+}
+
+/*
+ * Mcusys suspend hook: applies the low-power constraint chosen during
+ * suspend preparation, takes over remaining IRQs and, when auto-off is
+ * enabled, switches the DSU PPU from retention to full off.
+ * Bails out early if mcusys off was aborted (negative rc).
+ */
+static void cpupm_mcusys_suspend(const struct mtk_cpupm_pwrstate *state)
+{
+#if !CPU_PM_DOMAIN_CORE_ONLY
+	struct cluster_pwr_ctrl cls_pwr_ctrl;
+
+	assert(state);
+
+	if (plat_mt_lp_cpu_rc < 0)
+		return;
+
+	mt_lp_rm_do_constraint(plat_mt_lp_cpu_rc,
+			       state->info.cpuid,
+			       plat_prev_stateid);
+
+#ifdef CPU_PM_IRQ_REMAIN_ENABLE
+	mt_lp_irqremain_aquire();
+#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
+	if (plat_dev.auto_off) {
+		/*
+		 * The DSU ppu setting is DYN_MEM_RET when auto dormant enable.
+		 * Need to set PPU to DYN_OFF when mcusys off.
+		 *
+		 */
+		PER_CLUSTER_PWR_CTRL(cls_pwr_ctrl, 0);
+		mt_smp_ppu_pwr_set(&cls_pwr_ctrl.pwr,
+				   PPU_PWPR_DYNAMIC_MODE,
+				   PPU_PWPR_OFF);
+	}
+#endif /* CPU_PM_DOMAIN_CORE_ONLY */
+}
+
+/*
+ * Translate a PSCI power request into the platform power-state bitmap.
+ * SMP mode only ever involves the core domain; otherwise dispatch on
+ * the requested power direction.
+ */
+static unsigned int cpupm_get_pstate(enum mt_cpupm_pwr_domain domain,
+				     const mtk_pstate_type psci_state,
+				     const struct mtk_cpupm_pwrstate *state)
+{
+	unsigned int pstate = 0;
+
+	if (!state)
+		return 0;
+
+	if (state->info.mode == MTK_CPU_PM_SMP)
+		return MT_CPUPM_PWR_DOMAIN_CORE;
+
+	switch (domain) {
+	case CPUPM_PWR_OFF:
+		pstate = cpupm_do_pstate_off(psci_state, state);
+		break;
+	case CPUPM_PWR_ON:
+		pstate = cpupm_do_pstate_on(psci_state, state);
+		break;
+	default:
+		INFO("[%s:%d][CPU_PM] unknown pwr domain :%d\n",
+		     __func__, __LINE__, domain);
+		assert(0);
+		break;
+	}
+
+	return pstate;
+}
+
+#define CPUPM_READY_MS (40000)
+/*
+ * Gate low-power states until all readiness conditions are met: the
+ * tinysys mailbox handshakes (when supported) and a minimum boot-up
+ * time. Each call advances at most one readiness step; returns
+ * MTK_CPUPM_E_OK only once cpu_pm_status reaches CPU_PM_LP_READY.
+ * afflv/state are unused but required by the callback signature.
+ */
+static int cpupm_pwr_state_valid(unsigned int afflv, unsigned int state)
+{
+	if (cpu_pm_status == CPU_PM_LP_READY)
+		return MTK_CPUPM_E_OK;
+
+	/*
+	 * Fix: the original re-tested cpu_pm_status != CPU_PM_LP_READY
+	 * here, which is always true after the early return above.
+	 */
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	int status = 0;
+
+	if (!(cpu_pm_status & CPU_PM_DEPD_INIT)) {
+		status = mtk_lp_depd_condition(
+			CPUPM_MBOX_WAIT_DEV_INIT);
+		if (status == 0) {
+			plat_cpu_pm_lock();
+			cpu_pm_status |= CPU_PM_DEPD_INIT;
+			plat_cpu_pm_unlock();
+		}
+	} else if (!(cpu_pm_status & CPU_PM_DEPD_READY)) {
+		status = mtk_lp_depd_condition(
+			CPUPM_MBOX_WAIT_TASK_READY);
+		if (status == 0) {
+			plat_cpu_pm_lock();
+			cpu_pm_status |= CPU_PM_DEPD_READY;
+			plat_cpu_pm_unlock();
+		}
+	} else {
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+		uint64_t arch_time = read_cntpct_el0();
+
+		if (arch_time > (uint64_t)CPUPM_BOOTUP_TIME_THR) {
+			plat_cpu_pm_lock();
+			cpu_pm_status |= CPU_PM_PLAT_READY;
+			plat_cpu_pm_unlock();
+		}
+#ifdef CPU_PM_TINYSYS_SUPPORT
+	}
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+	return MTK_CPUPM_E_FAIL;
+}
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+
+#define CPUPM_PWR_STAT_REQ_UID_MAGIC	(0xbacd1103)
+
+#define IS_VALID_CPUPM_PWR_STAT_REQ(mg) \
+	((mg & CPUPM_PWR_STAT_REQ_UID_MAGIC) == CPUPM_PWR_STAT_REQ_UID_MAGIC)
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+/*
+ * Dispatcher for CPU-PM invoke requests (wake-status query and power
+ * request acquire/release). priv carries the request payload and is
+ * validated per func_id before use.
+ */
+static int cpupm_invoke(unsigned int func_id, void *priv)
+{
+	int ret = MTK_CPUPM_E_OK;
+	int i, reverse = 0;
+	struct cpupm_invoke_data *save_status = (struct cpupm_invoke_data *) priv;
+	struct cpupm_pwr_req *req = (struct cpupm_pwr_req *)priv;
+	unsigned int pwr_req;
+	unsigned int cpu_status;
+
+	switch (func_id) {
+#ifdef CPU_PM_SUSPEND_NOTIFY
+	case CPUPM_INVOKE_WAKED_CPU:
+		if (priv) {
+			/* Report cores NOT power-saved as a set bitmask. */
+			for (i = 0; i < PLATFORM_CORE_COUNT; i++) {
+				cpu_status = cpu_stage[i].cpu_status;
+				if (IS_CPUPM_SAVE_PWR_STATUS(cpu_status))
+					reverse |= BIT(i);
+			}
+			save_status->val.v_u32 = ~reverse;
+		} else
+			ret = MTK_CPUPM_E_ERR;
+		break;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+
+#ifdef CPU_PM_PWR_REQ
+	case CPUPM_INVOKE_PWR_REQ_ACTIVE:
+		if (priv) {
+			if (req->stat.uid == CPUPM_PWR_REQ_UID_MAGIC)
+				req->stat.uid = CPUPM_PWR_STAT_REQ_UID_MAGIC;
+			else
+				ret = MTK_CPUPM_E_ERR;
+		} else
+			ret = MTK_CPUPM_E_ERR;
+		break;
+	case CPUPM_INVOKE_PWR_REQ_ACQUIRE:
+	case CPUPM_INVOKE_PWR_REQ_RELASE:
+		if (priv && (IS_VALID_CPUPM_PWR_STAT_REQ(req->stat.uid))) {
+			/*
+			 * Fix: read req->req only after the NULL check;
+			 * it was previously dereferenced unconditionally
+			 * at function entry (NULL dereference).
+			 */
+			pwr_req = req->req;
+			plat_cpu_pm_lock();
+			if (func_id == CPUPM_INVOKE_PWR_REQ_ACQUIRE) {
+				/* Cluster requests implicitly hold mcusys. */
+				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
+					pwr_req |=
+					MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER;
+
+				/* Count only bits not already requested. */
+				pwr_req = pwr_req & ~req->stat.sta_req;
+
+				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
+					cpupm_pwr_reqs[CPUPM_PWR_REQ_CLUSTER]++;
+
+				if ((pwr_req & MT_CPUPM_MCUSYS_REQ) &&
+				    !(req->stat.sta_req & MT_CPUPM_MCUSYS_REQ))
+					cpupm_pwr_reqs[CPUPM_PWR_REQ_MCUSYS]++;
+
+				req->stat.sta_req |= pwr_req;
+			} else {
+				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER)
+					pwr_req |=
+					MT_CPUPM_PWR_DOMAIN_MCUSYS_BY_CLUSTER;
+
+				/* Release only bits currently held. */
+				pwr_req = pwr_req & req->stat.sta_req;
+				req->stat.sta_req &= ~pwr_req;
+
+				if (pwr_req & MT_CPUPM_PWR_DOMAIN_CLUSTER) {
+					if (cpupm_pwr_reqs[
+					   CPUPM_PWR_REQ_CLUSTER] > 0)
+						cpupm_pwr_reqs[
+						CPUPM_PWR_REQ_CLUSTER]--;
+				}
+
+				if ((pwr_req & MT_CPUPM_MCUSYS_REQ) &&
+				    !(req->stat.sta_req &
+				    MT_CPUPM_MCUSYS_REQ)) {
+					if (cpupm_pwr_reqs[
+					    CPUPM_PWR_REQ_MCUSYS] > 0)
+						cpupm_pwr_reqs[
+						CPUPM_PWR_REQ_MCUSYS]--;
+				}
+			}
+
+			plat_cpu_pm_unlock();
+		} else
+			ret = MTK_CPUPM_E_ERR;
+		break;
+#endif /* CPU_PM_PWR_REQ */
+	default:
+		ret = MTK_CPUPM_E_ERR;
+		break;
+	}
+	return ret;
+}
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+
+/* Copy a caller-provided device config into the platform-owned copy. */
+void mt_plat_cpu_pm_dev_update(struct mtk_plat_dev_config *config)
+{
+	if (config) {
+		plat_dev.auto_off = config->auto_off;
+		plat_dev.auto_thres_us = config->auto_thres_us;
+	}
+}
+
+/* Hand back a pointer to the platform-owned device config instance. */
+int mt_plat_cpu_pm_dev_config(struct mtk_plat_dev_config **config)
+{
+	if (config == NULL)
+		return MTK_CPUPM_E_FAIL;
+
+	*config = &plat_dev;
+	return 0;
+}
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
+/* SMP (hotplug) hooks registered with the MTK PM framework. */
+static struct mtk_cpu_smp_ops cpcv5_0_cpu_smp = {
+	.init = cpupm_smp_init,
+	.cpu_pwr_on_prepare = cpupm_cpu_pwr_on_prepare,
+	.cpu_on = cpupm_cpu_resume_smp,
+	.cpu_off = cpupm_cpu_suspend_smp,
+};
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */
+
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+/* Suspend/idle (MCDI) hooks registered with the MTK PM framework. */
+static struct mtk_cpu_pm_ops cpcv5_0_mcdi = {
+	.get_pstate = cpupm_get_pstate,
+	.pwr_state_valid = cpupm_pwr_state_valid,
+	.cpu_resume = cpupm_cpu_resume,
+#if CPU_PM_PWR_REQ || CPU_PM_ACP_FSM
+	.cluster_suspend = cpupm_cluster_suspend,
+#endif /* CPU_PM_PWR_REQ || CPU_PM_ACP_FSM */
+	.cluster_resume = cpupm_cluster_resume,
+	.mcusys_suspend = cpupm_mcusys_suspend,
+	.mcusys_resume = cpupm_mcusys_resume,
+	.invoke = cpupm_invoke,
+};
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+
+/* Init cpu_state.cpu_status as Hotplugged for non-boot CPUs. */
+/* Secondaries start life hotplugged; core 0 is the boot CPU. */
+static void mtk_cpu_status_init(void)
+{
+#ifdef CPU_PM_SUSPEND_NOTIFY
+	unsigned int core;
+
+	for (core = 1U; core < PLATFORM_CORE_COUNT; core++)
+		cpu_stage[core].cpu_status |= PER_CPU_STATUS_HOTPLUG;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+}
+
+/*
+ * Depend on mtk pm methodology, the psci op init must
+ * be invoked after cpu pm to avoid initialization fail.
+ */
+int mt_plat_cpu_pm_init(void)
+{
+	/* Lock and topology/CPC bring-up must precede ops registration. */
+	plat_cpu_pm_lock_init();
+	pwr_topology_init();
+	mtk_cpc_init();
+	mtk_cpu_status_init();
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+	register_cpu_pm_ops(CPU_PM_FN(), &cpcv5_0_mcdi);
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN
+	register_cpu_smp_ops(CPU_PM_FN(), &cpcv5_0_cpu_smp);
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_SMP_EN */
+#ifdef CPU_PM_IRQ_REMAIN_ENABLE
+	mt_lp_irqremain_init();
+#endif /* CPU_PM_IRQ_REMAIN_ENABLE */
+	cpupm_smc_init();
+#ifdef CONFIG_MTK_CPU_ILDO
+	cpupm_cpu_retention_init();
+#endif /* CONFIG_MTK_CPU_ILDO */
+	INFO("[%s:%d] - MCDI finished\n", __func__, __LINE__);
+	return 0;
+}
+MTK_ARCH_INIT(mt_plat_cpu_pm_init);
+
+/* Static MMU mappings required by the CPU-PM driver (device memory). */
+static const mmap_region_t cpu_pm_mmap[] MTK_MMAP_SECTION = {
+	MAP_REGION_FLAT(MT_UTILITYBUS_BASE,
+			MT_UTILITYBUS_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+#ifdef CPU_PM_TINYSYS_SUPPORT
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+	MAP_REGION_FLAT(CPU_EB_TCM_BASE,
+			CPU_EB_TCM_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+#ifdef CPU_EB_TCM_CNT_BASE
+	MAP_REGION_FLAT(CPU_EB_TCM_CNT_BASE,
+			CPU_EB_TCM_SIZE,
+			MT_DEVICE | MT_RW | MT_SECURE),
+#endif /* CPU_EB_TCM_CNT_BASE */
+#endif /* CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN */
+#endif /* CPU_PM_TINYSYS_SUPPORT */
+	{0},
+};
+DECLARE_MTK_MMAP_REGIONS(cpu_pm_mmap);
+
+/*
+ * Shared power-event handler: tracks the per-core power-down flag and,
+ * on power-on, updates the PM counter and CPC latency records.
+ * Returns arg unchanged (notifier convention).
+ */
+static void *cpupm_core_pwr_handler(const void *arg, unsigned int act)
+{
+	struct mt_cpupm_event_data *nb =
+	(struct mt_cpupm_event_data *)arg;
+
+	if (!arg || (nb->cpuid >= PLATFORM_CORE_COUNT))
+		return (void *)arg;
+
+	if (act & MT_CPUPM_PWR_ON) {
+#ifdef CPU_PM_SUSPEND_NOTIFY
+		cpu_stage[nb->cpuid].cpu_status &= ~PER_CPU_STATUS_PDN;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+		mtk_cpu_pm_counter_update(nb->cpuid);
+		mtk_cpu_pm_save_cpc_latency(nb->cpuid);
+	} else {
+#ifdef CPU_PM_SUSPEND_NOTIFY
+		cpu_stage[nb->cpuid].cpu_status |= PER_CPU_STATUS_PDN;
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+	}
+
+	return (void *)arg;
+}
+
+/* Power-off notifier registered with the CPU-PM event framework. */
+void *cpupm_core_pwr_off_handler(const void *arg)
+{
+	return cpupm_core_pwr_handler(arg, MT_CPUPM_PWR_OFF);
+}
+MT_CPUPM_SUBCRIBE_EVENT_PWR_OFF(cpupm_core_pwr_off_handler);
+
+/* Power-on notifier registered with the CPU-PM event framework. */
+void *cpupm_core_pwr_on_handler(const void *arg)
+{
+	return cpupm_core_pwr_handler(arg, MT_CPUPM_PWR_ON);
+}
+MT_CPUPM_SUBCRIBE_EVENT_PWR_ON(cpupm_core_pwr_on_handler);
+
+#ifdef CPU_PM_SUSPEND_NOTIFY
+/* Record per-core s2idle entry/exit as reported from the kernel. */
+int cpupm_set_suspend_state(unsigned int act, unsigned int cpuid)
+{
+	if (cpuid >= PLATFORM_CORE_COUNT)
+		return MTK_CPUPM_E_ERR;
+
+	if ((act & MT_LPM_SMC_ACT_SET) != 0U)
+		cpu_stage[cpuid].cpu_status |= PER_CPU_STATUS_S2IDLE;
+	else
+		cpu_stage[cpuid].cpu_status &= ~PER_CPU_STATUS_S2IDLE;
+
+	return 0;
+}
+#endif /* CPU_PM_SUSPEND_NOTIFY */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.h
new file mode 100644
index 000000000..20356daae
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm.h
@@ -0,0 +1,314 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_CPU_PM_H
+#define MT_CPU_PM_H
+
+#include <assert.h>
+
+#include <platform_def.h>
+
+#include <lib/pm/mtk_pm.h>
+
+#if !HW_ASSISTED_COHERENCY
+#define MT_CPU_PM_USING_BAKERY_LOCK
+#endif /* !HW_ASSISTED_COHERENCY */
+
+/*
+ * Enable bit of CPU_PM callbacks
+ */
+static inline unsigned int CPU_PM_FN(void)
+{
+	/* Bitmask of the CPU-PM framework callbacks this driver implements */
+	return (MTK_CPUPM_FN_CPUPM_GET_PWR_STATE |
+		MTK_CPUPM_FN_PWR_STATE_VALID |
+		MTK_CPUPM_FN_PWR_ON_CORE_PREPARE |
+		MTK_CPUPM_FN_RESUME_CORE |
+#ifdef CPU_PM_PWR_REQ
+		/* Cluster suspend is only handled when power requests exist */
+		MTK_CPUPM_FN_SUSPEND_CLUSTER |
+#endif /* CPU_PM_PWR_REQ */
+		MTK_CPUPM_FN_RESUME_CLUSTER |
+		MTK_CPUPM_FN_SUSPEND_MCUSYS |
+		MTK_CPUPM_FN_RESUME_MCUSYS |
+		MTK_CPUPM_FN_SMP_INIT |
+		MTK_CPUPM_FN_SMP_CORE_ON |
+		MTK_CPUPM_FN_SMP_CORE_OFF);
+}
+
+#define CPU_PM_ASSERT(_cond) ({ \
+	if (!(_cond)) { \
+		INFO("[%s:%d] - %s\n", __func__, __LINE__, #_cond); \
+		panic(); \
+	} })
+
+/* related registers */
+#define SPM_POWERON_CONFIG_EN			(SPM_BASE + 0x000)
+#define SPM_CPU_PWR_STATUS			(SPM_BASE + 0x174)
+
+/* bit-fields of SPM_POWERON_CONFIG_EN */
+#define PROJECT_CODE				(0xB16U << 16)
+#define BCLK_CG_EN				BIT(0)
+
+#define CPC_PWR_MASK_MCUSYS_MP0			(0xC001)
+
+#define PER_CLUSTER_PWR_DATA(_p, _cl) ({ \
+	_p.pwr.ppu_pwpr = CLUSTER_PPU_PWPR_##_cl; \
+	_p.pwr.ppu_pwsr = CLUSTER_PPU_PWSR_##_cl; \
+	_p.pwr.ppu_dcdr0 = CLUSTER_PPU_DCDR0_##_cl; \
+	_p.pwr.ppu_dcdr1 = CLUSTER_PPU_DCDR1_##_cl; \
+	})
+
+#define PER_CLUSTER_PWR_CTRL(_val, _cl) ({ \
+	switch (_cl) { \
+	case 0: \
+		PER_CLUSTER_PWR_DATA(_val, 0); \
+		break; \
+	default: \
+		assert(0); \
+		break; \
+	} })
+
+#define PER_CPU_PWR_DATA(_p, _cl, _c) ({ \
+	_p.rvbaraddr_l = CORE_RVBRADDR_##_cl##_##_c##_L; \
+	_p.rvbaraddr_h = CORE_RVBRADDR_##_cl##_##_c##_H; \
+	_p.pwr.ppu_pwpr = CORE_PPU_PWPR_##_cl##_##_c; \
+	_p.pwr.ppu_pwsr = CORE_PPU_PWSR_##_cl##_##_c; \
+	_p.pwr.ppu_dcdr0 = CORE_PPU_DCDR0_##_cl##_##_c; \
+	_p.pwr.ppu_dcdr1 = CORE_PPU_DCDR1_##_cl##_##_c; })
+
+#define PER_CPU_PWR_CTRL(_val, _cpu) ({ \
+	switch (_cpu) { \
+	case 0: \
+		PER_CPU_PWR_DATA(_val, 0, 0); \
+		break; \
+	case 1: \
+		PER_CPU_PWR_DATA(_val, 0, 1); \
+		break; \
+	case 2: \
+		PER_CPU_PWR_DATA(_val, 0, 2); \
+		break; \
+	case 3: \
+		PER_CPU_PWR_DATA(_val, 0, 3); \
+		break; \
+	case 4: \
+		PER_CPU_PWR_DATA(_val, 0, 4); \
+		break; \
+	case 5: \
+		PER_CPU_PWR_DATA(_val, 0, 5); \
+		break; \
+	case 6: \
+		PER_CPU_PWR_DATA(_val, 0, 6); \
+		break; \
+	case 7: \
+		PER_CPU_PWR_DATA(_val, 0, 7); \
+		break; \
+	default: \
+		assert(0); \
+		break; \
+	} })
+
+/*
+ * Definition about bootup address for each core
+ * CORE_RVBRADDR_clusterid_cpuid
+ */
+#define CORE_RVBRADDR_0_0_L		(MCUCFG_BASE + 0x00)
+#define CORE_RVBRADDR_0_1_L		(MCUCFG_BASE + 0x08)
+#define CORE_RVBRADDR_0_2_L		(MCUCFG_BASE + 0x10)
+#define CORE_RVBRADDR_0_3_L		(MCUCFG_BASE + 0x18)
+#define CORE_RVBRADDR_0_4_L		(MCUCFG_BASE + 0x20)
+#define CORE_RVBRADDR_0_5_L		(MCUCFG_BASE + 0x28)
+#define CORE_RVBRADDR_0_6_L		(MCUCFG_BASE + 0x30)
+#define CORE_RVBRADDR_0_7_L		(MCUCFG_BASE + 0x38)
+
+#define CORE_RVBRADDR_0_0_H		(MCUCFG_BASE + 0x04)
+#define CORE_RVBRADDR_0_1_H		(MCUCFG_BASE + 0x0C)
+#define CORE_RVBRADDR_0_2_H		(MCUCFG_BASE + 0x14)
+#define CORE_RVBRADDR_0_3_H		(MCUCFG_BASE + 0x1C)
+#define CORE_RVBRADDR_0_4_H		(MCUCFG_BASE + 0x24)
+#define CORE_RVBRADDR_0_5_H		(MCUCFG_BASE + 0x2C)
+#define CORE_RVBRADDR_0_6_H		(MCUCFG_BASE + 0x34)
+#define CORE_RVBRADDR_0_7_H		(MCUCFG_BASE + 0x3C)
+
+/*
+ * Definition about PPU PWPR for each core
+ * PPU_PWPR_clusterid_cpuid
+ */
+#define CORE_PPU_PWPR_0_0		(MT_UTILITYBUS_BASE + 0x080000)
+#define CORE_PPU_PWPR_0_1		(MT_UTILITYBUS_BASE + 0x180000)
+#define CORE_PPU_PWPR_0_2		(MT_UTILITYBUS_BASE + 0x280000)
+#define CORE_PPU_PWPR_0_3		(MT_UTILITYBUS_BASE + 0x380000)
+#define CORE_PPU_PWPR_0_4		(MT_UTILITYBUS_BASE + 0x480000)
+#define CORE_PPU_PWPR_0_5		(MT_UTILITYBUS_BASE + 0x580000)
+#define CORE_PPU_PWPR_0_6		(MT_UTILITYBUS_BASE + 0x680000)
+#define CORE_PPU_PWPR_0_7		(MT_UTILITYBUS_BASE + 0x780000)
+
+/*
+ * Definition about PPU PWSR for each core
+ * PPU_PWSR_clusterid_cpuid
+ */
+#define CORE_PPU_PWSR_0_0		(MT_UTILITYBUS_BASE + 0x080008)
+#define CORE_PPU_PWSR_0_1		(MT_UTILITYBUS_BASE + 0x180008)
+#define CORE_PPU_PWSR_0_2		(MT_UTILITYBUS_BASE + 0x280008)
+#define CORE_PPU_PWSR_0_3		(MT_UTILITYBUS_BASE + 0x380008)
+#define CORE_PPU_PWSR_0_4		(MT_UTILITYBUS_BASE + 0x480008)
+#define CORE_PPU_PWSR_0_5		(MT_UTILITYBUS_BASE + 0x580008)
+#define CORE_PPU_PWSR_0_6		(MT_UTILITYBUS_BASE + 0x680008)
+#define CORE_PPU_PWSR_0_7		(MT_UTILITYBUS_BASE + 0x780008)
+
+/*
+ * Definition about device delay control 0
+ * PPU_DCDR0_clusterid_cpuid
+ */
+#define CORE_PPU_DCDR0_0_0		(MT_UTILITYBUS_BASE + 0x080170)
+#define CORE_PPU_DCDR0_0_1		(MT_UTILITYBUS_BASE + 0x180170)
+#define CORE_PPU_DCDR0_0_2		(MT_UTILITYBUS_BASE + 0x280170)
+#define CORE_PPU_DCDR0_0_3		(MT_UTILITYBUS_BASE + 0x380170)
+#define CORE_PPU_DCDR0_0_4		(MT_UTILITYBUS_BASE + 0x480170)
+#define CORE_PPU_DCDR0_0_5		(MT_UTILITYBUS_BASE + 0x580170)
+#define CORE_PPU_DCDR0_0_6		(MT_UTILITYBUS_BASE + 0x680170)
+#define CORE_PPU_DCDR0_0_7		(MT_UTILITYBUS_BASE + 0x780170)
+
+/*
+ * Definition about device delay control 1
+ * PPU_DCDR1_clusterid_cpuid
+ */
+#define CORE_PPU_DCDR1_0_0		(MT_UTILITYBUS_BASE + 0x080174)
+#define CORE_PPU_DCDR1_0_1		(MT_UTILITYBUS_BASE + 0x180174)
+#define CORE_PPU_DCDR1_0_2		(MT_UTILITYBUS_BASE + 0x280174)
+#define CORE_PPU_DCDR1_0_3		(MT_UTILITYBUS_BASE + 0x380174)
+#define CORE_PPU_DCDR1_0_4		(MT_UTILITYBUS_BASE + 0x480174)
+#define CORE_PPU_DCDR1_0_5		(MT_UTILITYBUS_BASE + 0x580174)
+#define CORE_PPU_DCDR1_0_6		(MT_UTILITYBUS_BASE + 0x680174)
+#define CORE_PPU_DCDR1_0_7		(MT_UTILITYBUS_BASE + 0x780174)
+
+/*
+ * Definition about PPU PWPR for cluster
+ * PPU_PWPR_clusterid
+ */
+#define CLUSTER_PPU_PWPR_0		(MT_UTILITYBUS_BASE + 0x030000)
+#define CLUSTER_PPU_PWSR_0		(MT_UTILITYBUS_BASE + 0x030008)
+#define CLUSTER_PPU_DCDR0_0		(MT_UTILITYBUS_BASE + 0x030170)
+#define CLUSTER_PPU_DCDR1_0		(MT_UTILITYBUS_BASE + 0x030174)
+
+/* PPU register addresses for one power domain */
+struct ppu_pwr_ctrl {
+	unsigned int ppu_pwpr;	/* power policy register */
+	unsigned int ppu_pwsr;	/* power status register */
+	unsigned int ppu_dcdr0;	/* device delay control 0 */
+	unsigned int ppu_dcdr1;	/* device delay control 1 */
+};
+
+/* Per-core power control: reset vector registers plus the core's PPU */
+struct cpu_pwr_ctrl {
+	unsigned int rvbaraddr_l;	/* reset vector base, low 32 bits */
+	unsigned int rvbaraddr_h;	/* reset vector base, high 32 bits */
+#ifndef CPU_PM_CORE_ARCH64_ONLY
+	unsigned int arch_addr;	/* presumably AArch32/64 select — TODO confirm */
+#endif /* CPU_PM_CORE_ARCH64_ONLY */
+	struct ppu_pwr_ctrl pwr;
+	unsigned int pwr_ctrl;
+};
+
+/* Per-cluster power control: just the cluster's PPU block */
+struct cluster_pwr_ctrl {
+	struct ppu_pwr_ctrl pwr;
+};
+
+#define MT_CPUPM_PWR_ON			BIT(0)
+#define MT_CPUPM_PWR_OFF		BIT(1)
+
+#ifdef CPU_PM_SUSPEND_NOTIFY
+#define PER_CPU_STATUS_S2IDLE		BIT(0)
+#define PER_CPU_STATUS_PDN		BIT(1)
+#define PER_CPU_STATUS_HOTPLUG		BIT(2)
+#define PER_CPU_STATUS_S2IDLE_PDN \
+	(PER_CPU_STATUS_S2IDLE | PER_CPU_STATUS_PDN)
+
+/* True when every bit of _tar is set in _state (args parenthesized) */
+#define CPUPM_PWR_STATUS(_state, _tar)	(((_state) & (_tar)) == (_tar))
+/*
+ * A core's state allows power saving when it is both in s2idle and
+ * powered down, or when it has been hot-unplugged.
+ */
+#define IS_CPUPM_SAVE_PWR_STATUS(_state)	( \
+	CPUPM_PWR_STATUS((_state), PER_CPU_STATUS_S2IDLE_PDN) || \
+	((_state) & PER_CPU_STATUS_HOTPLUG))
+
+#ifdef CONFIG_MTK_CPU_ILDO
+#define CPU_PM_CPU_RET_IS_ENABLED	CPU_PM_CPU_RET_MASK
+
+enum {
+	CPU_PM_RET_SET_SUCCESS = 0,
+	CPU_PM_RET_SET_FAIL
+};
+
+#define CPU_EB_RET_STA_REG	(CPU_EB_TCM_BASE + CPU_EB_RET_STA_OFFSET)
+#define CPU_RET_TIMEOUT		100
+#endif /* CONFIG_MTK_CPU_ILDO */
+
+struct per_cpu_stage {
+	unsigned int cpu_status;
+};
+#endif /* CPU_PM_SUSPEND_NOTIFY */
+
+#define MCUSYS_STATUS_PDN		BIT(0)
+#define MCUSYS_STATUS_CPUSYS_PROTECT	BIT(8)
+#define MCUSYS_STATUS_MCUSYS_PROTECT	BIT(9)
+
+#ifdef CPU_PM_ACP_FSM
+#define ACP_FSM_TIMEOUT_MAX		(500)
+#define ACP_FSM_AWARE_TIME		(100)
+#define DO_ACP_FSM_WAIT_TIMEOUT(k_cnt) ({ \
+	if (k_cnt >= ACP_FSM_TIMEOUT_MAX) { \
+		INFO("[%s:%d] - ACP FSM TIMEOUT %u us (> %u)\n", \
+		     __func__, __LINE__, k_cnt, ACP_FSM_TIMEOUT_MAX); \
+		panic(); \
+	} else if (k_cnt == ACP_FSM_AWARE_TIME) { \
+		INFO("[%s:%d] - ACP FSM latency exceed %u us\n", \
+		     __func__, __LINE__, ACP_FSM_AWARE_TIME); \
+	} \
+	k_cnt++; udelay(1); })
+#endif /* CPU_PM_ACP_FSM */
+
+/* cpu_pm function ID */
+enum mt_cpu_pm_user_id {
+	MCUSYS_STATUS = 0,
+	CPC_COMMAND,
+};
+
+/* cpu_pm lp function ID */
+enum mt_cpu_pm_lp_smc_id {
+	LP_CPC_COMMAND = 0,
+	IRQS_REMAIN_ALLOC,
+	IRQS_REMAIN_CTRL,
+	IRQS_REMAIN_IRQ,
+	IRQS_REMAIN_WAKEUP_CAT,
+	IRQS_REMAIN_WAKEUP_SRC,
+	SUSPEND_SRC,
+	CPU_PM_COUNTER_CTRL,
+	CPU_PM_RECORD_CTRL,
+	SUSPEND_ABORT_REASON,
+	CPU_PM_RET_CTRL
+};
+
+enum mt_suspend_abort_reason {
+	MTK_PM_SUSPEND_OK = 0,
+	MTK_PM_SUSPEND_ABORT_PWR_REQ,
+	MTK_PM_SUSPEND_ABORT_LAST_CORE,
+	MTK_PM_SUSPEND_ABORT_RC_INVALID,
+};
+
+struct mtk_plat_dev_config {
+	int auto_off;
+	unsigned int auto_thres_us;
+};
+
+struct mt_cpu_pm_record {
+	unsigned int cnt;
+	uint64_t name[2];
+};
+
+unsigned int cpupm_cpu_retention_control(unsigned int enable);
+unsigned int cpupu_get_cpu_retention_control(void);
+void mt_plat_cpu_pm_dev_update(struct mtk_plat_dev_config *config);
+int mt_plat_cpu_pm_dev_config(struct mtk_plat_dev_config **config);
+int cpupm_set_suspend_state(unsigned int act, unsigned int cpuid);
+uint64_t mtk_mcusys_off_record_cnt_get(void);
+uint64_t mtk_mcusys_off_record_name_get(void);
+uint64_t mtk_suspend_abort_reason_get(void);
+
+#endif /* MT_CPU_PM_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.c
new file mode 100644
index 000000000..83d1951f4
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <drivers/delay_timer.h>
+#include <lib/spinlock.h>
+
+#include <lib/pm/mtk_pm.h>
+#include <mcucfg.h>
+#include "mt_cpu_pm.h"
+#include "mt_cpu_pm_cpc.h"
+#include "mt_smp.h"
+#include <mt_timer.h>
+
+#define CHECK_GIC_SGI_PENDING		(0)
+#define MTK_SYS_TIMER_SYNC_SUPPORT	(1)
+#define MCUSYS_CLUSTER_DORMANT_MASK	0xFFFF
+
+/* On/off latency statistics (in CPC ticks) for one profiled device */
+struct mtk_cpc_lat_data {
+	unsigned int on_sum;	/* accumulated power-on latency */
+	unsigned int on_min;
+	unsigned int on_max;
+	unsigned int off_sum;	/* accumulated power-off latency */
+	unsigned int off_min;
+	unsigned int off_max;
+	unsigned int on_cnt;	/* number of recorded on samples */
+	unsigned int off_cnt;	/* number of recorded off samples */
+};
+
+/*
+ * Latency records for every profiled device.  The union lets the same
+ * storage be addressed either as a flat array (p[DEV_TYPE_NUM]) or by
+ * name (cpu[x]/cluster/mcusys); the enum dev_type order must match.
+ */
+struct mtk_cpc_device {
+	union {
+		struct mtk_cpc_lat_data p[DEV_TYPE_NUM];
+		struct {
+			struct mtk_cpc_lat_data cpu[PLATFORM_CORE_COUNT];
+			struct mtk_cpc_lat_data cluster;
+			struct mtk_cpc_lat_data mcusys;
+		};
+	};
+};
+
+static struct mtk_cpc_device cpc_dev;
+
+/* Runtime switches for the off-counter and latency-profiling features */
+static bool cpu_pm_counter_enabled;
+static bool cpu_cpc_prof_enabled;
+
+/*
+ * Toggle the CPC auto-dormant feature and mirror the state into the
+ * platform device config, when one has been registered.
+ */
+static void mtk_cpc_auto_dormant_en(unsigned int en)
+{
+	struct mtk_plat_dev_config *cfg = NULL;
+	const int enable = !!en;
+
+	if (enable)
+		mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_AUTO_OFF_EN);
+	else
+		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_AUTO_OFF_EN);
+
+	mt_plat_cpu_pm_dev_config(&cfg);
+	if (cfg != NULL) {
+		cfg->auto_off = enable;
+		mt_plat_cpu_pm_dev_update(cfg);
+	}
+}
+
+/*
+ * Program the auto-dormant idle threshold (in microseconds) and keep
+ * the registered platform device config in sync.
+ */
+static void mtk_cpc_auto_dormant_tick(unsigned int us)
+{
+	struct mtk_plat_dev_config *cfg = NULL;
+
+	/* HW register takes CPC ticks, not microseconds */
+	mmio_write_32(CPC_MCUSYS_CPC_OFF_THRES, US_TO_TICKS(us));
+
+	mt_plat_cpu_pm_dev_config(&cfg);
+	if (cfg != NULL) {
+		cfg->auto_thres_us = us;
+		mt_plat_cpu_pm_dev_update(cfg);
+	}
+}
+
+/* Drop MCUSYS last-core protection */
+static void mtk_cpu_pm_mcusys_prot_release(void)
+{
+	mmio_write_32(CPC_MCUSYS_PWR_ON_MASK, MCUSYS_PROT_CLR);
+}
+
+/*
+ * Try to take last-core protection: write prot_req and poll the
+ * response field at resp_reg[resp_ofs].  Gives up when the CPC answers
+ * PROT_GIVEUP, succeeds only when no wakeup request is pending, and
+ * times out after RETRY_CNT_MAX attempts.
+ */
+static int mtk_cpc_last_core_prot(int prot_req, int resp_reg, int resp_ofs)
+{
+	unsigned int attempt;
+
+	for (attempt = 0; attempt < RETRY_CNT_MAX; attempt++) {
+		unsigned int resp;
+
+		mmio_write_32(CPC_MCUSYS_LAST_CORE_REQ, prot_req);
+		udelay(1);
+		resp = (mmio_read_32(resp_reg) >> resp_ofs) &
+		       CPC_PROT_RESP_MASK;
+
+		if (resp == PROT_GIVEUP)
+			return CPC_ERR_FAIL;
+
+		if (resp == PROT_SUCCESS) {
+			if (mmio_read_32(CPC_WAKEUP_REQ) ==
+			    CPC_WAKEUP_STAT_NONE)
+				return CPC_SUCCESS;
+
+			/* A wakeup raced us; drop the prot and retry */
+			mtk_cpu_pm_mcusys_prot_release();
+		}
+	}
+
+	return CPC_ERR_TIMEOUT;
+}
+
+/* Take MCUSYS last-core protection (spelling kept for existing callers) */
+static int mtk_cpu_pm_mcusys_prot_aquire(void)
+{
+	return mtk_cpc_last_core_prot(MCUSYS_PROT_SET,
+				      CPC_MCUSYS_LAST_CORE_RESP,
+				      MCUSYS_RESP_OFS);
+}
+
+/* Take CPUSYS last-core protection; 'cluster' is unused here —
+ * presumably a single-cluster design, TODO confirm.
+ */
+int mtk_cpu_pm_cluster_prot_aquire(int cluster)
+{
+	return mtk_cpc_last_core_prot(CPUSYS_PROT_SET,
+				      CPC_MCUSYS_MP_LAST_CORE_RESP,
+				      CPUSYS_RESP_OFS);
+}
+
+/* Drop CPUSYS last-core protection; 'cluster' is unused (see above) */
+void mtk_cpu_pm_cluster_prot_release(int cluster)
+{
+	mmio_write_32(CPC_MCUSYS_PWR_ON_MASK, CPUSYS_PROT_CLR);
+}
+
+/* Internal accessor for the off-counter enable flag */
+static bool is_cpu_pm_counter_enabled(void)
+{
+	return cpu_pm_counter_enabled;
+}
+
+/*
+ * Fold the HW cluster off/dormant counter into the SYSRAM backup slot
+ * and clear the HW counter.  No-op while counting is disabled.
+ */
+static void mtk_cpc_cluster_cnt_backup(void)
+{
+	unsigned int hw_cnt, total;
+
+	if (!is_cpu_pm_counter_enabled())
+		return;
+
+	/* Single cluster: low half is the dormant count, high half off */
+	hw_cnt = mmio_read_32(CPC_MCUSYS_CLUSTER_COUNTER);
+	if ((hw_cnt & MCUSYS_CLUSTER_DORMANT_MASK) == 0)
+		hw_cnt = (hw_cnt >> 16) & MCUSYS_CLUSTER_DORMANT_MASK;
+	else
+		hw_cnt &= MCUSYS_CLUSTER_DORMANT_MASK;
+
+	total = mmio_read_32(SYSRAM_CLUSTER_CNT_BACKUP) + hw_cnt;
+	mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, total);
+	mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
+}
+
+/* Allow the CPC to power MCUSYS off when the last core goes down */
+static inline void mtk_cpc_mcusys_off_en(void)
+{
+	mmio_setbits_32(CPC_MCUSYS_PWR_CTRL, CPC_MCUSYS_OFF_EN);
+}
+
+/* Forbid CPC-driven MCUSYS power off */
+static inline void mtk_cpc_mcusys_off_dis(void)
+{
+	mmio_clrbits_32(CPC_MCUSYS_PWR_CTRL, CPC_MCUSYS_OFF_EN);
+}
+
+/* Resume-side counterpart of mtk_cpc_mcusys_off_prepare() */
+void mtk_cpc_mcusys_off_reflect(void)
+{
+	mtk_cpc_mcusys_off_dis();
+	mtk_cpu_pm_mcusys_prot_release();
+}
+
+/*
+ * Grab MCUSYS last-core protection and arm the CPC for MCUSYS off.
+ * Must be undone with mtk_cpc_mcusys_off_reflect() on the resume path.
+ */
+int mtk_cpc_mcusys_off_prepare(void)
+{
+	if (mtk_cpu_pm_mcusys_prot_aquire() != CPC_SUCCESS)
+		return CPC_ERR_FAIL;
+
+#if CHECK_GIC_SGI_PENDING
+	/* A pending SGI means a core is about to wake: abort */
+	if (gicr_get_sgi_pending() != 0) {
+		mtk_cpu_pm_mcusys_prot_release();
+		return CPC_ERR_FAIL;
+	}
+#endif /* CHECK_GIC_SGI_PENDING */
+
+	mtk_cpc_cluster_cnt_backup();
+	mtk_cpc_mcusys_off_en();
+
+	return CPC_SUCCESS;
+}
+
+/* Hint the CPC that the given core is about to be powered on */
+void mtk_cpc_core_on_hint_set(int cpu)
+{
+	mmio_write_32(CPC_MCUSYS_CPU_ON_SW_HINT_SET, BIT(cpu));
+}
+
+/* Clear the power-on hint for the given core */
+void mtk_cpc_core_on_hint_clr(int cpu)
+{
+	mmio_write_32(CPC_MCUSYS_CPU_ON_SW_HINT_CLR, BIT(cpu));
+}
+
+/*
+ * Copy every CPC trace record into system SRAM for post-mortem dump.
+ * NOTE(review): the source is the CPC trace data register window;
+ * assumes it tolerates byte-wise memcpy access — confirm on this SoC.
+ */
+static void mtk_cpc_dump_timestamp(void)
+{
+	unsigned int id;
+
+	for (id = 0; id < CPC_TRACE_ID_NUM; id++) {
+		/* Select which trace buffer is exposed, then copy it out */
+		mmio_write_32(CPC_MCUSYS_TRACE_SEL, id);
+
+		memcpy((void *)(uintptr_t)CPC_TRACE_SRAM(id),
+		       (const void *)(uintptr_t)CPC_MCUSYS_TRACE_DATA,
+		       CPC_TRACE_SIZE);
+	}
+}
+
+/* Snapshot kernel time and system time into the CPC timestamp regs */
+void mtk_cpc_time_sync(void)
+{
+#if MTK_SYS_TIMER_SYNC_SUPPORT
+	uint64_t kt;
+	uint32_t systime_l, systime_h;
+
+	kt = sched_clock();
+	/*
+	 * NOTE(review): L and H halves are read non-atomically; a carry
+	 * between the two reads could skew the snapshot — confirm the
+	 * counter latches the high word on the low-word read.
+	 */
+	systime_l = mmio_read_32(CNTSYS_L_REG);
+	systime_h = mmio_read_32(CNTSYS_H_REG);
+
+	/* sync kernel timer to cpc */
+	mmio_write_32(CPC_MCUSYS_CPC_KERNEL_TIME_L_BASE, (uint32_t)kt);
+	mmio_write_32(CPC_MCUSYS_CPC_KERNEL_TIME_H_BASE, (uint32_t)(kt >> 32));
+
+	/* sync system timer to cpc */
+	mmio_write_32(CPC_MCUSYS_CPC_SYSTEM_TIME_L_BASE, systime_l);
+	mmio_write_32(CPC_MCUSYS_CPC_SYSTEM_TIME_H_BASE, systime_h);
+#endif /* MTK_SYS_TIMER_SYNC_SUPPORT */
+}
+
+/* Re-sync timestamps, then freeze or unfreeze the CPC time base */
+static void mtk_cpc_time_freeze(bool is_freeze)
+{
+#if MTK_SYS_TIMER_SYNC_SUPPORT
+	mtk_cpc_time_sync();
+
+	if (!is_freeze) {
+		mmio_clrbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_FREEZE);
+		return;
+	}
+
+	mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_FREEZE);
+#endif /* MTK_SYS_TIMER_SYNC_SUPPORT */
+}
+
+/*
+ * EL3 uptime-sync subscriber: a non-zero request unfreezes the CPC
+ * time base, zero freezes it.  Passes arg through unchanged.
+ */
+static void *mtk_cpc_el3_timesync_handler(const void *arg)
+{
+	const unsigned int *is_time_sync = (const unsigned int *)arg;
+
+	if (is_time_sync != NULL)
+		mtk_cpc_time_freeze(*is_time_sync == 0);
+
+	return (void *)arg;
+}
+MT_CPUPM_SUBCRIBE_EL3_UPTIME_SYNC_WITH_KERNEL(mtk_cpc_el3_timesync_handler);
+
+/* Apply one CPC SMC configuration item; unknown items are ignored */
+static void mtk_cpc_config(unsigned int cfg, unsigned int data)
+{
+	switch (cfg) {
+	case CPC_SMC_CONFIG_PROF:
+		/* data != 0 enables latency profiling */
+		if (data != 0)
+			mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING,
+					CPC_PROF_EN);
+		else
+			mmio_clrbits_32(CPC_MCUSYS_CPC_DBG_SETTING,
+					CPC_PROF_EN);
+		break;
+	case CPC_SMC_CONFIG_CNT_CLR:
+		mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
+		break;
+	case CPC_SMC_CONFIG_TIME_SYNC:
+		mtk_cpc_time_sync();
+		break;
+	default:
+		break;
+	}
+}
+
+/* Read back one CPC SMC configuration item; unknown items yield 0 */
+static unsigned int mtk_cpc_read_config(unsigned int cfg)
+{
+	if (cfg == CPC_SMC_CONFIG_PROF)
+		return ((mmio_read_32(CPC_MCUSYS_CPC_DBG_SETTING) &
+			 CPC_PROF_EN) != 0) ? 1 : 0;
+
+	return 0;
+}
+
+#define PROF_DEV_NAME_LEN	8
+/*
+ * Pack a profiling device name ("CPU0".."CPU7", "CPUSYS", "MCUSYS")
+ * into a u64, character i in byte i.  dev_id arrives straight from an
+ * SMC argument, so it is validated here; out-of-range ids return 0
+ * instead of reading past the name table.
+ */
+uint64_t mtk_cpc_prof_dev_name(unsigned int dev_id)
+{
+	uint64_t ret = 0;
+	unsigned int i;
+	static const char *prof_dev_name[DEV_TYPE_NUM] = {
+		"CPU0",
+		"CPU1",
+		"CPU2",
+		"CPU3",
+		"CPU4",
+		"CPU5",
+		"CPU6",
+		"CPU7",
+		"CPUSYS",
+		"MCUSYS"
+	};
+
+	if (dev_id >= DEV_TYPE_NUM)
+		return 0;
+
+	/* Check the index bound before dereferencing the character */
+	for (i = 0; (i < PROF_DEV_NAME_LEN) &&
+		    (prof_dev_name[dev_id][i] != '\0'); i++)
+		ret |= ((uint64_t)(prof_dev_name[dev_id][i] & 0xFF)) <<
+		       (i << 3);
+
+	return ret;
+}
+
+/* Reset every per-device latency record in one go */
+static void mtk_cpc_prof_clr(void)
+{
+	memset(&cpc_dev, 0, sizeof(cpc_dev));
+}
+
+/*
+ * Turn latency profiling on or off.  Statistics are wiped on a
+ * disabled -> enabled transition so a fresh session starts clean.
+ */
+void mtk_cpc_prof_enable(bool enable)
+{
+	if (enable)
+		mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_PROF_EN);
+	else
+		mmio_clrbits_32(CPC_MCUSYS_CPC_DBG_SETTING, CPC_PROF_EN);
+
+	if (enable && !cpu_cpc_prof_enabled)
+		mtk_cpc_prof_clr();
+
+	cpu_cpc_prof_enabled = enable;
+}
+
+/* Whether latency profiling is currently active */
+bool mtk_cpc_prof_is_enabled(void)
+{
+	return cpu_cpc_prof_enabled;
+}
+
+/* Number of profiled devices (PLATFORM_CORE_COUNT cores + cluster + mcusys) */
+uint64_t mtk_cpc_prof_dev_num(void)
+{
+	return DEV_TYPE_NUM;
+}
+
+#define cpc_tick_to_us(val) ((val) / 13)
+/*
+ * Return one profiling statistic for a device, converted to us.
+ * AVG requests yield 0 while no samples have been recorded, guarding
+ * the on_cnt/off_cnt division against divide-by-zero.  Returns
+ * CPC_ERR_FAIL for an out-of-range dev_type.
+ */
+uint64_t mtk_cpc_prof_read(unsigned int prof_act, unsigned int dev_type)
+{
+	uint64_t ret = 0;
+	struct mtk_cpc_lat_data *lat_data;
+
+	if (dev_type >= DEV_TYPE_NUM)
+		return CPC_ERR_FAIL;
+
+	lat_data = &cpc_dev.p[dev_type];
+
+	switch (prof_act) {
+	case CPC_PROF_OFF_CNT:
+		ret = lat_data->off_cnt;
+		break;
+	case CPC_PROF_OFF_AVG:
+		if (lat_data->off_cnt != 0)
+			ret = cpc_tick_to_us(lat_data->off_sum /
+					     lat_data->off_cnt);
+		break;
+	case CPC_PROF_OFF_MAX:
+		ret = cpc_tick_to_us(lat_data->off_max);
+		break;
+	case CPC_PROF_OFF_MIN:
+		ret = cpc_tick_to_us(lat_data->off_min);
+		break;
+	case CPC_PROF_ON_CNT:
+		ret = lat_data->on_cnt;
+		break;
+	case CPC_PROF_ON_AVG:
+		if (lat_data->on_cnt != 0)
+			ret = cpc_tick_to_us(lat_data->on_sum /
+					     lat_data->on_cnt);
+		break;
+	case CPC_PROF_ON_MAX:
+		ret = cpc_tick_to_us(lat_data->on_max);
+		break;
+	case CPC_PROF_ON_MIN:
+		ret = cpc_tick_to_us(lat_data->on_min);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Dispatch a latency-profiling SMC sub-command.  arg is either the
+ * enable flag or the device id, depending on prof_act.
+ */
+uint64_t mtk_cpc_prof_latency(unsigned int prof_act, unsigned int arg)
+{
+	uint64_t ret = 0;
+
+	/* The statistic-read actions are contiguous in the enum */
+	if ((prof_act >= CPC_PROF_OFF_CNT) && (prof_act <= CPC_PROF_ON_MIN))
+		return mtk_cpc_prof_read(prof_act, arg);
+
+	switch (prof_act) {
+	case CPC_PROF_ENABLE:
+		mtk_cpc_prof_enable((bool)arg);
+		break;
+	case CPC_PROF_ENABLED:
+		ret = (uint64_t)mtk_cpc_prof_is_enabled();
+		break;
+	case CPC_PROF_DEV_NUM:
+		ret = mtk_cpc_prof_dev_num();
+		break;
+	case CPC_PROF_DEV_NAME:
+		ret = mtk_cpc_prof_dev_name(arg);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+/* Top-level CPC SMC dispatcher; unknown actions return 0 */
+uint64_t mtk_cpc_handler(uint64_t act, uint64_t arg1, uint64_t arg2)
+{
+	uint64_t ret = 0;
+
+	if (act == CPC_SMC_EVENT_CPC_CONFIG)
+		mtk_cpc_config((unsigned int)arg1, (unsigned int)arg2);
+	else if (act == CPC_SMC_EVENT_READ_CONFIG)
+		ret = mtk_cpc_read_config((unsigned int)arg1);
+	else if (act == CPC_SMC_EVENT_PROF_LATENCY)
+		ret = mtk_cpc_prof_latency((unsigned int)arg1,
+					   (unsigned int)arg2);
+	/* CPC_SMC_EVENT_GIC_DPG_SET is accepted but currently a no-op */
+
+	return ret;
+}
+
+/* SMC entry to dump the CPC trace buffers; always returns 0 */
+uint64_t mtk_cpc_trace_dump(uint64_t act, uint64_t arg1, uint64_t arg2)
+{
+	if (act == CPC_SMC_EVENT_DUMP_TRACE_DATA)
+		mtk_cpc_dump_timestamp();
+
+	return 0;
+}
+
+/* Zero every SYSRAM off-counter slot and the HW cluster counter */
+void mtk_cpu_pm_counter_clear(void)
+{
+	unsigned int i;
+
+	/* Per-core recent off counts */
+	for (i = 0U; i < PLATFORM_CORE_COUNT; i++)
+		mmio_write_32(SYSRAM_RECENT_CPU_CNT(i), 0);
+
+	/* Cluster/MCUSYS totals, HW counter and timestamp backup */
+	mmio_write_32(SYSRAM_RECENT_CLUSTER_CNT, 0);
+	mmio_write_32(SYSRAM_RECENT_MCUSYS_CNT, 0);
+	mmio_write_32(SYSRAM_CPUSYS_CNT, 0);
+	mmio_write_32(SYSRAM_MCUSYS_CNT, 0);
+	mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
+	mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, 0x0);
+	mmio_write_32(SYSRAM_RECENT_CNT_TS_H, 0x0);
+	mmio_write_32(SYSRAM_RECENT_CNT_TS_L, 0x0);
+}
+
+/* Enable/disable the off counters; disabling also wipes them */
+void mtk_cpu_pm_counter_enable(bool enable)
+{
+	cpu_pm_counter_enabled = enable;
+	if (!cpu_pm_counter_enabled)
+		mtk_cpu_pm_counter_clear();
+}
+
+/* Public accessor for the off-counter enable flag */
+bool mtk_cpu_pm_counter_enabled(void)
+{
+	return cpu_pm_counter_enabled;
+}
+
+#define sec_to_us(v)	((v) * 1000 * 1000ULL)
+#define DUMP_INTERVAL	sec_to_us(5)
+/*
+ * Count one power-down event for 'cpu' and, at most every
+ * DUMP_INTERVAL, flush the accumulated per-core / cluster / mcusys
+ * off counts to system SRAM.  Keeps its history in function-local
+ * statics; presumably serialized by the power-event path — TODO
+ * confirm no concurrent callers.
+ */
+void mtk_cpu_pm_counter_update(unsigned int cpu)
+{
+#ifdef CONFIG_MTK_CPU_SUSPEND_EN
+	unsigned int cnt = 0, curr_mcusys_cnt = 0, mcusys_cnt = 0;
+	static unsigned int prev_mcusys_cnt = 0,
+			    cpu_cnt[PLATFORM_CORE_COUNT] = {0};
+	uint64_t curr_us = 0;
+	static uint64_t last_dump_us;
+	static bool reset;
+
+	if (is_cpu_pm_counter_enabled() == false) {
+		/* Remember to re-baseline once counting is re-enabled */
+		reset = true;
+		return;
+	}
+
+	if (reset == true) {
+		/* Re-baseline: new timestamp, fresh MCUPM reference count */
+		last_dump_us = sched_clock() / 1000;
+		prev_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);
+		mtk_cpu_pm_counter_clear();
+		cpu_cnt[cpu] = 0;
+		reset = false;
+	}
+
+	cpu_cnt[cpu]++;
+
+	curr_us = sched_clock() / 1000;
+	if (curr_us - last_dump_us > DUMP_INTERVAL) {
+		last_dump_us = curr_us;
+
+		/* CPU off count */
+		for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++) {
+			mmio_write_32(SYSRAM_RECENT_CPU_CNT(cpu),
+				      cpu_cnt[cpu]);
+			cpu_cnt[cpu] = 0;
+		}
+
+		/* Cluster off count */
+		curr_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);
+		/* Tolerate an MCUPM counter wrap/reset since last dump */
+		if (curr_mcusys_cnt >= prev_mcusys_cnt)
+			mcusys_cnt = curr_mcusys_cnt - prev_mcusys_cnt;
+		else
+			mcusys_cnt = curr_mcusys_cnt;
+		prev_mcusys_cnt = mmio_read_32(MCUPM_TCM_MCUSYS_COUNTER);
+
+		cnt = mmio_read_32(CPC_MCUSYS_CLUSTER_COUNTER);
+		/**
+		 * bit[0:15] : memory retention
+		 * bit[16:31] : memory off
+		 */
+		if ((cnt & MCUSYS_CLUSTER_DORMANT_MASK) == 0)
+			cnt = ((cnt >> 16) & MCUSYS_CLUSTER_DORMANT_MASK);
+		else
+			cnt = cnt & MCUSYS_CLUSTER_DORMANT_MASK;
+		cnt += mmio_read_32(SYSRAM_CLUSTER_CNT_BACKUP);
+		cnt += mcusys_cnt;
+
+		mmio_write_32(SYSRAM_RECENT_CLUSTER_CNT, cnt);
+		mmio_write_32(SYSRAM_CPUSYS_CNT,
+			      cnt + mmio_read_32(SYSRAM_CPUSYS_CNT));
+		mmio_write_32(CPC_MCUSYS_CLUSTER_COUNTER_CLR, 0x3);
+		mmio_write_32(SYSRAM_CLUSTER_CNT_BACKUP, 0x0);
+
+		/* MCUSYS off count */
+		mmio_write_32(SYSRAM_RECENT_MCUSYS_CNT,
+			      mcusys_cnt);
+
+		mmio_write_32(SYSRAM_MCUSYS_CNT,
+			      mmio_read_32(SYSRAM_MCUSYS_CNT) + mcusys_cnt);
+
+		mmio_write_32(SYSRAM_RECENT_CNT_TS_H,
+			      (unsigned int)((last_dump_us >> 32) & 0xFFFFFFFF));
+
+		mmio_write_32(SYSRAM_RECENT_CNT_TS_L,
+			      (unsigned int)(last_dump_us & 0xFFFFFFFF));
+	}
+#endif /* CONFIG_MTK_CPU_SUSPEND_EN */
+}
+
+/*
+ * Fold one latency sample into (sum, min, max).  A min of 0 means "no
+ * sample yet" and is always replaced.  All parameters are fully
+ * parenthesized so callers can pass expressions without precedence
+ * surprises; note 'lat' is still evaluated more than once, so pass a
+ * simple lvalue or constant.
+ */
+#define __mtk_cpc_record_lat(sum, min, max, lat)	\
+	do {						\
+		if ((lat) > (max))			\
+			(max) = (lat);			\
+		if (((lat) < (min)) || ((min) == 0))	\
+			(min) = (lat);			\
+		(sum) += (lat);				\
+	} while (0)
+
+/*
+ * Lock protecting the shared latency records.  A bakery lock is used
+ * when caches may be off (no HW-assisted coherency); otherwise the
+ * cheaper spinlock is sufficient.
+ */
+#ifdef MT_CPU_PM_USING_BAKERY_LOCK
+DEFINE_BAKERY_LOCK(mt_cpu_pm_cpc_lock);
+#define plat_cpu_pm_cpc_lock_init() bakery_lock_init(&mt_cpu_pm_cpc_lock)
+#define plat_cpu_pm_cpc_lock() bakery_lock_get(&mt_cpu_pm_cpc_lock)
+#define plat_cpu_pm_cpc_unlock() bakery_lock_release(&mt_cpu_pm_cpc_lock)
+#else
+spinlock_t mt_cpu_pm_cpc_lock;
+#define plat_cpu_pm_cpc_lock_init()
+#define plat_cpu_pm_cpc_lock() spin_lock(&mt_cpu_pm_cpc_lock)
+#define plat_cpu_pm_cpc_unlock() spin_unlock(&mt_cpu_pm_cpc_lock)
+#endif /* MT_CPU_PM_USING_BAKERY_LOCK */
+
+/*
+ * Record a matched on/off latency pair for one device.  A zero value
+ * means the HW did not capture that transition, so the sample pair is
+ * dropped entirely.
+ */
+static void mtk_cpc_record_lat(struct mtk_cpc_lat_data *lat,
+			       unsigned int on_ticks, unsigned int off_ticks)
+{
+	if ((on_ticks == 0U) || (off_ticks == 0U))
+		return;
+
+	__mtk_cpc_record_lat(lat->on_sum, lat->on_min, lat->on_max, on_ticks);
+	__mtk_cpc_record_lat(lat->off_sum, lat->off_min, lat->off_max,
+			     off_ticks);
+	lat->on_cnt++;
+	lat->off_cnt++;
+}
+
+#define CPC_CPU_LATENCY_MASK	0xFFFF
+/*
+ * Read the HW-measured on/off latencies of the given device from the
+ * CPC latency registers and fold them into the profiling statistics.
+ * No-op unless profiling is enabled; serialized with the CPC lock.
+ */
+void mtk_cpu_pm_save_cpc_latency(enum dev_type dev_type)
+{
+	unsigned int lat = 0, lat_on = 0, lat_off = 0;
+	struct mtk_cpc_lat_data *lat_data = NULL;
+
+	if (mtk_cpc_prof_is_enabled() == false)
+		return;
+
+	plat_cpu_pm_cpc_lock();
+
+	if (dev_type < DEV_TYPE_CPUSYS) {
+		/* Per-core latency register pair */
+		lat = mmio_read_32(CPC_CPU_ON_LATENCY(dev_type));
+		lat_on = lat & CPC_CPU_LATENCY_MASK;
+		lat = mmio_read_32(CPC_CPU_OFF_LATENCY(dev_type));
+		lat_off = lat & CPC_CPU_LATENCY_MASK;
+		lat_data = &cpc_dev.cpu[dev_type];
+	} else if (dev_type == DEV_TYPE_CPUSYS) {
+		lat_on = mmio_read_32(CPC_CLUSTER_ON_LATENCY);
+		lat_on = lat_on & CPC_CPU_LATENCY_MASK;
+		lat_off = mmio_read_32(CPC_CLUSTER_OFF_LATENCY);
+		lat_off = lat_off & CPC_CPU_LATENCY_MASK;
+		lat_data = &cpc_dev.cluster;
+	} else if (dev_type == DEV_TYPE_MCUSYS) {
+		lat = mmio_read_32(CPC_MCUSYS_ON_LATENCY);
+		lat_on = lat & CPC_CPU_LATENCY_MASK;
+		lat = mmio_read_32(CPC_MCUSYS_OFF_LATENCY);
+		lat_off = lat & CPC_CPU_LATENCY_MASK;
+		lat_data = &cpc_dev.mcusys;
+	}
+
+	/* lat_data stays NULL for an unknown dev_type: nothing recorded */
+	if (lat_data)
+		mtk_cpc_record_lat(lat_data, lat_on, lat_off);
+
+	plat_cpu_pm_cpc_unlock();
+}
+
+#define RVBARADDR_ONKEEPON_SEL			(MCUCFG_BASE + 0x388)
+
+/*
+ * One-time CPC setup: configure auto-dormant, enable debug/latency
+ * calculation, and finally switch the CPC flow control on.
+ */
+void mtk_cpc_init(void)
+{
+	struct mtk_plat_dev_config cfg = {
+#ifndef CPU_PM_ACP_FSM
+		.auto_off = 1,
+#else
+		/* ACP FSM platforms keep auto-off disabled by default */
+		.auto_off = 0,
+#endif /* CPU_PM_ACP_FSM */
+		.auto_thres_us = MTK_CPC_AUTO_DORMANT_THR_US,
+	};
+
+	/*
+	 * NOTE(review): this branch only runs when the register already
+	 * reads 0x1 and then writes 0x1 back (a no-op write) — verify
+	 * whether the intended condition was "!= 0x1".
+	 */
+	if (mmio_read_32(RVBARADDR_ONKEEPON_SEL) == 0x1) {
+		ERROR("ONKEEPON_SEL=%x, CPC_FLOW_CTRL_CFG=%x\n",
+		      mmio_read_32(RVBARADDR_ONKEEPON_SEL),
+		      mmio_read_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG));
+		mmio_write_32(RVBARADDR_ONKEEPON_SEL, 0x1);
+	}
+
+#if CONFIG_MTK_SMP_EN
+	mt_smp_init();
+#endif /* CONFIG_MTK_SMP_EN */
+
+#if CONFIG_MTK_CPU_SUSPEND_EN
+	mtk_cpu_pm_counter_clear();
+#endif /* CONFIG_MTK_CPU_SUSPEND_EN */
+
+	mtk_cpc_auto_dormant_en(cfg.auto_off);
+	mtk_cpc_auto_dormant_tick(cfg.auto_thres_us);
+
+	mmio_setbits_32(CPC_MCUSYS_CPC_DBG_SETTING,
+			CPC_DBG_EN | CPC_CALC_EN);
+
+	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
+			CPC_OFF_PRE_EN);
+
+	/* enable CPC */
+	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG, CPC_CTRL_ENABLE);
+
+	plat_cpu_pm_cpc_lock_init();
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.h
new file mode 100644
index 000000000..0f862d90f
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_cpc.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_CPU_PM_CPC_H
+#define MT_CPU_PM_CPC_H
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+#include <mcucfg.h>
+#include <mcupm_cfg.h>
+
+#define NEED_CPUSYS_PROT_WORKAROUND	1
+
+/* system sram registers */
+#define CPUIDLE_SRAM_REG(r)	(0x11B000 + (r))
+
+/* db dump */
+#define CPC_TRACE_SIZE		0x20
+#define CPC_TRACE_ID_NUM	13
+#define CPC_TRACE_SRAM(id)	(CPUIDLE_SRAM_REG(0x10) + (id) * CPC_TRACE_SIZE)
+
+/* backup off count */
+#define SYSRAM_RECENT_CPU_CNT(i)	CPUIDLE_SRAM_REG(4 * (i) + 0x1B0)
+#define SYSRAM_RECENT_CLUSTER_CNT	CPUIDLE_SRAM_REG(0x1D0)
+#define SYSRAM_RECENT_MCUSYS_CNT	CPUIDLE_SRAM_REG(0x1D4)
+#define SYSRAM_RECENT_CNT_TS_L		CPUIDLE_SRAM_REG(0x1D8)
+#define SYSRAM_RECENT_CNT_TS_H		CPUIDLE_SRAM_REG(0x1DC)
+#define SYSRAM_CPUSYS_CNT		CPUIDLE_SRAM_REG(0x1E8)
+#define SYSRAM_MCUSYS_CNT		CPUIDLE_SRAM_REG(0x1EC)
+#define SYSRAM_CLUSTER_CNT_BACKUP	CPUIDLE_SRAM_REG(0x1F0)
+#define MCUPM_TCM_MCUSYS_COUNTER \
+	(CPU_EB_TCM_CNT_BASE + CPU_EB_MCUSYS_CNT_OFST)
+
+/* CPC_MCUSYS_CPC_FLOW_CTRL_CFG(0x114): power flow control setting */
+#define CPC_PWR_ON_SEQ_DIS	BIT(1)
+#define CPC_PWR_ON_PRIORITY	BIT(2)
+#define CPC_AUTO_OFF_EN		BIT(5)
+#define CPC_DORMANT_WAIT_EN	BIT(14)
+#define CPC_CTRL_EN		BIT(16)
+#define CPC_OFF_PRE_EN		BIT(29)
+
+/* CPC_MCUSYS_LAST_CORE_REQ(0x118) : last core protection */
+#define CPUSYS_PROT_SET		BIT(0)
+#define MCUSYS_PROT_SET		BIT(8)
+/* CPC_PWR_ON_MASK(0x128) : last core protection */
+#define CPUSYS_PROT_CLR		BIT(8)
+#define MCUSYS_PROT_CLR		BIT(9)
+
+#define CPC_PROT_RESP_MASK	(0x3)
+/* CPC_CPUSYS_LAST_CORE_RESP(0x11C) : last core protection */
+#define CPUSYS_RESP_OFS		(16)
+/* CPC_MCUSYS_LAST_CORE_RESP(0x124) : last core protection */
+#define MCUSYS_RESP_OFS		(30)
+
+#define RETRY_CNT_MAX		(1000)
+
+#define PROT_RETRY		(0)
+#define PROT_SUCCESS		(1)
+#define PROT_GIVEUP		(2)
+
+/* CPC_MCUSYS_CPC_DBG_SETTING(0x200): debug setting */
+#define CPC_PROF_EN		BIT(0)
+#define CPC_DBG_EN		BIT(1)
+#define CPC_FREEZE		BIT(2)
+#define CPC_CALC_EN		BIT(3)
+
+/* Result codes of the last-core protection handshake */
+enum mcusys_cpc_lastcore_prot_status {
+	CPC_SUCCESS = 0,
+	CPC_ERR_FAIL,
+	CPC_ERR_TIMEOUT,
+	NF_CPC_ERR
+};
+
+/* SMC event ids dispatched by mtk_cpc_handler()/mtk_cpc_trace_dump() */
+enum mcusys_cpc_smc_events {
+	CPC_SMC_EVENT_DUMP_TRACE_DATA,
+	CPC_SMC_EVENT_GIC_DPG_SET,
+	CPC_SMC_EVENT_CPC_CONFIG,
+	CPC_SMC_EVENT_READ_CONFIG,
+	CPC_SMC_EVENT_PROF_LATENCY,
+	NF_CPC_SMC_EVENT
+};
+
+/* Sub-items of CPC_SMC_EVENT_CPC_CONFIG / READ_CONFIG */
+enum mcusys_cpc_smc_config {
+	CPC_SMC_CONFIG_PROF,
+	CPC_SMC_CONFIG_CNT_CLR,
+	CPC_SMC_CONFIG_TIME_SYNC,
+
+	NF_CPC_SMC_CONFIG,
+};
+
+/* Profiled devices: one id per core, then the cluster and MCUSYS.
+ * Order must match struct mtk_cpc_device's union layout.
+ */
+enum dev_type {
+	DEV_TYPE_CPU_0 = 0,
+	DEV_TYPE_CPUSYS = PLATFORM_CORE_COUNT,
+	DEV_TYPE_MCUSYS,
+	DEV_TYPE_NUM
+};
+
+enum {
+	CPC_PROF_ENABLE,
+	CPC_PROF_ENABLED,
+	CPC_PROF_DEV_NUM,
+	CPC_PROF_DEV_NAME,
+	CPC_PROF_OFF_CNT,
+	CPC_PROF_OFF_AVG,
+	CPC_PROF_OFF_MAX,
+	CPC_PROF_OFF_MIN,
+	CPC_PROF_ON_CNT,
+	CPC_PROF_ON_AVG,
+	CPC_PROF_ON_MAX,
+	CPC_PROF_ON_MIN,
+
+	CPC_PROF_NUM
+};
+
+#define MTK_CPC_AUTO_DORMANT_THR_US	(8000)
+#define US_TO_TICKS(us)			((us) * 26)
+#define TICKS_TO_US(tick)		((tick) / 26)
+
+int mtk_cpu_pm_cluster_prot_aquire(int cluster);
+void mtk_cpu_pm_cluster_prot_release(int cluster);
+
+void mtk_cpc_mcusys_off_reflect(void);
+int mtk_cpc_mcusys_off_prepare(void);
+
+void mtk_cpc_core_on_hint_set(int cpu);
+void mtk_cpc_core_on_hint_clr(int cpu);
+void mtk_cpc_time_sync(void);
+
+uint64_t mtk_cpc_handler(uint64_t act, uint64_t arg1, uint64_t arg2);
+uint64_t mtk_cpc_trace_dump(uint64_t act, uint64_t arg1, uint64_t arg2);
+void mtk_cpu_pm_counter_enable(bool enable);
+bool mtk_cpu_pm_counter_enabled(void);
+void mtk_cpu_pm_counter_update(unsigned int cpu);
+void mtk_cpc_prof_enable(bool enable);
+bool mtk_cpc_prof_is_enabled(void);
+void mtk_cpu_pm_save_cpc_latency(enum dev_type dev_type);
+void mtk_cpc_init(void);
+
+#endif /* MT_CPU_PM_CPC_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.c
new file mode 100644
index 000000000..020445cf6
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <errno.h>
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include <mcupm_cfg.h>
+#include "mt_cpu_pm_mbox.h"
+
+#ifdef __GNUC__
+#define mcdi_likely(x)		__builtin_expect(!!(x), 1)
+#define mcdi_unlikely(x)	__builtin_expect(!!(x), 0)
+#else
+#define mcdi_likely(x)		(x)
+#define mcdi_unlikely(x)	(x)
+#endif /* __GNUC__ */
+
+#define MCUPM_MBOX_3_BASE	(CPU_EB_TCM_BASE + CPU_EB_MBOX3_OFFSET)
+
+#define _mcupm_mbox_write(id, val) \
+	mmio_write_32(MCUPM_MBOX_3_BASE + 4 * (id), val)
+#define _mcupm_mbox_read(id) \
+	mmio_read_32(MCUPM_MBOX_3_BASE + 4 * (id))
+
+/* Request an ARMPLL low-power mode from MCUPM via mailbox 3. */
+void mtk_set_mcupm_pll_mode(unsigned int mode)
+{
+	/* Out-of-range modes are silently ignored by design. */
+	if (mode < NF_MCUPM_ARMPLL_MODE)
+		_mcupm_mbox_write(MCUPM_MBOX_ARMPLL_MODE, mode);
+}
+
+/* Read back the ARMPLL mode currently stored in the mailbox. */
+int mtk_get_mcupm_pll_mode(void)
+{
+	return _mcupm_mbox_read(MCUPM_MBOX_ARMPLL_MODE);
+}
+
+/* Request a CPU buck (power rail) mode from MCUPM via mailbox 3. */
+void mtk_set_mcupm_buck_mode(unsigned int mode)
+{
+	/* Out-of-range modes are silently ignored by design. */
+	if (mode < NF_MCUPM_BUCK_MODE)
+		_mcupm_mbox_write(MCUPM_MBOX_BUCK_MODE, mode);
+}
+
+/* Read back the buck mode currently stored in the mailbox. */
+int mtk_get_mcupm_buck_mode(void)
+{
+	return _mcupm_mbox_read(MCUPM_MBOX_BUCK_MODE);
+}
+
+/* Hint MCUPM which CPU should be the preferred wakeup core. */
+void mtk_set_cpu_pm_preffered_cpu(unsigned int cpuid)
+{
+	/*
+	 * Note: 'return <void expression>;' is a C constraint violation in a
+	 * void function, so the mailbox write is issued as a plain statement.
+	 */
+	_mcupm_mbox_write(MCUPM_MBOX_WAKEUP_CPU, cpuid);
+}
+
+/* Read back the preferred wakeup CPU hint from the mailbox. */
+unsigned int mtk_get_cpu_pm_preffered_cpu(void)
+{
+	return _mcupm_mbox_read(MCUPM_MBOX_WAKEUP_CPU);
+}
+
+/*
+ * Single-shot probe of the MCUPM boot state.
+ * If the MCUPM task is in the INIT state, program the default low-power
+ * modes (ARMPLL off, buck off) and enable every power-control feature.
+ * Returns the raw task status so the caller can decide whether to retry.
+ */
+static int mtk_wait_mbox_init_done(void)
+{
+	int sta = _mcupm_mbox_read(MCUPM_MBOX_TASK_STA);
+
+	if (sta != MCUPM_TASK_INIT)
+		return sta;
+
+	mtk_set_mcupm_pll_mode(MCUPM_ARMPLL_OFF);
+	mtk_set_mcupm_buck_mode(MCUPM_BUCK_OFF_MODE);
+
+	_mcupm_mbox_write(MCUPM_MBOX_PWR_CTRL_EN,
+			  MCUPM_MCUSYS_CTRL |
+			  MCUPM_CM_CTRL |
+			  MCUPM_BUCK_CTRL |
+			  MCUPM_ARMPLL_CTRL);
+
+	return sta;
+}
+
+/*
+ * Evaluate an MCUPM dependency condition.
+ * CPUPM_MBOX_WAIT_DEV_INIT: expects the MCUPM task in INIT state; on
+ *   success acks the firmware through MCUPM_MBOX_AP_READY.
+ * CPUPM_MBOX_WAIT_TASK_READY: expects the task in WAIT or INIT_FINISH.
+ * Returns 0 when the condition holds, -ENXIO otherwise.
+ */
+int mtk_lp_depd_condition(enum cpupm_mbox_depd_type type)
+{
+	int ret = 0, status = 0;
+
+	if (type == CPUPM_MBOX_WAIT_DEV_INIT) {
+		status = mtk_wait_mbox_init_done();
+		if (mcdi_unlikely(status != MCUPM_TASK_INIT))
+			ret = -ENXIO;
+		else
+			_mcupm_mbox_write(MCUPM_MBOX_AP_READY, 1);
+	} else if (type == CPUPM_MBOX_WAIT_TASK_READY) {
+		status = _mcupm_mbox_read(MCUPM_MBOX_TASK_STA);
+		if (mcdi_unlikely((status != MCUPM_TASK_WAIT) &&
+				  (status != MCUPM_TASK_INIT_FINISH)))
+			ret = -ENXIO;
+	}
+	return ret;
+}
+
+/* Publish the CPU group mask hint to MCUPM. */
+void mtk_set_mcupm_group_hint(unsigned int gmask)
+{
+	_mcupm_mbox_write(MCUPM_MBOX_GROUP, gmask);
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.h
new file mode 100644
index 000000000..cbf7d8ffe
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_mbox.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_CPU_PM_MBOX_H
+#define MT_CPU_PM_MBOX_H
+
+#define MCUPM_MBOX_AP_READY		0
+#define MCUPM_MBOX_GROUP		1
+#define MCUPM_MBOX_RESERVED_2		2
+#define MCUPM_MBOX_RESERVED_3		3
+#define MCUPM_MBOX_PWR_CTRL_EN		4
+#define MCUPM_MBOX_L3_CACHE_MODE	5
+#define MCUPM_MBOX_BUCK_MODE		6
+#define MCUPM_MBOX_ARMPLL_MODE		7
+
+#define MCUPM_MBOX_TASK_STA		8
+#define MCUPM_MBOX_RESERVED_9		9
+#define MCUPM_MBOX_RESERVED_10		10
+#define MCUPM_MBOX_RESERVED_11		11
+#define MCUPM_MBOX_WAKEUP_CPU		12
+
+#define MCUPM_MCUSYS_CTRL		BIT(0)
+#define MCUPM_BUCK_CTRL			BIT(1)
+#define MCUPM_ARMPLL_CTRL		BIT(2)
+#define MCUPM_CM_CTRL			BIT(3)
+
+#define MCUPM_L3_OFF_MODE		0
+#define MCUPM_L3_DORMANT_MODE		1
+#define NF_MCUPM_L3_MODE		2U
+
+#define MCUPM_BUCK_NORMAL_MODE		0
+#define MCUPM_BUCK_LP_MODE		1
+#define MCUPM_BUCK_OFF_MODE		2
+#define NF_MCUPM_BUCK_MODE		3U
+
+#define MCUPM_ARMPLL_ON			0
+#define MCUPM_ARMPLL_GATING		1
+#define MCUPM_ARMPLL_OFF		2
+#define NF_MCUPM_ARMPLL_MODE		3U
+
+#define MCUPM_TASK_UNINIT		0
+#define MCUPM_TASK_INIT			1
+#define MCUPM_TASK_INIT_FINISH		2
+#define MCUPM_TASK_WAIT			3
+#define MCUPM_TASK_RUN			4
+#define MCUPM_TASK_PAUSE		5
+
+void mtk_set_mcupm_pll_mode(unsigned int mode);
+int mtk_get_mcupm_pll_mode(void);
+
+void mtk_set_mcupm_buck_mode(unsigned int mode);
+int mtk_get_mcupm_buck_mode(void);
+
+void mtk_set_cpu_pm_preffered_cpu(unsigned int cpuid);
+unsigned int mtk_get_cpu_pm_preffered_cpu(void);
+
+void mtk_set_mcupm_group_hint(unsigned int gmask);
+
+enum cpupm_mbox_depd_type {
+	CPUPM_MBOX_WAIT_DEV_INIT,
+	CPUPM_MBOX_WAIT_TASK_READY,
+};
+
+int mtk_lp_depd_condition(enum cpupm_mbox_depd_type type);
+
+#endif /* MT_CPU_PM_MBOX_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.c
new file mode 100644
index 000000000..065d205d9
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <lib/spinlock.h>
+
+#include "mt_cpu_pm.h"
+#include "mt_cpu_pm_cpc.h"
+#include "mt_cpu_pm_smc.h"
+#include "mt_lp_irqremain.h"
+
+/*
+ * The bakery lock must be used when the cache is turned off.
+ * Otherwise spin_lock is used, as it has better performance.
+ */
+#ifdef MT_CPU_PM_USING_BAKERY_LOCK
+DEFINE_BAKERY_LOCK(mt_cpu_pm_smc_lock);
+#define plat_cpu_pm_smc_lock_init() bakery_lock_init(&mt_cpu_pm_smc_lock)
+#define plat_cpu_pm_smc_lock() bakery_lock_get(&mt_cpu_pm_smc_lock)
+#define plat_cpu_pm_smc_unlock() bakery_lock_release(&mt_cpu_pm_smc_lock)
+#else
+spinlock_t mt_cpu_pm_smc_lock;
+#define plat_cpu_pm_smc_lock_init()
+#define plat_cpu_pm_smc_lock() spin_lock(&mt_cpu_pm_smc_lock)
+#define plat_cpu_pm_smc_unlock() spin_unlock(&mt_cpu_pm_smc_lock)
+#endif /* MT_CPU_PM_USING_BAKERY_LOCK */
+
+/*
+ * SMC dispatcher for MT_LPM_SMC_USER_CPU_PM.
+ * Only CPC_COMMAND is handled; every other lp_id returns 0.
+ */
+static uint64_t cpupm_dispatcher(u_register_t lp_id,
+				 u_register_t act,
+				 u_register_t arg1,
+				 u_register_t arg2,
+				 void *handle,
+				 struct smccc_res *smccc_ret)
+{
+	uint64_t res = 0;
+
+	switch (lp_id) {
+	case CPC_COMMAND:
+		res = mtk_cpc_handler(act, arg1, arg2);
+		break;
+	default:
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * SMC dispatcher for MT_LPM_SMC_USER_CPU_PM_LP.
+ * Handles CPC commands and (when enabled) the remain-IRQ table,
+ * suspend notification, PM counters/records and CPU retention control.
+ * Returns the command-specific value, or 0 for unhandled commands.
+ */
+static uint64_t cpupm_lp_dispatcher(u_register_t lp_id,
+				    u_register_t act,
+				    u_register_t arg1,
+				    u_register_t arg2,
+				    void *handle,
+				    struct smccc_res *smccc_ret)
+{
+	uint64_t res = 0;
+#ifdef CPU_PM_IRQ_REMAIN_ENABLE
+	int ret;
+#endif
+	switch (lp_id) {
+	case LP_CPC_COMMAND:
+		res = mtk_cpc_handler(act, arg1, arg2);
+		break;
+#ifdef CPU_PM_IRQ_REMAIN_ENABLE
+	case IRQS_REMAIN_ALLOC:
+		if (act & MT_LPM_SMC_ACT_GET)
+			res = (uint64_t)mt_lp_irqremain_count();
+		break;
+	case IRQS_REMAIN_CTRL:
+		plat_cpu_pm_smc_lock();
+		if (act & MT_LPM_SMC_ACT_SUBMIT) {
+			/* Report a failed submit instead of dropping it. */
+			ret = mt_lp_irqremain_submit();
+			if (ret)
+				INFO("Irqs remain submit fail\n");
+		} else if (act & MT_LPM_SMC_ACT_PUSH) {
+			ret = mt_lp_irqremain_push();
+			if (ret)
+				INFO("Irqs remain push fail\n");
+		} else {
+			INFO("Irqs remain control not support! (0x%lx)\n", act);
+		}
+		plat_cpu_pm_smc_unlock();
+		break;
+	case IRQS_REMAIN_IRQ:
+	case IRQS_REMAIN_WAKEUP_CAT:
+	case IRQS_REMAIN_WAKEUP_SRC:
+		plat_cpu_pm_smc_lock();
+		if (act & MT_LPM_SMC_ACT_SET) {
+			const struct mt_lp_irqinfo info = {
+				.val = (unsigned int)arg1,
+			};
+
+			ret = mt_lp_irqremain_set((unsigned int)lp_id, &info);
+			if (ret)
+				INFO("Irqs remain command: %lu, set fail\n",
+				     lp_id);
+		} else if (act & MT_LPM_SMC_ACT_GET) {
+			struct mt_lp_irqinfo info;
+
+			ret = mt_lp_irqremain_get((unsigned int)arg1,
+						  (unsigned int)lp_id, &info);
+			if (ret) {
+				INFO("Irqs remain command: %lu, get fail\n",
+				     lp_id);
+				res = 0;
+			} else {
+				res = (uint64_t)info.val;
+			}
+		} else {
+			INFO("Irqs remain command not support! (0x%lx)\n", act);
+		}
+		plat_cpu_pm_smc_unlock();
+		break;
+#ifdef CPU_PM_SUSPEND_NOTIFY
+	case SUSPEND_SRC:
+		ret = cpupm_set_suspend_state((unsigned int)act,
+					      (unsigned int)arg1);
+		if (ret)
+			INFO("cpu_pm lp command: %lu, set fail\n", lp_id);
+		break;
+#endif
+	case CPU_PM_COUNTER_CTRL:
+		if (act & MT_LPM_SMC_ACT_SET)
+			mtk_cpu_pm_counter_enable((bool)arg1);
+		else if (act & MT_LPM_SMC_ACT_GET)
+			res = (uint64_t)mtk_cpu_pm_counter_enabled();
+		break;
+	case CPU_PM_RECORD_CTRL:
+		if (act & MT_LPM_SMC_ACT_GET) {
+			if (arg1 == 0)
+				res = mtk_mcusys_off_record_cnt_get();
+			else if (arg1 == 1)
+				res = mtk_mcusys_off_record_name_get();
+		}
+		break;
+#if CONFIG_MTK_PM_SUPPORT && CONFIG_MTK_CPU_SUSPEND_EN
+	case SUSPEND_ABORT_REASON:
+		if (act & MT_LPM_SMC_ACT_GET)
+			res = mtk_suspend_abort_reason_get();
+		break;
+#endif
+#ifdef CONFIG_MTK_CPU_ILDO
+	case CPU_PM_RET_CTRL:
+		if (act & MT_LPM_SMC_ACT_SET)
+			res = cpupm_cpu_retention_control((unsigned int) arg1);
+		else if (act & MT_LPM_SMC_ACT_GET)
+			res = cpupu_get_cpu_retention_control();
+		else if (act & MT_LPM_SMC_ACT_COMPAT)
+			res = CPU_PM_CPU_RET_IS_ENABLED;
+		break;
+#endif
+#endif
+	default:
+		break;
+	}
+	return res;
+}
+
+/*
+ * Secure-world SMC dispatcher for MT_LPM_SMC_USER_SECURE_CPU_PM.
+ * Only CPC_COMMAND (trace dump) is handled; others return 0.
+ */
+static uint64_t secure_cpupm_dispatcher(u_register_t lp_id,
+					u_register_t act,
+					u_register_t arg1,
+					u_register_t arg2,
+					void *handle,
+					struct smccc_res *smccc_ret)
+{
+	uint64_t res = 0;
+
+	switch (lp_id) {
+	case CPC_COMMAND:
+		res = mtk_cpc_trace_dump(act, arg1, arg2);
+		break;
+	default:
+		break;
+	}
+
+	return res;
+}
+
+/*
+ * Register all cpu_pm SMC dispatchers (normal, low-power and secure)
+ * with the MTK LPM dispatch layer and initialise the local lock.
+ */
+void cpupm_smc_init(void)
+{
+	plat_cpu_pm_smc_lock_init();
+	mt_lpm_dispatcher_registry(MT_LPM_SMC_USER_CPU_PM,
+				   cpupm_dispatcher);
+
+	mt_lpm_dispatcher_registry(MT_LPM_SMC_USER_CPU_PM_LP,
+				   cpupm_lp_dispatcher);
+
+	mt_secure_lpm_dispatcher_registry(MT_LPM_SMC_USER_SECURE_CPU_PM,
+					  secure_cpupm_dispatcher);
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.h
new file mode 100644
index 000000000..8b643b066
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_cpu_pm_smc.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_CPU_PM_SMC_H
+#define MT_CPU_PM_SMC_H
+
+#include <lpm/mt_lp_rm.h>
+#include <lpm/mt_lpm_dispatch.h>
+#include <lpm/mt_lpm_smc.h>
+
+void cpupm_smc_init(void);
+
+#endif /* MT_CPU_PM_SMC_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.c
new file mode 100644
index 000000000..ded1a60fe
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+
+#include <drivers/cirq.h>
+
+#include <platform_def.h>
+
+#include <lib/pm/mtk_pm.h>
+#include <lpm/mt_lp_rm.h>
+#include "mt_cpu_pm.h"
+#include "mt_lp_irqremain.h"
+
+static struct mt_irqremain remain_irqs;
+static struct mt_irqremain *p_irqs;
+
+/*
+ * Commit the entry staged by mt_lp_irqremain_set() by advancing the
+ * table count. Returns -1 when the table is already full.
+ */
+int mt_lp_irqremain_push(void)
+{
+	if (remain_irqs.count >= MT_IRQ_REMAIN_MAX)
+		return -1;
+	remain_irqs.count += 1;
+	return 0;
+}
+
+/* Drop the most recently pushed entry; -1 when the table is empty. */
+int mt_lp_irqremain_pop(void)
+{
+	if (remain_irqs.count == 0)
+		return -1;
+	remain_irqs.count -= 1;
+	return 0;
+}
+
+/*
+ * Stage one field of the next remain-IRQ entry (slot remain_irqs.count).
+ * Must be followed by mt_lp_irqremain_push() to commit the slot.
+ * Fails after submit (table frozen), on a NULL payload, when the table
+ * is full, or for an unknown field type.
+ */
+int mt_lp_irqremain_set(unsigned int type,
+			const struct mt_lp_irqinfo *info)
+{
+	unsigned int idx;
+
+	if (p_irqs || !info)
+		return -1;
+
+	idx = remain_irqs.count;
+	/*
+	 * Bound the staging slot: without this check, a set() issued while
+	 * count == MT_IRQ_REMAIN_MAX writes one element past the arrays
+	 * (push() only rejects after the write has happened).
+	 */
+	if (idx >= MT_IRQ_REMAIN_MAX)
+		return -1;
+
+	switch (type) {
+	case IRQS_REMAIN_IRQ:
+		remain_irqs.irqs[idx] = info->val;
+		break;
+	case IRQS_REMAIN_WAKEUP_CAT:
+		remain_irqs.wakeupsrc_cat[idx] = info->val;
+		break;
+	case IRQS_REMAIN_WAKEUP_SRC:
+		remain_irqs.wakeupsrc[idx] = info->val;
+		break;
+	default:
+		/* Unknown field types must not report success. */
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Read one field of a committed remain-IRQ entry.
+ * Only valid after submit (p_irqs set); valid indices are
+ * 0 .. remain_irqs.count - 1.
+ */
+int mt_lp_irqremain_get(unsigned int idx, unsigned int type,
+			struct mt_lp_irqinfo *info)
+{
+	/* '>=': index 'count' is one past the last committed entry. */
+	if (!p_irqs || !info || (idx >= remain_irqs.count))
+		return -1;
+
+	switch (type) {
+	case IRQS_REMAIN_IRQ:
+		info->val = remain_irqs.irqs[idx];
+		break;
+	case IRQS_REMAIN_WAKEUP_CAT:
+		info->val = remain_irqs.wakeupsrc_cat[idx];
+		break;
+	case IRQS_REMAIN_WAKEUP_SRC:
+		info->val = remain_irqs.wakeupsrc[idx];
+		break;
+	default:
+		/* Unknown field types must not report success. */
+		return -1;
+	}
+	return 0;
+}
+
+/* Number of committed remain-IRQ entries. */
+unsigned int mt_lp_irqremain_count(void)
+{
+	return remain_irqs.count;
+}
+
+/*
+ * Freeze and publish the remain-IRQ table: install the wakeup sources,
+ * notify the resource manager, and mark the table frozen (p_irqs).
+ * Returns -1 when the table is empty.
+ */
+int mt_lp_irqremain_submit(void)
+{
+	if (remain_irqs.count == 0)
+		return -1;
+	set_wakeup_sources(remain_irqs.irqs, remain_irqs.count);
+	mt_lp_rm_do_update(-1, PLAT_RC_UPDATE_REMAIN_IRQS, &remain_irqs);
+	p_irqs = &remain_irqs;
+	return 0;
+}
+
+/*
+ * Arm the CIRQ to latch the remain IRQs while the GIC is unavailable
+ * (reset, clone the GIC state, enable). Fails before submit.
+ * NOTE(review): "aquire" is a typo for "acquire" but is kept, as it is
+ * part of the public interface.
+ */
+int mt_lp_irqremain_aquire(void)
+{
+	if (!p_irqs)
+		return -1;
+
+	mt_cirq_sw_reset();
+	mt_cirq_clone_gic();
+	mt_cirq_enable();
+	return 0;
+}
+
+/*
+ * Disarm the CIRQ: flush latched interrupts back to the GIC and
+ * disable it. Fails before submit.
+ */
+int mt_lp_irqremain_release(void)
+{
+	if (!p_irqs)
+		return -1;
+	mt_cirq_flush();
+	mt_cirq_disable();
+	return 0;
+}
+
+/* Reset the module to the "not yet submitted" state. */
+void mt_lp_irqremain_init(void)
+{
+	p_irqs = NULL;
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.h
new file mode 100644
index 000000000..1fafe13e0
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_lp_irqremain.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_LP_IRQREMAIN_H
+#define MT_LP_IRQREMAIN_H
+
+struct mt_lp_irqinfo {
+	unsigned int val;
+};
+
+enum mt_lp_irqremain_type {
+	MT_LP_IRQREMAIN_IRQ,
+	MT_LP_IRQREMAIN_WAKEUP_CAT,
+	MT_LP_IRQREMAIN_WAKEUP_SRC,
+};
+
+int mt_lp_irqremain_set(unsigned int type,
+			const struct mt_lp_irqinfo *value);
+int mt_lp_irqremain_get(unsigned int idx, unsigned int type,
+			struct mt_lp_irqinfo *value);
+unsigned int mt_lp_irqremain_count(void);
+int mt_lp_irqremain_push(void);
+int mt_lp_irqremain_pop(void);
+int mt_lp_irqremain_submit(void);
+int mt_lp_irqremain_aquire(void);
+int mt_lp_irqremain_release(void);
+void mt_lp_irqremain_init(void);
+
+#endif /* MT_LP_IRQREMAIN_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.c
new file mode 100644
index 000000000..062a11f52
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "mt_ppu.h"
+
+#define MTK_PPU_PWR_DYNAMIC_POLICY_SET(_ctrl, _policy) \
+	mmio_clrsetbits_32(_ctrl->ppu_pwpr, \
+			   PPU_PWPR_MASK, \
+			   PPU_PWPR_DYNAMIC_MODE | ((_policy) & PPU_PWPR_MASK))
+
+#define MTK_PPU_PWR_STATIC_POLICY_SET(_ctrl, _policy) \
+	mmio_clrsetbits_32(_ctrl->ppu_pwpr, \
+			   PPU_PWPR_MASK | PPU_PWPR_DYNAMIC_MODE, \
+			   ((_policy) & PPU_PWPR_MASK))
+
+/* Program the PPU power policy in dynamic (HW-managed) mode. */
+void mt_smp_ppu_pwr_dynamic_set(struct ppu_pwr_ctrl *ctrl,
+				unsigned int policy)
+{
+	CPU_PM_ASSERT(ctrl);
+	MTK_PPU_PWR_DYNAMIC_POLICY_SET(ctrl, policy);
+	dmbsy();
+}
+
+/* Program the PPU power policy in static (SW-requested) mode. */
+void mt_smp_ppu_pwr_static_set(struct ppu_pwr_ctrl *ctrl,
+			       unsigned int policy)
+{
+	CPU_PM_ASSERT(ctrl);
+	MTK_PPU_PWR_STATIC_POLICY_SET(ctrl, policy);
+	dmbsy();
+}
+
+/*
+ * Program the PPU power policy (dynamic or static, depending on 'mode')
+ * and the device-control delay registers (DCDR0/1).
+ */
+void mt_smp_ppu_pwr_set(struct ppu_pwr_ctrl *ctrl,
+			unsigned int mode,
+			unsigned int policy)
+{
+	CPU_PM_ASSERT(ctrl);
+	if (mode & PPU_PWPR_DYNAMIC_MODE)
+		MTK_PPU_PWR_DYNAMIC_POLICY_SET(ctrl, policy);
+	else
+		MTK_PPU_PWR_STATIC_POLICY_SET(ctrl, policy);
+	mmio_write_32(ctrl->ppu_dcdr0, MT_PPU_DCDR0);
+	mmio_write_32(ctrl->ppu_dcdr1, MT_PPU_DCDR1);
+	dsbsy();
+}
+
+/*
+ * Program only the PPU operating-mode (OP) field: clear the previous
+ * OP policy and dynamic flag, then install the new policy, optionally
+ * with OP dynamic mode.
+ */
+void mt_smp_ppu_op_set(struct ppu_pwr_ctrl *ctrl,
+		       unsigned int mode,
+		       unsigned int policy)
+{
+	unsigned int val;
+
+	CPU_PM_ASSERT(ctrl);
+
+	val = mmio_read_32(ctrl->ppu_pwpr);
+	val &= ~(PPU_PWPR_OP_MASK | PPU_PWPR_OP_DYNAMIC_MODE);
+
+	val |= PPU_PWPR_OP_MODE(policy);
+	if (mode & PPU_PWPR_OP_DYNAMIC_MODE)
+		val |= PPU_PWPR_OP_DYNAMIC_MODE;
+
+	mmio_write_32(ctrl->ppu_pwpr, val);
+	dsbsy();
+}
+
+/*
+ * Program both the PPU operating-mode (OP) field and the power policy
+ * in one read-modify-write of PWPR.
+ * The OP field is cleared before the new policy is OR-ed in, matching
+ * mt_smp_ppu_op_set(); otherwise stale OP bits from the previous
+ * register value would survive the update.
+ */
+void mt_smp_ppu_set(struct ppu_pwr_ctrl *ctrl,
+		    unsigned int op_mode,
+		    unsigned int policy,
+		    unsigned int pwr_mode,
+		    unsigned int pwr_policy)
+{
+	unsigned int val;
+
+	CPU_PM_ASSERT(ctrl);
+	val = mmio_read_32(ctrl->ppu_pwpr);
+
+	/* Replace, not accumulate, the OP policy bits. */
+	val &= ~(PPU_PWPR_OP_MASK | PPU_PWPR_OP_DYNAMIC_MODE);
+	val |= PPU_PWPR_OP_MODE(policy);
+	if (op_mode & PPU_PWPR_OP_DYNAMIC_MODE)
+		val |= PPU_PWPR_OP_DYNAMIC_MODE;
+
+	if (pwr_mode & PPU_PWPR_DYNAMIC_MODE) {
+		val &= ~(PPU_PWPR_MASK);
+		val |= (PPU_PWPR_DYNAMIC_MODE | (pwr_policy & PPU_PWPR_MASK));
+	} else {
+		val &= ~(PPU_PWPR_MASK | PPU_PWPR_DYNAMIC_MODE);
+		val |= (pwr_policy & PPU_PWPR_MASK);
+	}
+	mmio_write_32(ctrl->ppu_pwpr, val);
+	dsbsy();
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.h
new file mode 100644
index 000000000..76f06b5f2
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_ppu.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_PPU_H
+#define MT_PPU_H
+
+#include <lib/mmio.h>
+#include "mt_cpu_pm.h"
+
+/* PPU PWPR definition */
+#define PPU_PWPR_MASK		0xF
+#define PPU_PWPR_MODE_MASK	0x1
+#define PPU_PWPR_OFF		0
+#define PPU_PWPR_MEM_RET	2
+#define PPU_PWPR_FULL_RET	5
+#define PPU_PWPR_MEM_OFF	6
+#define PPU_PWPR_FUN_RET	7
+#define PPU_PWPR_ON		8
+#define PPU_PWPR_WARM_RESET	10
+#define PPU_PWPR_DYNAMIC_MODE	BIT(8)
+
+#define PPU_PWPR_OP_MASK			0xF0000
+#define PPU_PWPR_OP_DYNAMIC_MODE		BIT(24)
+#define PPU_PWPR_OP_MODE(_policy)		(((_policy) << 16) & PPU_PWPR_OP_MASK)
+#define PPU_PWPR_OP_ONE_SLICE_SF_ONLY		0
+#define PPU_PWPR_OP_ONE_SLICE_HALF_DRAM		1
+#define PPU_PWPR_OP_ONE_SLICE_FULL_DRAM		3
+#define PPU_PWPR_OP_ALL_SLICE_SF_ONLY		4
+#define PPU_PWPR_OP_ALL_SLICE_HALF_DRAM		5
+#define PPU_PWPR_OP_ALL_SLICE_FULL_DRAM		7
+
+#define DSU_PPU_PWPR_OP_MODE_DEF (PPU_PWPR_OP_ONE_SLICE_HALF_DRAM)
+
+/* PPU PWSR definition */
+#define PPU_PWSR_STATE_ON	BIT(3)
+
+#ifdef CPU_PM_ACP_FSM
+#define PPU_PWSR_OP_STATUS	0x30000
+#define PPU_OP_ST_SF_ONLY	0x0
+#endif /* CPU_PM_ACP_FSM */
+
+#define MT_PPU_DCDR0			0x00606060
+#define MT_PPU_DCDR1			0x00006060
+
+void mt_smp_ppu_pwr_set(struct ppu_pwr_ctrl *ctrl,
+			unsigned int mode,
+			unsigned int policy);
+
+void mt_smp_ppu_op_set(struct ppu_pwr_ctrl *ctrl,
+		       unsigned int mode,
+		       unsigned int policy);
+
+void mt_smp_ppu_pwr_dynamic_set(struct ppu_pwr_ctrl *ctrl,
+				unsigned int policy);
+
+void mt_smp_ppu_pwr_static_set(struct ppu_pwr_ctrl *ctrl,
+			       unsigned int policy);
+
+void mt_smp_ppu_set(struct ppu_pwr_ctrl *ctrl,
+		    unsigned int op_mode,
+		    unsigned int policy,
+		    unsigned int pwr_mode,
+		    unsigned int pwr_policy);
+
+#endif /* MT_PPU_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.c b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.c
new file mode 100644
index 000000000..6e647060f
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <assert.h>
+
+#include <arch_helpers.h>
+#include <common/debug.h>
+#include <drivers/delay_timer.h>
+#include <plat/common/platform.h>
+
+#include <lib/pm/mtk_pm.h>
+#include <mcucfg.h>
+#include "mt_cpu_pm.h"
+#include "mt_ppu.h"
+#include "mt_smp.h"
+
+#define is_core_power_status_on(_pwr_ctrl)\
+	(!!((mmio_read_32(_pwr_ctrl->pwr.ppu_pwsr)) & (PPU_PWSR_STATE_ON)))
+
+#ifndef CPU_PM_CORE_ARCH64_ONLY
+/*
+ * Select AArch64 vs AArch32 execution state for one core by toggling
+ * its aa64naa32 bit (bits [16:23], one bit per core) in the arch
+ * control register. Only cluster 0 is supported on this platform.
+ */
+void mt_smp_core_init_arch(int cluster,
+			   int cpu,
+			   int arm64,
+			   struct cpu_pwr_ctrl *pwr_ctrl)
+{
+	CPU_PM_ASSERT(cluster == 0);
+	CPU_PM_ASSERT(pwr_ctrl);
+
+	/* aa64naa32 in bits[16:23] */
+	if (arm64)
+		mmio_setbits_32(pwr_ctrl->arch_addr,
+				BIT(AA64NAA32_FLAG_START_BIT + cpu));
+	else
+		mmio_clrbits_32(pwr_ctrl->arch_addr,
+				BIT(AA64NAA32_FLAG_START_BIT + cpu));
+}
+#endif /* CPU_PM_CORE_ARCH64_ONLY */
+
+/*
+ * Program the warm-boot entry point (RVBAR) for one core.
+ * NOTE(review): the high word is hard-wired to 0, so 'entry' is
+ * implicitly assumed to be below 4 GiB — confirm against the
+ * platform memory map.
+ */
+void mt_smp_core_bootup_address_set(int cluster,
+				    int cpu,
+				    struct cpu_pwr_ctrl *pwr_ctrl,
+				    uintptr_t entry)
+{
+	CPU_PM_ASSERT(pwr_ctrl);
+
+	/* Set bootup address */
+	mmio_write_32(pwr_ctrl->rvbaraddr_l, entry);
+	mmio_write_32(pwr_ctrl->rvbaraddr_h, 0);
+}
+
+/*
+ * Power on one core through its PPU.
+ * The core PPU is first put in dynamic mode with an OFF policy; if the
+ * core is not already on, a SPM wakeup request is raised and the code
+ * busy-waits (1 us steps, panics on timeout) until the PPU reports ON.
+ */
+int mt_smp_power_core_on(unsigned int cpu_id, struct cpu_pwr_ctrl *pwr_ctrl)
+{
+	unsigned int val = 0;
+
+	CPU_PM_ASSERT(pwr_ctrl);
+
+	mt_smp_ppu_pwr_set(&pwr_ctrl->pwr, PPU_PWPR_DYNAMIC_MODE, PPU_PWPR_OFF);
+	val = is_core_power_status_on(pwr_ctrl);
+	if (!val) {
+		/* Allow GIC wakeup requests to reach this core again. */
+		mmio_clrbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
+				GIC_WAKEUP_IGNORE(cpu_id));
+		mmio_setbits_32(SPM_EXT_INT_WAKEUP_REQ_SET, BIT(cpu_id));
+
+		mmio_clrbits_32(SPMC_CONTROL_CONFIG,
+				SPMC_CPU_RESET_PWRON_CONFIG << (cpu_id));
+		dsbsy();
+		isb();
+
+		/* 'val' (0 here) doubles as the timeout counter below. */
+		while (!is_core_power_status_on(pwr_ctrl))
+			DO_SMP_CORE_ON_WAIT_TIMEOUT(cpu_id, val);
+		mmio_setbits_32(SPM_EXT_INT_WAKEUP_REQ_CLR, BIT(cpu_id));
+	} else {
+		mmio_clrbits_32(SPMC_CONTROL_CONFIG,
+				SPMC_CPU_RESET_PWRON_CONFIG << (cpu_id));
+		INFO("[%s:%d] - core_%u have been power on\n",
+		     __func__, __LINE__, cpu_id);
+	}
+
+	return MTK_CPUPM_E_OK;
+}
+
+/*
+ * Prepare one core for power-off: mask its GIC wakeup requests so the
+ * CPC flow does not abort the power-down. The PPU (dynamic mode)
+ * performs the actual power removal.
+ */
+int mt_smp_power_core_off(unsigned int cpu_id, struct cpu_pwr_ctrl *pwr_ctrl)
+{
+	mmio_setbits_32(CPC_MCUSYS_CPC_FLOW_CTRL_CFG,
+			GIC_WAKEUP_IGNORE(cpu_id));
+	return MTK_CPUPM_E_OK;
+}
+
+/*
+ * One-time SMP bring-up: unlock SPM register writes, clear the
+ * reset-power-on requests for mcusys/cluster, hand DSU ISO/CKDIS
+ * control to the PPU, and (if present) release the CPU buck isolation.
+ */
+void mt_smp_init(void)
+{
+	mmio_write_32(SPM_POWERON_CONFIG_EN, PROJECT_CODE | BCLK_CG_EN);
+
+	/* INFO=SPMC_INIT: clear resetpwron of mcusys/cluster/core0 */
+	mmio_clrbits_32(SPMC_CONTROL_CONFIG, SPMC_MCUSYS_RESET_PWRON_CONFIG);
+	mmio_clrbits_32(SPMC_CONTROL_CONFIG, SPMC_CPUTOP_RESET_PWRON_CONFIG);
+
+	/* Switch DSU ISO/CKDIS control from PCSM to PPU */
+	mmio_setbits_32(CPC_FCM_SPMC_SW_CFG2,
+			(CPUSYS_PPU_CLK_EN_CTRL | CPUSYS_PPU_ISO_CTRL));
+
+#ifdef SPM_CPU_BUCK_ISO_CON
+	/* Make sure that buck iso have been released before power on */
+	mmio_write_32(SPM_CPU_BUCK_ISO_CON, SPM_CPU_BUCK_ISO_DEFAUT);
+#endif /* SPM_CPU_BUCK_ISO_CON */
+}
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.h b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.h
new file mode 100644
index 000000000..d03483916
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/mt_smp.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2025, MediaTek Inc. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef MT_SMP_H
+#define MT_SMP_H
+
+#include <lib/mmio.h>
+#include <platform_def.h>
+
+#include "mt_cpu_pm.h"
+
+#define CPUSYS_PPU_CLK_EN_CTRL		BIT(12)
+#define CPUSYS_PPU_ISO_CTRL		BIT(13)
+#define AA64NAA32_FLAG_START_BIT	16
+
+#define SMP_CORE_TIMEOUT_MAX		(50000)
+#define DO_SMP_CORE_ON_WAIT_TIMEOUT(cpu_id, k_cnt) ({ \
+	if (k_cnt >= SMP_CORE_TIMEOUT_MAX) { \
+		INFO("[%s:%d] - CORE%d ON WAIT TIMEOUT %u us (> %u)\n", \
+		     __func__, __LINE__, cpu_id, k_cnt, SMP_CORE_TIMEOUT_MAX); \
+		panic(); \
+	} \
+	k_cnt++; udelay(1); })
+
+#ifdef CPU_PM_CORE_ARCH64_ONLY
+#define mt_smp_core_init_arch(_a, _b, _c, _d)
+#else
+void mt_smp_core_init_arch(int cluster, int cpu, int arm64,
+			   struct cpu_pwr_ctrl *pwr_ctrl);
+#endif /* CPU_PM_CORE_ARCH64_ONLY */
+
+void mt_smp_core_bootup_address_set(int cluster,
+				    int cpu,
+				    struct cpu_pwr_ctrl *pwr_ctrl,
+				    uintptr_t entry);
+
+int mt_smp_power_core_on(unsigned int cpu_id, struct cpu_pwr_ctrl *pwr_ctrl);
+int mt_smp_power_core_off(unsigned int cpu_id, struct cpu_pwr_ctrl *pwr_ctrl);
+
+void mt_smp_init(void);
+
+int mt_smp_cluster_pwpr_init(struct cluster_pwr_ctrl *pwr_ctrl);
+int mt_smp_cluster_pwpr_op_init(struct cluster_pwr_ctrl *pwr_ctrl);
+
+#endif /* MT_SMP_H */
diff --git a/plat/mediatek/drivers/cpu_pm/cpcv5_4/rules.mk b/plat/mediatek/drivers/cpu_pm/cpcv5_4/rules.mk
new file mode 100644
index 000000000..9819d0e56
--- /dev/null
+++ b/plat/mediatek/drivers/cpu_pm/cpcv5_4/rules.mk
@@ -0,0 +1,43 @@
+#
+# Copyright (c) 2025, MediaTek Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+#Prologue, init variable
+LOCAL_DIR := $(call GET_LOCAL_DIR)
+
+CPU_PM_PWR_REQ := y
+CPU_PM_PWR_REQ_DEBUG := n
+
+#Define your module name
+MODULE := cpcv${CONFIG_MTK_CPU_PM_ARCH}
+
+#Add your source code here
+LOCAL_SRCS-y := ${LOCAL_DIR}/mt_cpu_pm.c \
+		${LOCAL_DIR}/mt_cpu_pm_cpc.c \
+		${LOCAL_DIR}/mt_cpu_pm_smc.c \
+		${LOCAL_DIR}/mt_ppu.c
+
+LOCAL_SRCS-$(CPU_PM_TINYSYS_SUPPORT) += ${LOCAL_DIR}/mt_cpu_pm_mbox.c
+LOCAL_SRCS-$(CONFIG_MTK_SMP_EN) += ${LOCAL_DIR}/mt_smp.c
+
+LOCAL_SRCS-${CPU_PM_IRQ_REMAIN_ENABLE} += ${LOCAL_DIR}/mt_lp_irqremain.c
+$(eval $(call add_defined_option,CPU_PM_IRQ_REMAIN_ENABLE))
+
+$(eval $(call add_defined_option,CPU_PM_DOMAIN_CORE_ONLY))
+$(eval $(call add_defined_option,CPU_PM_CORE_ARCH64_ONLY))
+$(eval $(call add_defined_option,CPU_PM_TINYSYS_SUPPORT))
+
+$(eval $(call add_defined_option,CPU_PM_SUSPEND_NOTIFY))
+
+$(eval $(call add_defined_option,CPU_PM_PWR_REQ))
+$(eval $(call add_defined_option,CPU_PM_PWR_REQ_DEBUG))
+
+$(eval $(call add_defined_option,CONFIG_MTK_CPU_ILDO))
+$(eval $(call add_defined_option,CPU_PM_CPU_RET_MASK))
+
+#Epilogue, build as module
+$(eval $(call MAKE_MODULE,$(MODULE),$(LOCAL_SRCS-y),$(MTK_BL)))
+
+$(eval $(call add_defined_option,CPU_PM_ACP_FSM))