Implement power management support for the VCP remoteproc driver by adding suspend and resume callbacks. This allows the VCP coprocessor to properly transition to low-power states when the system suspends, and restore functionality when the system resumes.
The suspend/resume functionality coordinates with the VCP firmware to ensure graceful state transitions and maintain communication channel integrity across power state changes. Signed-off-by: Xiangzhi Tang <[email protected]> --- drivers/remoteproc/mtk_vcp_common.c | 111 ++++++++++++++++++++++++++++ drivers/remoteproc/mtk_vcp_common.h | 6 ++ drivers/remoteproc/mtk_vcp_rproc.c | 67 +++++++++++++++++ drivers/remoteproc/mtk_vcp_rproc.h | 2 + 4 files changed, 186 insertions(+) diff --git a/drivers/remoteproc/mtk_vcp_common.c b/drivers/remoteproc/mtk_vcp_common.c index 039c0a469631..a1e2e6e0ada2 100644 --- a/drivers/remoteproc/mtk_vcp_common.c +++ b/drivers/remoteproc/mtk_vcp_common.c @@ -207,6 +207,11 @@ bool is_vcp_ready(struct mtk_vcp_device *vcp, return vcp_is_core_ready(vcp, core_id); } +bool is_vcp_suspending(struct mtk_vcp_device *vcp) +{ + return vcp->vcp_cluster->is_suspending; +} + int wait_core_hart_shutdown(struct mtk_vcp_device *vcp, enum vcp_core_id core_id) { @@ -269,6 +274,92 @@ int wait_core_hart_shutdown(struct mtk_vcp_device *vcp, return 0; } +void vcp_wait_core_stop(struct mtk_vcp_device *vcp, enum vcp_core_id core_id) +{ + u32 status; + u32 stop_ctrl; + u32 num_harts; + int ret; + + if (core_id >= VCP_CORE_TOTAL) { + dev_err(vcp->dev, "%s, Invalid core id %d\n", __func__, core_id); + return; + } + + num_harts = vcp->vcp_cluster->hart_count[core_id]; + + /* Build stop control mask based on number of harts */ + stop_ctrl = B_CORE_GATED | B_HART0_HALT | B_CORE_AXIS_BUSY; + if (num_harts > 1) + stop_ctrl |= B_HART1_HALT; + + if (core_id == VCP_ID) { + ret = readl_poll_timeout(vcp->vcp_cluster->cfg + R_CORE0_STATUS, + status, + (status & stop_ctrl) == (stop_ctrl & ~B_CORE_AXIS_BUSY), + USEC_PER_MSEC, + CORE_HART_SHUTDOWN_TIMEOUT_MS * USEC_PER_MSEC); + if (ret) + dev_err(vcp->dev, "VCP core stop timeout, status 0x%x\n", status); + } else if (core_id == MMUP_ID) { + ret = readl_poll_timeout(vcp->vcp_cluster->cfg + R_CORE1_STATUS, + status, + (status & 
stop_ctrl) == (stop_ctrl & ~B_CORE_AXIS_BUSY), + USEC_PER_MSEC, + CORE_HART_SHUTDOWN_TIMEOUT_MS * USEC_PER_MSEC); + if (ret) + dev_err(vcp->dev, "MMUP core stop timeout, status 0x%x\n", status); + } +} + +static bool vcp_get_suspend_resume_status(struct mtk_vcp_device *vcp) +{ + if (vcp->vcp_cluster->core_nums > MMUP_ID) + return !!(readl(vcp->vcp_cluster->cfg_sec + R_GPR3_SEC) & VCP_AP_SUSPEND) && + !!(readl(vcp->vcp_cluster->cfg_sec + R_GPR2_SEC) & MMUP_AP_SUSPEND); + + return !!(readl(vcp->vcp_cluster->cfg_sec + R_GPR3_SEC) & VCP_AP_SUSPEND); +} + +void vcp_wait_suspend_resume(struct mtk_vcp_device *vcp, bool suspend) +{ + bool status; + int ret; + + if (suspend) { + writel(B_CORE0_SUSPEND, vcp->vcp_cluster->cfg_core + AP_R_GPR2); + writel(SUSPEND_MAGIC, vcp->vcp_cluster->cfg + VCP_C0_GPR0_SUSPEND_RESUME); + if (vcp->vcp_cluster->core_nums > MMUP_ID) { + writel(B_CORE1_SUSPEND, vcp->vcp_cluster->cfg_core + AP_R_GPR3); + writel(SUSPEND_MAGIC, vcp->vcp_cluster->cfg + VCP_C1_GPR0_SUSPEND_RESUME); + } + } else { + writel(B_CORE0_RESUME, vcp->vcp_cluster->cfg_core + AP_R_GPR2); + writel(RESUME_MAGIC, vcp->vcp_cluster->cfg + VCP_C0_GPR0_SUSPEND_RESUME); + if (vcp->vcp_cluster->core_nums > MMUP_ID) { + writel(B_CORE1_RESUME, vcp->vcp_cluster->cfg_core + AP_R_GPR3); + writel(RESUME_MAGIC, vcp->vcp_cluster->cfg + VCP_C1_GPR0_SUSPEND_RESUME); + } + } + + writel(B_GIPC4_SETCLR_3, vcp->vcp_cluster->cfg_core + R_GIPC_IN_SET); + + ret = read_poll_timeout(vcp_get_suspend_resume_status, + status, (status == suspend), + USEC_PER_MSEC, + SUSPEND_WAIT_TIMEOUT_MS * USEC_PER_MSEC, + false, vcp); + if (ret) + dev_err(vcp->dev, "vcp %s timeout GIPC 0x%x 0x%x 0x%x 0x%x flag 0x%x 0x%x\n", + suspend ? 
"suspend" : "resume", + readl(vcp->vcp_cluster->cfg_core + R_GIPC_IN_SET), + readl(vcp->vcp_cluster->cfg_core + R_GIPC_IN_CLR), + readl(vcp->vcp_cluster->cfg_core + AP_R_GPR2), + readl(vcp->vcp_cluster->cfg_core + AP_R_GPR3), + readl(vcp->vcp_cluster->cfg_sec + R_GPR2_SEC), + readl(vcp->vcp_cluster->cfg_sec + R_GPR3_SEC)); +} + void vcp_register_notify(struct mtk_vcp_device *vcp, enum vcp_feature_id id, struct notifier_block *nb) @@ -438,6 +529,16 @@ static int vcp_enable_pm_clk(struct mtk_vcp_device *vcp, enum vcp_feature_id id) bool suspend_status; int ret; + ret = read_poll_timeout(is_vcp_suspending, + suspend_status, !suspend_status, + USEC_PER_MSEC, + SUSPEND_WAIT_TIMEOUT_MS * USEC_PER_MSEC, + false, vcp); + if (ret) { + dev_err(vcp->dev, "%s blocked by vcp suspend\n", __func__); + return ret; + } + if (vcp->vcp_cluster->feature_enable[id]) { dev_err(vcp->dev, "%s feature(id=%d) already enabled\n", __func__, id); @@ -465,6 +566,16 @@ static int vcp_disable_pm_clk(struct mtk_vcp_device *vcp, enum vcp_feature_id id bool suspend_status; int ret; + ret = read_poll_timeout(is_vcp_suspending, + suspend_status, !suspend_status, + USEC_PER_MSEC, + SUSPEND_WAIT_TIMEOUT_MS * USEC_PER_MSEC, + false, vcp); + if (ret) { + dev_err(vcp->dev, "%s blocked by vcp suspend\n", __func__); + return ret; + } + if (!vcp->vcp_cluster->feature_enable[id]) { dev_err(vcp->dev, "%s feature(id=%d) already disabled\n", __func__, id); diff --git a/drivers/remoteproc/mtk_vcp_common.h b/drivers/remoteproc/mtk_vcp_common.h index 1238a165cac4..f193e2f66796 100644 --- a/drivers/remoteproc/mtk_vcp_common.h +++ b/drivers/remoteproc/mtk_vcp_common.h @@ -16,9 +16,12 @@ #define VCP_READY_TIMEOUT_MS 3000 #define VCP_IPI_DEV_READY_TIMEOUT 1000 #define CORE_HART_SHUTDOWN_TIMEOUT_MS 10 +#define SUSPEND_WAIT_TIMEOUT_MS 100 /* VCP platform definition */ #define DMA_MAX_MASK_BIT 33 +#define RESUME_MAGIC 0x12345678 +#define SUSPEND_MAGIC 0x87654321 #define PIN_OUT_C_SIZE_SLEEP_0 2 /* VCP load image definition 
*/ @@ -271,5 +274,8 @@ int vcp_register_feature(struct mtk_vcp_device *vcp, int vcp_deregister_feature(struct mtk_vcp_device *vcp, enum vcp_feature_id id); +bool is_vcp_suspending(struct mtk_vcp_device *vcp); int wait_core_hart_shutdown(struct mtk_vcp_device *vcp, enum vcp_core_id core_id); +void vcp_wait_core_stop(struct mtk_vcp_device *vcp, enum vcp_core_id core_id); +void vcp_wait_suspend_resume(struct mtk_vcp_device *vcp, bool suspend); #endif diff --git a/drivers/remoteproc/mtk_vcp_rproc.c b/drivers/remoteproc/mtk_vcp_rproc.c index 2f320849fe15..b27bf1b6f668 100644 --- a/drivers/remoteproc/mtk_vcp_rproc.c +++ b/drivers/remoteproc/mtk_vcp_rproc.c @@ -11,6 +11,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/suspend.h> #include <linux/remoteproc.h> #include "mtk_vcp_common.h" @@ -71,6 +72,66 @@ struct mtk_ipi_device *vcp_get_ipidev(struct mtk_vcp_device *vcp) } EXPORT_SYMBOL_GPL(vcp_get_ipidev); +static int mtk_vcp_suspend(struct device *dev) +{ + struct mtk_vcp_device *vcp = platform_get_drvdata(to_platform_device(dev)); + u32 f_id; + int ret; + + vcp_extern_notify(VCP_ID, VCP_EVENT_SUSPEND); + vcp_extern_notify(MMUP_ID, VCP_EVENT_SUSPEND); + + for (f_id = RTOS_FEATURE_ID + 1; f_id < NUM_FEATURE_ID; f_id++) { + if (vcp->vcp_cluster->feature_enable[f_id]) { + dev_err(vcp->dev, "%s, Feature %d still active\n", __func__, f_id); + return -EBUSY; + } + } + + if (!vcp->vcp_cluster->is_suspending) { + vcp->vcp_cluster->is_suspending = true; + vcp->vcp_cluster->vcp_ready[VCP_ID] = false; + vcp->vcp_cluster->vcp_ready[MMUP_ID] = false; + + flush_workqueue(vcp->vcp_cluster->vcp_workqueue); + + vcp_wait_suspend_resume(vcp, true); + vcp_wait_core_stop(vcp, VCP_ID); + vcp_wait_core_stop(vcp, MMUP_ID); + + ret = pm_runtime_put_sync(dev); + if (ret < 0) { + dev_err(dev, "%s, Failed to suspend: %d\n", __func__, ret); + vcp->vcp_cluster->is_suspending = false; + return ret; + } + } + + return 0; +} + +static 
int mtk_vcp_resume(struct device *dev) +{ + struct mtk_vcp_device *vcp = platform_get_drvdata(to_platform_device(dev)); + int ret; + + if (vcp->vcp_cluster->is_suspending) { + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + vcp_wait_suspend_resume(vcp, false); + } + vcp->vcp_cluster->is_suspending = false; + + vcp_extern_notify(MMUP_ID, VCP_EVENT_RESUME); + vcp_extern_notify(VCP_ID, VCP_EVENT_RESUME); + + return 0; +} + static int mtk_vcp_start(struct rproc *rproc) { struct mtk_vcp_device *vcp = rproc->priv; @@ -491,6 +552,11 @@ static const struct mtk_vcp_of_data mt8196_of_data = { }, }; +static const struct dev_pm_ops mtk_vcp_rproc_pm_ops = { + .suspend_noirq = mtk_vcp_suspend, + .resume_noirq = mtk_vcp_resume, +}; + static const struct of_device_id mtk_vcp_of_match[] = { { .compatible = "mediatek,mt8196-vcp", .data = &mt8196_of_data}, {} @@ -504,6 +570,7 @@ static struct platform_driver mtk_vcp_device = { .driver = { .name = "mtk-vcp", .of_match_table = mtk_vcp_of_match, + .pm = pm_ptr(&mtk_vcp_rproc_pm_ops), }, }; diff --git a/drivers/remoteproc/mtk_vcp_rproc.h b/drivers/remoteproc/mtk_vcp_rproc.h index 64a25287dc5c..a1ac6c7efd08 100644 --- a/drivers/remoteproc/mtk_vcp_rproc.h +++ b/drivers/remoteproc/mtk_vcp_rproc.h @@ -23,6 +23,7 @@ * @msg_vcp_ready1: core1 ready ipi msg data * @slp_ipi_ack_data: sleep ipi msg data * @feature_enable: feature status count data + * @is_suspending: suspend status flag * @vcp_ready: vcp core status flag * @share_mem_iova: shared memory iova base * @share_mem_size: shared memory size @@ -46,6 +47,7 @@ struct mtk_vcp_of_cluster { u32 msg_vcp_ready1; u32 slp_ipi_ack_data; bool feature_enable[NUM_FEATURE_ID]; + bool is_suspending; bool vcp_ready[VCP_CORE_TOTAL]; dma_addr_t share_mem_iova; size_t share_mem_size; -- 2.46.0

