ZynqMP TCM information was hardcoded in the driver. Now ZynqMP TCM
information is available in the device-tree. Parse TCM information in
the driver as per the new bindings.

Signed-off-by: Tanmay Shah <tanmay.s...@amd.com>
---

Changes in v9:
  - Introduce a new API to request and release core1 TCM power-domains in
    lockstep mode. It will be used in the prepare -> add_tcm_banks
    callback to enable TCM in lockstep mode.
  - Parse TCM from the device-tree in lockstep mode and split mode in a
    uniform way.
  - Fix TCM representation in the device-tree in lockstep mode (see the
    illustrative sketch below).
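
  A rough sketch of the lockstep-mode device-tree layout that the new
  parsing code expects is shown below. Node names, unit addresses, sizes
  and power-domain macro names are illustrative assumptions only, not
  text copied from the binding document:

    r5fss {
            compatible = "xlnx,zynqmp-r5fss";
            xlnx,cluster-mode = <1>;  /* 1 = lockstep */
            #address-cells = <2>;
            #size-cells = <2>;
            /* translate the R5 view of TCM to global addresses */
            ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x20000>,
                     <0x0 0x20000 0x0 0xffe20000 0x0 0x20000>;

            r5f@0 {
                    compatible = "xlnx,zynqmp-r5f";
                    /* core0 TCM plus the contiguous aliased core1 TCM */
                    reg = <0x0 0x0 0x0 0x20000>, <0x0 0x20000 0x0 0x20000>;
                    reg-names = "atcm0", "btcm0";
                    /* RPU power-domain first, then one entry per TCM bank */
                    power-domains = <&zynqmp_firmware PD_RPU_0>,
                                    <&zynqmp_firmware PD_R5_0_ATCM>,
                                    <&zynqmp_firmware PD_R5_0_BTCM>;
            };

            r5f@1 {
                    compatible = "xlnx,zynqmp-r5f";
                    /* only core1 TCM power-domains are used in lockstep */
                    power-domains = <&zynqmp_firmware PD_RPU_1>,
                                    <&zynqmp_firmware PD_R5_1_ATCM>,
                                    <&zynqmp_firmware PD_R5_1_BTCM>;
            };
    };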

Changes in v8:
  - Remove the pm_domains framework
  - Remove pm_domain_id validation checks when powering TCM on/off
  - Remove a spurious change
  - Parse the power-domains property from the device-tree and use EEMI calls
    to power TCM on/off instead of using the pm_domains framework

Changes in v7:
  - Move pm_domain_id checking from the previous patch
  - Fix mem_bank_data memory allocation

 drivers/remoteproc/xlnx_r5_remoteproc.c | 245 +++++++++++++++++++++++-
 1 file changed, 239 insertions(+), 6 deletions(-)

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 4395edea9a64..0f87b984850b 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,8 +74,8 @@ struct mbox_info {
 };
 
 /*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. These will stay in the driver to maintain backward
+ * compatibility with device-trees that do not have TCM information.
  */
 static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
        {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
@@ -102,6 +102,7 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
  * @rproc: rproc handle
  * @pm_domain_id: RPU CPU power domain id
  * @ipi: pointer to mailbox information
+ * @lockstep_core1_np: second core's device_node to use in lockstep mode
  */
 struct zynqmp_r5_core {
        struct device *dev;
@@ -111,6 +112,7 @@ struct zynqmp_r5_core {
        struct rproc *rproc;
        u32 pm_domain_id;
        struct mbox_info *ipi;
+       struct device_node *lockstep_core1_np;
 };
 
 /**
@@ -539,6 +541,110 @@ static int tcm_mem_map(struct rproc *rproc,
        return 0;
 }
 
+static int request_core1_tcm_lockstep(struct rproc *rproc)
+{
+       struct zynqmp_r5_core *r5_core = rproc->priv;
+       struct of_phandle_args out_args = {0};
+       int ret, i, num_pd, pd_id, ret_err;
+       struct device_node *np;
+
+       np = r5_core->lockstep_core1_np;
+
+       /* Get number of power-domains */
+       num_pd = of_count_phandle_with_args(np, "power-domains",
+                                           "#power-domain-cells");
+       if (num_pd <= 0)
+               return -EINVAL;
+
+       /* Get individual power-domain id and enable TCM */
+       for (i = 1; i < num_pd; i++) {
+               ret = of_parse_phandle_with_args(np, "power-domains",
+                                                "#power-domain-cells",
+                                                i, &out_args);
+               if (ret) {
+                       dev_warn(r5_core->dev,
+                                "failed to get tcm %d in power-domains list, 
ret %d\n",
+                                i, ret);
+                       goto fail_request_core1_tcm;
+               }
+
+               pd_id = out_args.args[0];
+               of_node_put(out_args.np);
+
+               ret = zynqmp_pm_request_node(pd_id, ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+                                            ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+               if (ret) {
+                       dev_err(r5_core->dev, "failed to request TCM node 
0x%x\n",
+                               pd_id);
+                       goto fail_request_core1_tcm;
+               }
+       }
+
+       return 0;
+
+fail_request_core1_tcm:
+
+       /* Cache actual error to return later */
+       ret_err = ret;
+
+       /* Release previously requested TCM in case of failure */
+       while (--i > 0) {
+               ret = of_parse_phandle_with_args(np, "power-domains",
+                                                "#power-domain-cells",
+                                                i, &out_args);
+               if (ret)
+                       return ret;
+               pd_id = out_args.args[0];
+               of_node_put(out_args.np);
+               zynqmp_pm_release_node(pd_id);
+       }
+
+       return ret_err;
+}
+
+static void release_core1_tcm_lockstep(struct rproc *rproc)
+{
+       struct zynqmp_r5_core *r5_core = rproc->priv;
+       struct of_phandle_args out_args = {0};
+       struct zynqmp_r5_cluster *cluster;
+       int ret, i, num_pd, pd_id;
+       struct device_node *np;
+
+       /* Get R5 core1 node */
+       cluster = dev_get_drvdata(r5_core->dev->parent);
+
+       if (cluster->mode != LOCKSTEP_MODE)
+               return;
+
+       np = r5_core->lockstep_core1_np;
+
+       /* Get number of power-domains */
+       num_pd = of_count_phandle_with_args(np, "power-domains",
+                                           "#power-domain-cells");
+       if (num_pd <= 0)
+               return;
+
+       /* Get individual power-domain id and turn off each TCM */
+       for (i = 1; i < num_pd; i++) {
+               ret = of_parse_phandle_with_args(np, "power-domains",
+                                                "#power-domain-cells",
+                                                i, &out_args);
+               if (ret) {
+                       dev_warn(r5_core->dev,
+                                "failed to get pd of core1 tcm %d in list, ret 
%d\n",
+                                i, ret);
+                       continue;
+               }
+
+               pd_id = out_args.args[0];
+               of_node_put(out_args.np);
+
+               if (zynqmp_pm_release_node(pd_id))
+                       dev_warn(r5_core->dev,
+                                "failed to release core1 tcm pd 0x%x\n", 
pd_id);
+       }
+}
+
 /*
  * add_tcm_carveout_split_mode()
  * @rproc: single R5 core's corresponding rproc instance
@@ -633,6 +739,21 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
        r5_core = rproc->priv;
        dev = r5_core->dev;
 
+       /*
+        * In lockstep mode, R5 core0 uses the TCM of R5 core1 via aliased addresses.
+        * Aliased addresses are contiguous with core0 TCM and embedded in the "reg"
+        * property. However, R5 core1 TCM power-domains need to be requested from
+        * firmware to use R5 core1 TCM. Request core1 TCM power-domains if TCM is
+        * parsed from the device-tree.
+        */
+       if (of_find_property(r5_core->np, "reg", NULL)) {
+               ret = request_core1_tcm_lockstep(rproc);
+               if (ret) {
+                       dev_err(r5_core->dev, "failed to request core1 TCM 
power-domains\n");
+                       return ret;
+               }
+       }
+
        /* Go through zynqmp banks for r5 node */
        num_banks = r5_core->tcm_bank_count;
 
@@ -689,6 +810,9 @@ static int add_tcm_carveout_lockstep_mode(struct rproc *rproc)
                pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
                zynqmp_pm_release_node(pm_domain_id);
        }
+
+       release_core1_tcm_lockstep(rproc);
+
        return ret;
 }
 
@@ -808,6 +932,8 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
                                 "can't turn off TCM bank 0x%x", pm_domain_id);
        }
 
+       release_core1_tcm_lockstep(rproc);
+
        return 0;
 }
 
@@ -878,6 +1004,95 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
        return ERR_PTR(ret);
 }
 
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+       int i, j, tcm_bank_count, ret, tcm_pd_idx;
+       struct of_phandle_args out_args = {0};
+       struct zynqmp_r5_core *r5_core;
+       struct platform_device *cpdev;
+       struct mem_bank_data *tcm;
+       struct device_node *np;
+       struct resource *res;
+       u64 abs_addr, size;
+       struct device *dev;
+
+       for (i = 0; i < cluster->core_count; i++) {
+               r5_core = cluster->r5_cores[i];
+               dev = r5_core->dev;
+               np = r5_core->np;
+
+               /* TCM "reg" entries use two address cells and two size cells */
+               tcm_bank_count = of_property_count_elems_of_size(np, "reg",
+                                                                4 * sizeof(u32));
+               if (tcm_bank_count <= 0) {
+                       dev_err(dev, "can't get reg property err %d\n", 
tcm_bank_count);
+                       return -EINVAL;
+               }
+
+               r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+                                                 sizeof(struct mem_bank_data *),
+                                                 GFP_KERNEL);
+               if (!r5_core->tcm_banks)
+                       return -ENOMEM;
+
+               r5_core->tcm_bank_count = tcm_bank_count;
+               for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
+                       tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+                                          GFP_KERNEL);
+                       if (!tcm)
+                               return -ENOMEM;
+
+                       r5_core->tcm_banks[j] = tcm;
+
+                       /* Get power-domains id of TCM. */
+                       ret = of_parse_phandle_with_args(np, "power-domains",
+                                                        "#power-domain-cells",
+                                                        tcm_pd_idx, &out_args);
+                       if (ret) {
+                               dev_err(r5_core->dev,
+                                       "failed to get tcm %d pm domain, ret 
%d\n",
+                                       tcm_pd_idx, ret);
+                               return ret;
+                       }
+                       tcm->pm_domain_id = out_args.args[0];
+                       of_node_put(out_args.np);
+
+                       /* Get TCM address without translation. */
+                       ret = of_property_read_reg(np, j, &abs_addr, &size);
+                       if (ret) {
+                               dev_err(dev, "failed to get reg property\n");
+                               return ret;
+                       }
+
+                       /*
+                        * The remote processor can address only 32 bits, so
+                        * truncate the 64-bit address and size to 32 bits,
+                        * discarding any unwanted upper 32 bits.
+                        */
+                       tcm->da = (u32)abs_addr;
+                       tcm->size = (u32)size;
+
+                       cpdev = to_platform_device(dev);
+                       res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
+                       if (!res) {
+                               dev_err(dev, "failed to get tcm resource\n");
+                               return -EINVAL;
+                       }
+
+                       tcm->addr = (u32)res->start;
+                       tcm->bank_name = (char *)res->name;
+                       res = devm_request_mem_region(dev, tcm->addr, tcm->size,
+                                                     tcm->bank_name);
+                       if (!res) {
+                               dev_err(dev, "failed to request tcm 
resource\n");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 /**
  * zynqmp_r5_get_tcm_node()
  * Ideally this function should parse tcm node and store information
@@ -956,9 +1171,14 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
        struct zynqmp_r5_core *r5_core;
        int ret, i;
 
-       ret = zynqmp_r5_get_tcm_node(cluster);
-       if (ret < 0) {
-               dev_err(dev, "can't get tcm node, err %d\n", ret);
+       r5_core = cluster->r5_cores[0];
+       if (of_find_property(r5_core->np, "reg", NULL))
+               ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
+       else
+               ret = zynqmp_r5_get_tcm_node(cluster);
+
+       if (ret) {
+               dev_err(dev, "can't get tcm, err %d\n", ret);
                return ret;
        }
 
@@ -1099,7 +1319,19 @@ static int zynqmp_r5_cluster_init(struct zynqmp_r5_cluster *cluster)
                 * then ignore second child node.
                 */
                if (cluster_mode == LOCKSTEP_MODE) {
-                       of_node_put(child);
+                       /*
+                        * Get the second core's device node only to use its power-domains.
+                        * Also, there is no need to call of_node_put on the first core's
+                        * device_node as of_get_next_available_child takes care of it.
+                        */
+                       r5_cores[i]->lockstep_core1_np =
+                               of_get_next_available_child(dev_node, child);
+
+                       if (!r5_cores[i]->lockstep_core1_np) {
+                               ret = -EINVAL;
+                               goto release_r5_cores;
+                       }
+
                        break;
                }
 
@@ -1158,6 +1390,7 @@ static void zynqmp_r5_cluster_exit(void *data)
                r5_core = cluster->r5_cores[i];
                zynqmp_r5_free_mbox(r5_core->ipi);
                of_reserved_mem_device_release(r5_core->dev);
+               of_node_put(r5_core->lockstep_core1_np);
                put_device(r5_core->dev);
                rproc_del(r5_core->rproc);
                rproc_free(r5_core->rproc);
-- 
2.25.1

