ZynqMP TCM information was hardcoded in the driver. Now the ZynqMP TCM
information is available in the device-tree. Parse the TCM information
in the driver as per the new bindings.
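
For illustration, with the new bindings each r5f core node is expected
to describe its TCM banks through "reg"/"reg-names" pairs plus one
extra "power-domains" entry per bank after the core's own power domain.
A rough sketch of a split-mode core follows (node names, unit
addresses, cell counts and the parent "ranges" translation are
illustrative only; the xlnx,zynqmp-r5fss dt-binding example is
authoritative):

  remoteproc {
          compatible = "xlnx,zynqmp-r5fss";
          #address-cells = <2>;
          #size-cells = <2>;
          /* Translate the R5 view of TCM (device address) to system addresses. */
          ranges = <0x0 0x0 0x0 0xffe00000 0x0 0x10000>,
                   <0x0 0x20000 0x0 0xffe20000 0x0 0x10000>;

          r5f@0 {
                  compatible = "xlnx,zynqmp-r5f";
                  /* The untranslated reg value is the bank's device address. */
                  reg = <0x0 0x0 0x0 0x10000>, <0x0 0x20000 0x0 0x10000>;
                  reg-names = "atcm0", "btcm0";
                  /* First entry powers the core, each following one a TCM bank. */
                  power-domains = <&zynqmp_firmware PD_RPU_0>,
                                  <&zynqmp_firmware PD_R5_0_ATCM>,
                                  <&zynqmp_firmware PD_R5_0_BTCM>;
          };
  };

The driver then takes each bank's device address (da) from the
untranslated reg value and its system address from the translated
platform resource.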

Signed-off-by: Tanmay Shah <tanmay.s...@amd.com>
---
 drivers/remoteproc/xlnx_r5_remoteproc.c | 122 ++++++++++++++++++++++--
 1 file changed, 117 insertions(+), 5 deletions(-)

diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 42b0384d34f2..d4a22caebaad 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -74,8 +74,8 @@ struct mbox_info {
 };
 
 /*
- * Hardcoded TCM bank values. This will be removed once TCM bindings are
- * accepted for system-dt specifications and upstreamed in linux kernel
+ * Hardcoded TCM bank values. This will stay in the driver to maintain backward
+ * compatibility with device-trees that do not have TCM information.
  */
 static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
        {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
@@ -757,6 +757,112 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
        return ERR_PTR(ret);
 }
 
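+/**
+ * zynqmp_r5_get_tcm_node_from_dt()
+ * Get TCM info from the "reg" and "power-domains" properties of each
+ * r5 core node and store it in the zynqmp_r5_core instances.
+ *
+ * @cluster: pointer to zynqmp_r5_cluster type object
+ *
+ * Return: 0 for success and < 0 error code for failure.
+ */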
+static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
+{
+       int i, j, tcm_bank_count, ret, tcm_pd_idx, pd_count;
+       struct of_phandle_args out_args;
+       struct zynqmp_r5_core *r5_core;
+       struct platform_device *cpdev;
+       struct mem_bank_data *tcm;
+       struct device_node *np;
+       struct resource *res;
+       u64 abs_addr, size;
+       struct device *dev;
+
+       for (i = 0; i < cluster->core_count; i++) {
+               r5_core = cluster->r5_cores[i];
+               dev = r5_core->dev;
+               np = r5_core->np;
+
+               pd_count = of_count_phandle_with_args(np, "power-domains",
+                                                     "#power-domain-cells");
+
+               if (pd_count <= 0) {
+                       dev_err(dev, "invalid power-domains property, %d\n", pd_count);
+                       return -EINVAL;
+               }
+
+               /* First entry in power-domains list is for r5 core, rest for TCM. */
+               tcm_bank_count = pd_count - 1;
+
+               if (tcm_bank_count <= 0) {
+                       dev_err(dev, "invalid TCM count %d\n", tcm_bank_count);
+                       return -EINVAL;
+               }
+
+               r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
+                                                 sizeof(struct mem_bank_data *),
+                                                 GFP_KERNEL);
+               if (!r5_core->tcm_banks)
+                       return -ENOMEM;
+
+               r5_core->tcm_bank_count = tcm_bank_count;
+               for (j = 0, tcm_pd_idx = 1; j < tcm_bank_count; j++, tcm_pd_idx++) {
+                       tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data),
+                                          GFP_KERNEL);
+                       if (!tcm)
+                               return -ENOMEM;
+
+                       r5_core->tcm_banks[j] = tcm;
+
+                       /* Get power-domains id of TCM. */
+                       ret = of_parse_phandle_with_args(np, "power-domains",
+                                                        "#power-domain-cells",
+                                                        tcm_pd_idx, &out_args);
+                       if (ret) {
+                               dev_err(r5_core->dev,
+                                       "failed to get tcm %d pm domain, ret %d\n",
+                                       tcm_pd_idx, ret);
+                               return ret;
+                       }
+                       tcm->pm_domain_id = out_args.args[0];
+                       of_node_put(out_args.np);
+
+                       /* Get TCM address without translation. */
+                       ret = of_property_read_reg(np, j, &abs_addr, &size);
+                       if (ret) {
+                               dev_err(dev, "failed to get reg property\n");
+                               return ret;
+                       }
+
+                       /*
+                        * Remote processor can address only 32 bits
+                        * so convert 64-bits into 32-bits. This will discard
+                        * any unwanted upper 32-bits.
+                        */
+                       tcm->da = (u32)abs_addr;
+                       tcm->size = (u32)size;
+
+                       cpdev = to_platform_device(dev);
+                       res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
+                       if (!res) {
+                               dev_err(dev, "failed to get tcm resource\n");
+                               return -EINVAL;
+                       }
+
+                       tcm->addr = (u32)res->start;
+                       tcm->bank_name = (char *)res->name;
+                       res = devm_request_mem_region(dev, tcm->addr, tcm->size,
+                                                     tcm->bank_name);
+                       if (!res) {
+                               dev_err(dev, "failed to request tcm resource\n");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
 /**
  * zynqmp_r5_get_tcm_node()
  * Ideally this function should parse tcm node and store information
@@ -835,9 +941,15 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
        struct zynqmp_r5_core *r5_core;
        int ret, i;
 
-       ret = zynqmp_r5_get_tcm_node(cluster);
-       if (ret < 0) {
-               dev_err(dev, "can't get tcm node, err %d\n", ret);
+       r5_core = cluster->r5_cores[0];
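+       /* A "reg" property in the core node indicates the new TCM bindings. */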
+       if (of_find_property(r5_core->np, "reg", NULL))
+               ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
+       else
+               ret = zynqmp_r5_get_tcm_node(cluster);
+
+       if (ret) {
+               dev_err(dev, "can't get tcm, err %d\n", ret);
                return ret;
        }
 
-- 
2.25.1

