From: Mathieu Poirier <mathieu.poirier@linaro.org>
To: Tanmay Shah <tanmay.shah@amd.com>
Cc: andersson@kernel.org, robh+dt@kernel.org,
krzysztof.kozlowski+dt@linaro.org, michal.simek@amd.com,
radhey.shyam.pandey@amd.com, ben.levinsky@amd.com,
linux-remoteproc@vger.kernel.org, devicetree@vger.kernel.org,
linux-arm-kernel@lists.infradead.org
Subject: Re: [PATCH v5 4/4] remoteproc: zynqmp: parse TCM from device tree
Date: Mon, 2 Oct 2023 11:39:38 -0600
Message-ID: <ZRsAWodXRJr7TT3B@p14s>
In-Reply-To: <20230928155900.3987103-5-tanmay.shah@amd.com>
On Thu, Sep 28, 2023 at 08:59:00AM -0700, Tanmay Shah wrote:
> ZynqMP TCM information is fixed in driver. Now ZynqMP TCM information
> is available in device-tree. Parse TCM information in driver
> as per new bindings
Missing '.' at the end of the sentence.
>
> Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
> ---
> drivers/remoteproc/xlnx_r5_remoteproc.c | 122 ++++++++++++++++++++++--
> 1 file changed, 114 insertions(+), 8 deletions(-)
>
> diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
> index 27ed2c070ebb..749e9da68906 100644
> --- a/drivers/remoteproc/xlnx_r5_remoteproc.c
> +++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
> @@ -75,8 +75,8 @@ struct mbox_info {
> };
>
> /*
> - * Hardcoded TCM bank values. This will be removed once TCM bindings are
> - * accepted for system-dt specifications and upstreamed in linux kernel
> + * Hardcoded TCM bank values. This will stay in driver to maintain backward
> + * compatibility with device-tree that does not have TCM information.
> */
> static const struct mem_bank_data zynqmp_tcm_banks_split[] = {
> {0xffe00000UL, 0x0, 0x10000UL, PD_R5_0_ATCM, "atcm0"}, /* TCM 64KB each */
> @@ -613,7 +613,8 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
> bank_name);
> if (!rproc_mem) {
> ret = -ENOMEM;
> - zynqmp_pm_release_node(pm_domain_id);
> + if (pm_domain_id)
> + zynqmp_pm_release_node(pm_domain_id);
> goto release_tcm_split;
> }
>
> @@ -626,7 +627,8 @@ static int add_tcm_carveout_split_mode(struct rproc *rproc)
> /* If failed, Turn off all TCM banks turned on before */
> for (i--; i >= 0; i--) {
> pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
> - zynqmp_pm_release_node(pm_domain_id);
> + if (pm_domain_id)
> + zynqmp_pm_release_node(pm_domain_id);
> }
> return ret;
> }
> @@ -1064,6 +1066,107 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
> return ERR_PTR(ret);
> }
>
> +static int zynqmp_r5_get_tcm_node_from_dt(struct zynqmp_r5_cluster *cluster)
> +{
> + int i, j, tcm_bank_count, ret = -EINVAL;
> + struct platform_device *cpdev;
> + struct resource *res = NULL;
> + u64 abs_addr = 0, size = 0;
> + struct mem_bank_data *tcm;
> + struct device_node *np;
> + struct device *dev;
As far as I can tell, none of the above initialization is needed. I have
commented on that before.
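Something like this should be enough, since every variable is assigned before it is read (untested sketch, keeping the same names as above):

	int i, j, tcm_bank_count, ret;
	struct platform_device *cpdev;
	struct resource *res;
	u64 abs_addr, size;
	struct mem_bank_data *tcm;
	struct device_node *np;
	struct device *dev;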
> +
> + for (i = 0; i < cluster->core_count; i++) {
> + r5_core = cluster->r5_cores[i];
> + dev = r5_core->dev;
> + np = dev_of_node(dev);
> +
> + /* we have address cell 2 and size cell as 2 */
> + ret = of_property_count_elems_of_size(np, "reg",
> + 4 * sizeof(u32));
> + if (ret == 0) {
> + ret = -EINVAL;
> + goto fail_tcm;
> + }
> + if (ret < 0)
> + goto fail_tcm;
	if (ret <= 0) {
		ret = -EINVAL;
		goto fail_tcm;
	}
> +
> + tcm_bank_count = ret;
> +
> + r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
> + sizeof(struct mem_bank_data *),
> + GFP_KERNEL);
> + if (!r5_core->tcm_banks) {
> + ret = -ENOMEM;
> + goto fail_tcm;
> + }
> +
> + r5_core->tcm_bank_count = tcm_bank_count;
> + for (j = 0; j < tcm_bank_count; j++) {
> + tcm = devm_kzalloc(dev, sizeof(struct mem_bank_data *),
> + GFP_KERNEL);
> + if (!tcm) {
> + ret = -ENOMEM;
> + goto fail_tcm;
> + }
> +
> + r5_core->tcm_banks[j] = tcm;
> +
> + /* get tcm address without translation */
> + ret = of_property_read_reg(np, j, &abs_addr, &size);
> + if (ret) {
> + dev_err(dev, "failed to get reg property\n");
> + goto fail_tcm;
> + }
> +
> + /*
> + * remote processor can address only 32 bits
> + * so convert 64-bits into 32-bits. This will discard
> + * any unwanted upper 32-bits.
> + */
> + tcm->da = (u32)abs_addr;
> + tcm->size = (u32)size;
> +
> + cpdev = to_platform_device(dev);
> + res = platform_get_resource(cpdev, IORESOURCE_MEM, j);
> + if (!res) {
> + dev_err(dev, "failed to get tcm resource\n");
> + ret = -EINVAL;
> + goto fail_tcm;
> + }
> +
> + tcm->addr = (u32)res->start;
> + tcm->bank_name = (char *)res->name;
> + res = devm_request_mem_region(dev, tcm->addr, tcm->size,
> + tcm->bank_name);
> + if (!res) {
> + dev_err(dev, "failed to request tcm resource\n");
> + ret = -EINVAL;
> + goto fail_tcm;
> + }
> + }
> + }
> +
> + return 0;
> +
> +fail_tcm:
> + while (i >= 0) {
> + r5_core = cluster->r5_cores[i];
> + for (j = 0; j < r5_core->tcm_bank_count; j++) {
> + if (!r5_core->tcm_banks || !r5_core->tcm_banks[j])
> + continue;
> + tcm = r5_core->tcm_banks[j];
> + devm_kfree(r5_core->dev, tcm);
> + }
> + devm_kfree(r5_core->dev, r5_core->tcm_banks);
> + r5_core->tcm_banks = NULL;
> + i--;
> + }
Given the devm_xyz() API used in this function, is the above needed?
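If it is not, the error paths can simply return and the whole unwind loop (along with the fail_tcm label) goes away. Untested sketch, reusing the allocation above as an example:

		r5_core->tcm_banks = devm_kcalloc(dev, tcm_bank_count,
						  sizeof(struct mem_bank_data *),
						  GFP_KERNEL);
		if (!r5_core->tcm_banks)
			return -ENOMEM;

The same direct-return pattern would apply to the other failure points, since devres releases these allocations automatically when the corresponding device goes away.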
Thanks,
Mathieu
> +
> + return ret;
> +}
> +
> /**
> * zynqmp_r5_get_tcm_node()
> * Ideally this function should parse tcm node and store information
> @@ -1142,10 +1245,13 @@ static int zynqmp_r5_core_init(struct zynqmp_r5_cluster *cluster,
> struct zynqmp_r5_core *r5_core;
> int ret, i;
>
> - ret = zynqmp_r5_get_tcm_node(cluster);
> - if (ret < 0) {
> - dev_err(dev, "can't get tcm node, err %d\n", ret);
> - return ret;
> + ret = zynqmp_r5_get_tcm_node_from_dt(cluster);
> + if (ret) {
> + ret = zynqmp_r5_get_tcm_node(cluster);
> + if (ret < 0) {
> + dev_err(dev, "can't get tcm node, err %d\n", ret);
> + return ret;
> + }
> }
>
> for (i = 0; i < cluster->core_count; i++) {
> --
> 2.25.1
>