From: "Peng Fan (OSS)" <peng.fan@oss.nxp.com>
To: Jassi Brar <jassisinghbrar@gmail.com>,
Rob Herring <robh+dt@kernel.org>,
Krzysztof Kozlowski <krzysztof.kozlowski+dt@linaro.org>,
Conor Dooley <conor+dt@kernel.org>,
Dong Aisheng <aisheng.dong@nxp.com>,
Shawn Guo <shawnguo@kernel.org>,
Sascha Hauer <s.hauer@pengutronix.de>,
Pengutronix Kernel Team <kernel@pengutronix.de>,
Fabio Estevam <festevam@gmail.com>,
NXP Linux Team <linux-imx@nxp.com>
Cc: linux-kernel@vger.kernel.org, devicetree@vger.kernel.org,
linux-arm-kernel@lists.infradead.org,
Peng Fan <peng.fan@nxp.com>
Subject: [PATCH v6 2/4] mailbox: imx: support return value of init
Date: Thu, 01 Feb 2024 07:25:38 +0800
Message-ID: <20240201-imx-mailbox-v6-2-76f4f35b403e@nxp.com>
In-Reply-To: <20240201-imx-mailbox-v6-0-76f4f35b403e@nxp.com>
From: Peng Fan <peng.fan@nxp.com>
Upcoming changes will introduce cases where init can fail, so let the
init callback return an error code and propagate it from probe.
Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Peng Fan <peng.fan@nxp.com>
---
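[Editorial note, not part of the diff: the shape of the change is
sketched below. struct imx_mu_dcfg_sketch and sketch_probe() are
hypothetical names used only for illustration; struct imx_mu_priv,
priv->clk and clk_disable_unprepare() are the real driver/kernel
symbols the patch touches. The real probe jumps to the new disable_clk
label rather than calling clk_disable_unprepare() inline.]

/* Sketch assumes the driver's existing context (linux/clk.h,
 * struct imx_mu_priv). Before this patch the init callback could not
 * report failure:
 *	void (*init)(struct imx_mu_priv *priv);
 * After it, init returns 0 on success or a negative errno.
 */
struct imx_mu_dcfg_sketch {
	int (*init)(struct imx_mu_priv *priv);
};

static int sketch_probe(struct imx_mu_priv *priv,
			const struct imx_mu_dcfg_sketch *dcfg)
{
	int ret;

	ret = dcfg->init(priv);
	if (ret) {
		/* probe now releases the clock and propagates the error */
		clk_disable_unprepare(priv->clk);
		return ret;
	}

	return 0;
}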
drivers/mailbox/imx-mailbox.c | 35 ++++++++++++++++++++++++-----------
1 file changed, 24 insertions(+), 11 deletions(-)
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 656171362fe9..dced4614065f 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -110,7 +110,7 @@ struct imx_mu_dcfg {
int (*tx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data);
int (*rx)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
int (*rxdb)(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp);
- void (*init)(struct imx_mu_priv *priv);
+ int (*init)(struct imx_mu_priv *priv);
enum imx_mu_type type;
u32 xTR; /* Transmit Register0 */
u32 xRR; /* Receive Register0 */
@@ -737,7 +737,7 @@ static struct mbox_chan *imx_mu_seco_xlate(struct mbox_controller *mbox,
return imx_mu_xlate(mbox, sp);
}
-static void imx_mu_init_generic(struct imx_mu_priv *priv)
+static int imx_mu_init_generic(struct imx_mu_priv *priv)
{
unsigned int i;
unsigned int val;
@@ -757,7 +757,7 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
priv->mbox.of_xlate = imx_mu_xlate;
if (priv->side_b)
- return;
+ return 0;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -770,9 +770,11 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
/* Clear any pending RSR */
for (i = 0; i < IMX_MU_NUM_RR; i++)
imx_mu_read(priv, priv->dcfg->xRR + (i % 4) * 4);
+
+ return 0;
}
-static void imx_mu_init_specific(struct imx_mu_priv *priv)
+static int imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
@@ -794,12 +796,20 @@ static void imx_mu_init_specific(struct imx_mu_priv *priv)
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
+
+ return 0;
}
-static void imx_mu_init_seco(struct imx_mu_priv *priv)
+static int imx_mu_init_seco(struct imx_mu_priv *priv)
{
- imx_mu_init_generic(priv);
+ int ret;
+
+ ret = imx_mu_init_generic(priv);
+ if (ret)
+ return ret;
priv->mbox.of_xlate = imx_mu_seco_xlate;
+
+ return 0;
}
static int imx_mu_probe(struct platform_device *pdev)
@@ -866,7 +876,11 @@ static int imx_mu_probe(struct platform_device *pdev)
priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
- priv->dcfg->init(priv);
+ ret = priv->dcfg->init(priv);
+ if (ret) {
+ dev_err(dev, "Failed to init MU\n");
+ goto disable_clk;
+ }
spin_lock_init(&priv->xcr_lock);
@@ -878,10 +892,8 @@ static int imx_mu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, priv);
ret = devm_mbox_controller_register(dev, &priv->mbox);
- if (ret) {
- clk_disable_unprepare(priv->clk);
- return ret;
- }
+ if (ret)
+ goto disable_clk;
pm_runtime_enable(dev);
@@ -899,6 +911,7 @@ static int imx_mu_probe(struct platform_device *pdev)
disable_runtime_pm:
pm_runtime_disable(dev);
+disable_clk:
clk_disable_unprepare(priv->clk);
return ret;
}
--
2.37.1