netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Dave Jiang <dave.jiang@intel.com>
To: Pavan Chebbi <pavan.chebbi@broadcom.com>
Cc: jgg@ziepe.ca, michael.chan@broadcom.com, saeedm@nvidia.com,
	Jonathan.Cameron@huawei.com, davem@davemloft.net, corbet@lwn.net,
	edumazet@google.com, gospo@broadcom.com, kuba@kernel.org,
	netdev@vger.kernel.org, pabeni@redhat.com, andrew+netdev@lunn.ch,
	selvin.xavier@broadcom.com, leon@kernel.org,
	kalesh-anakkur.purayil@broadcom.com
Subject: Re: [PATCH net-next v2 5/6] bnxt_fwctl: Add bnxt fwctl device
Date: Thu, 25 Sep 2025 11:17:55 -0700	[thread overview]
Message-ID: <05958f83-b703-4ce7-a526-c6d0bc3fb81e@intel.com> (raw)
In-Reply-To: <74540a81-a7af-4a50-b832-679e7873cfe0@intel.com>



On 9/25/25 8:46 AM, Dave Jiang wrote:
> 
> 
> On 9/24/25 9:31 PM, Pavan Chebbi wrote:
>> On Thu, Sep 25, 2025 at 4:02 AM Dave Jiang <dave.jiang@intel.com> wrote:
>>>
>>
>>>> +static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
>>>> +                         enum fwctl_rpc_scope scope,
>>>> +                         void *in, size_t in_len, size_t *out_len)
>>>> +{
>>>> +     struct bnxtctl_dev *bnxtctl =
>>>> +             container_of(uctx->fwctl, struct bnxtctl_dev, fwctl);
>>>> +     struct bnxt_aux_priv *bnxt_aux_priv = bnxtctl->aux_priv;
>>>> +     struct fwctl_dma_info_bnxt *dma_buf = NULL;
>>>> +     struct device *dev = &uctx->fwctl->dev;
>>>> +     struct fwctl_rpc_bnxt *msg = in;
>>>> +     struct bnxt_fw_msg rpc_in;
>>>> +     int i, rc, err = 0;
>>>> +     int dma_buf_size;
>>>> +
>>>> +     rpc_in.msg = kzalloc(msg->req_len, GFP_KERNEL);
>>>
>>> I think if you use __free(kfree) for all the allocations in the function, you can be rid of the gotos.
>>>
>> Thanks Dave for the review. Would you be fine if I defer using
>> scope-based cleanup until later?
>> I need some time to understand the mechanism better and correctly
>> define the macros as some
>> pointers holding the memory are members within a stack variable. I
>> will fix the goto/free issues
>> you highlighted in the next revision. I hope that is going to be OK?
> 
> Sure that is fine. The way things are done in this function makes things a bit tricky to do cleanup properly via scope-based cleanup. I might play with it a bit and see if I can come up with something. It looks like it needs a bit of refactoring to split some things out. Probably not a bad thing in the long run.
> 

Here's a potential template for doing it with scope-based cleanup. It applies on top of this current patch. I have only compile-tested it, so there will probably be errors in the conversion. Feel free to use it.

---

diff --git a/drivers/fwctl/bnxt/main.c b/drivers/fwctl/bnxt/main.c
index 1bec4567e35c..714106fd1033 100644
--- a/drivers/fwctl/bnxt/main.c
+++ b/drivers/fwctl/bnxt/main.c
@@ -22,8 +22,6 @@ struct bnxtctl_uctx {
 struct bnxtctl_dev {
 	struct fwctl_device fwctl;
 	struct bnxt_aux_priv *aux_priv;
-	void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
-	dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
 };
 
 DEFINE_FREE(bnxtctl, struct bnxtctl_dev *, if (_T) fwctl_put(&_T->fwctl))
@@ -76,61 +74,133 @@ static bool bnxtctl_validate_rpc(struct bnxt_en_dev *edev,
 	return false;
 }
 
-static int bnxt_fw_setup_input_dma(struct bnxtctl_dev *bnxt_dev,
-				   struct device *dev,
-				   int num_dma,
-				   struct fwctl_dma_info_bnxt *msg,
-				   struct bnxt_fw_msg *fw_msg)
+struct bnxtctl_dma {
+	struct device *dev;
+	int num_dma;
+	void *dma_virt_addr[MAX_NUM_DMA_INDICATIONS];
+	dma_addr_t dma_addr[MAX_NUM_DMA_INDICATIONS];
+};
+
+struct dma_context {
+	struct bnxtctl_dma *dma;
+	struct fwctl_dma_info_bnxt *dma_info;
+};
+
+static void free_dma(struct bnxtctl_dma *dma)
+{
+	int i;
+
+	for (i = 0; i < dma->num_dma; i++) {
+		if (dma->dma_virt_addr[i])
+			dma_free_coherent(dma->dev, 0, dma->dma_virt_addr[i],
+					  dma->dma_addr[i]);
+	}
+	kfree(dma);
+}
+DEFINE_FREE(free_dma, struct bnxtctl_dma *, if (_T) free_dma(_T))
+
+static struct bnxtctl_dma *
+allocate_and_setup_dma_bufs(struct device *dev,
+			    struct fwctl_dma_info_bnxt *dma_info,
+			    int num_dma,
+			    struct bnxt_fw_msg *fw_msg)
 {
-	u8 i, num_allocated = 0;
 	void *dma_ptr;
-	int rc = 0;
+	int i;
 
+	struct bnxtctl_dma *dma __free(free_dma) =
+		kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma)
+		return ERR_PTR(-ENOMEM);
+
+	dma->dev = dev->parent;
 	for (i = 0; i < num_dma; i++) {
-		if (msg->len == 0 || msg->len > MAX_DMA_MEM_SIZE) {
-			rc = -EINVAL;
-			goto err;
-		}
-		bnxt_dev->dma_virt_addr[i] = dma_alloc_coherent(dev->parent,
-								msg->len,
-								&bnxt_dev->dma_addr[i],
-								GFP_KERNEL);
-		if (!bnxt_dev->dma_virt_addr[i]) {
-			rc = -ENOMEM;
-			goto err;
-		}
-		num_allocated++;
-		if (!(msg->read_from_device)) {
-			if (copy_from_user(bnxt_dev->dma_virt_addr[i],
-					   u64_to_user_ptr(msg->data),
-					   msg->len)) {
-				rc = -EFAULT;
-				goto err;
-			}
-		}
-		dma_ptr = fw_msg->msg + msg->offset;
+		__le64 *dmap;
 
-		if ((PTR_ALIGN(dma_ptr, 8) == dma_ptr) &&
-		    msg->offset < fw_msg->msg_len) {
-			__le64 *dmap = dma_ptr;
+		if (dma_info->len == 0 || dma_info->len > MAX_DMA_MEM_SIZE)
+			return ERR_PTR(-EINVAL);
 
-			*dmap = cpu_to_le64(bnxt_dev->dma_addr[i]);
-		} else {
-			rc = -EINVAL;
-			goto err;
+		dma->dma_virt_addr[i] =
+			dma_alloc_coherent(dma->dev, dma_info->len,
+					   &dma->dma_addr[i], GFP_KERNEL);
+		if (!dma->dma_virt_addr[i])
+			return ERR_PTR(-ENOMEM);
+
+		dma->num_dma++;
+		if (!(dma_info->read_from_device)) {
+			if (copy_from_user(dma->dma_virt_addr[i],
+					   u64_to_user_ptr(dma_info->data),
+					   dma_info->len))
+				return ERR_PTR(-EFAULT);
 		}
-		msg += 1;
+		dma_ptr = fw_msg->msg + dma_info->offset;
+
+		if (PTR_ALIGN(dma_ptr, 8) != dma_ptr ||
+		    dma_info->offset >= fw_msg->msg_len)
+			return ERR_PTR(-EINVAL);
+
+		dmap = dma_ptr;
+		*dmap = cpu_to_le64(dma->dma_addr[i]);
+		dma_info += 1;
+	}
+
+	return no_free_ptr(dma);
+}
+
+static void free_dma_context(struct dma_context *dma_ctx)
+{
+	if (dma_ctx->dma)
+		free_dma(dma_ctx->dma);
+	if (dma_ctx->dma_info)
+		kfree(dma_ctx->dma_info);
+	kfree(dma_ctx);
+}
+DEFINE_FREE(free_dma_ctx, struct dma_context *, if (_T) free_dma_context(_T))
+
+static struct dma_context *
+allocate_and_setup_dma_context(struct device *dev,
+			       struct fwctl_rpc_bnxt *rpc_msg,
+			       struct bnxt_fw_msg *fw_msg)
+{
+	int num_dma = rpc_msg->num_dma;
+	int dma_buf_size;
+
+	if (!num_dma)
+		return NULL;
+
+	struct dma_context *dma_ctx __free(free_dma_ctx) =
+		kzalloc(sizeof(*dma_ctx), GFP_KERNEL);
+	if (!dma_ctx)
+		return ERR_PTR(-ENOMEM);
+
+	if (num_dma > MAX_NUM_DMA_INDICATIONS) {
+		dev_err(dev, "DMA buffers exceed the number supported\n");
+		return ERR_PTR(-EINVAL);
 	}
 
-	return rc;
-err:
-	for (i = 0; i < num_allocated; i++)
-		dma_free_coherent(dev->parent,
-				  msg->len,
-				  bnxt_dev->dma_virt_addr[i],
-				  bnxt_dev->dma_addr[i]);
+	dma_buf_size = num_dma * sizeof(struct fwctl_dma_info_bnxt);
+	struct fwctl_dma_info_bnxt *dma_info __free(kfree)
+		= kzalloc(dma_buf_size, GFP_KERNEL);
+	if (!dma_info) {
+		dev_err(dev, "Failed to allocate dma buffers\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (copy_from_user(dma_info, u64_to_user_ptr(rpc_msg->payload),
+			   dma_buf_size)) {
+		dev_dbg(dev, "Failed to copy payload from user\n");
+		return ERR_PTR(-EFAULT);
+	}
+
+	struct bnxtctl_dma *dma __free(free_dma) =
+		allocate_and_setup_dma_bufs(dev, dma_info, num_dma, fw_msg);
+	if (IS_ERR(dma))
+		return ERR_CAST(dma);
+
+	dma_ctx->dma_info = no_free_ptr(dma_info);
+	dma_ctx->dma = no_free_ptr(dma);
 
-	return rc;
+	return no_free_ptr(dma_ctx);
 }
 
 static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
@@ -140,34 +210,28 @@ static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
 	struct bnxtctl_dev *bnxtctl =
 		container_of(uctx->fwctl, struct bnxtctl_dev, fwctl);
 	struct bnxt_aux_priv *bnxt_aux_priv = bnxtctl->aux_priv;
-	struct fwctl_dma_info_bnxt *dma_buf = NULL;
 	struct device *dev = &uctx->fwctl->dev;
 	struct fwctl_rpc_bnxt *msg = in;
 	struct bnxt_fw_msg rpc_in;
-	int i, rc, err = 0;
-	int dma_buf_size;
+	int i, rc;
+
+	void *rpc_in_msg __free(kfree) = kzalloc(msg->req_len, GFP_KERNEL);
+	if (!rpc_in_msg)
+		return ERR_PTR(-ENOMEM);
 
-	rpc_in.msg = kzalloc(msg->req_len, GFP_KERNEL);
-	if (!rpc_in.msg) {
-		err = -ENOMEM;
-		goto err_out;
-	}
 	if (copy_from_user(rpc_in.msg, u64_to_user_ptr(msg->req),
 			   msg->req_len)) {
 		dev_dbg(dev, "Failed to copy in_payload from user\n");
-		err = -EFAULT;
-		goto err_out;
+		return ERR_PTR(-EFAULT);
 	}
 
 	if (!bnxtctl_validate_rpc(bnxt_aux_priv->edev, &rpc_in))
 		return ERR_PTR(-EPERM);
 
 	rpc_in.msg_len = msg->req_len;
-	rpc_in.resp = kzalloc(*out_len, GFP_KERNEL);
-	if (!rpc_in.resp) {
-		err = -ENOMEM;
-		goto err_out;
-	}
+	void *rpc_in_resp __free(kfree) = kzalloc(*out_len, GFP_KERNEL);
+	if (!rpc_in_resp)
+		return ERR_PTR(-ENOMEM);
 
 	rpc_in.resp_max_len = *out_len;
 	if (!msg->timeout)
@@ -175,64 +239,37 @@ static void *bnxtctl_fw_rpc(struct fwctl_uctx *uctx,
 	else
 		rpc_in.timeout = msg->timeout;
 
-	if (msg->num_dma) {
-		if (msg->num_dma > MAX_NUM_DMA_INDICATIONS) {
-			dev_err(dev, "DMA buffers exceed the number supported\n");
-			err = -EINVAL;
-			goto err_out;
-		}
-		dma_buf_size = msg->num_dma * sizeof(*dma_buf);
-		dma_buf = kzalloc(dma_buf_size, GFP_KERNEL);
-		if (!dma_buf) {
-			dev_err(dev, "Failed to allocate dma buffers\n");
-			err = -ENOMEM;
-			goto err_out;
-		}
-
-		if (copy_from_user(dma_buf, u64_to_user_ptr(msg->payload),
-				   dma_buf_size)) {
-			dev_dbg(dev, "Failed to copy payload from user\n");
-			err = -EFAULT;
-			goto err_out;
-		}
-
-		rc = bnxt_fw_setup_input_dma(bnxtctl, dev, msg->num_dma,
-					     dma_buf, &rpc_in);
-		if (rc) {
-			err = -EIO;
-			goto err_out;
-		}
-	}
+	struct dma_context *dma_ctx __free(free_dma_ctx) =
+		allocate_and_setup_dma_context(dev, msg, &rpc_in);
+	if (IS_ERR(dma_ctx))
+		return ERR_CAST(dma_ctx);
 
+	rpc_in.msg = rpc_in_msg;
+	rpc_in.resp = rpc_in_resp;
 	rc = bnxt_send_msg(bnxt_aux_priv->edev, &rpc_in);
-	if (rc) {
-		err = -EIO;
-		goto err_out;
-	}
+	if (rc)
+		return ERR_PTR(rc);
+
+	if (!dma_ctx)
+		return no_free_ptr(rpc_in_resp);
 
 	for (i = 0; i < msg->num_dma; i++) {
-		if (dma_buf[i].read_from_device) {
-			if (copy_to_user(u64_to_user_ptr(dma_buf[i].data),
-					 bnxtctl->dma_virt_addr[i],
-					 dma_buf[i].len)) {
-				dev_dbg(dev, "Failed to copy resp to user\n");
-				err = -EFAULT;
-			}
+		struct fwctl_dma_info_bnxt *dma_info =
+			&dma_ctx->dma_info[i];
+		struct bnxtctl_dma *dma = dma_ctx->dma;
+
+		if (!dma_info->read_from_device)
+			continue;
+
+		if (copy_to_user(u64_to_user_ptr(dma_info->data),
+				 dma->dma_virt_addr[i],
+				 dma_info->len)) {
+			dev_dbg(dev, "Failed to copy resp to user\n");
+			return ERR_PTR(-EFAULT);
 		}
 	}
-	for (i = 0; i < msg->num_dma; i++)
-		dma_free_coherent(dev->parent, dma_buf[i].len,
-				  bnxtctl->dma_virt_addr[i],
-				  bnxtctl->dma_addr[i]);
 
-err_out:
-	kfree(dma_buf);
-	kfree(rpc_in.msg);
-
-	if (err)
-		return ERR_PTR(err);
-
-	return rpc_in.resp;
+	return no_free_ptr(rpc_in_resp);
 }
 
 static const struct fwctl_ops bnxtctl_ops = {


  reply	other threads:[~2025-09-25 18:17 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-09-23  9:58 [PATCH net-next v2 0/6] bnxt_fwctl: fwctl for Broadcom Netxtreme devices Pavan Chebbi
2025-09-23  9:58 ` [PATCH net-next v2 1/6] bnxt_en: Move common definitions to include/linux/bnxt/ Pavan Chebbi
2025-09-23 10:35   ` Jonathan Cameron
2025-09-23 16:33   ` Saeed Mahameed
2025-09-23 17:16     ` Pavan Chebbi
2025-09-23 18:00       ` Pavan Chebbi
2025-09-23  9:58 ` [PATCH net-next v2 2/6] bnxt_en: Refactor aux bus functions to be generic Pavan Chebbi
2025-09-23 10:46   ` Jonathan Cameron
2025-09-23  9:58 ` [PATCH net-next v2 3/6] bnxt_en: Make a lookup table for supported aux bus devices Pavan Chebbi
2025-09-23 10:49   ` Jonathan Cameron
2025-09-23 12:21     ` Pavan Chebbi
2025-09-23  9:58 ` [PATCH net-next v2 4/6] bnxt_en: Create an aux device for fwctl Pavan Chebbi
2025-09-23 10:56   ` Jonathan Cameron
2025-09-23  9:58 ` [PATCH net-next v2 5/6] bnxt_fwctl: Add bnxt fwctl device Pavan Chebbi
2025-09-23 11:17   ` Jonathan Cameron
2025-09-23 12:19     ` Pavan Chebbi
2025-09-23 20:00   ` [External] : " ALOK TIWARI
2025-09-24 22:31   ` Dave Jiang
2025-09-25  4:31     ` Pavan Chebbi
2025-09-25 15:46       ` Dave Jiang
2025-09-25 18:17         ` Dave Jiang [this message]
2025-09-26  4:00           ` Pavan Chebbi
2025-09-23  9:58 ` [PATCH net-next v2 6/6] bnxt_fwctl: Add documentation entries Pavan Chebbi
2025-09-23 10:31   ` Jonathan Cameron
2025-09-24 22:40   ` Dave Jiang
2025-09-23 16:25 ` [PATCH net-next v2 0/6] bnxt_fwctl: fwctl for Broadcom Netxtreme devices Saeed Mahameed

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=05958f83-b703-4ce7-a526-c6d0bc3fb81e@intel.com \
    --to=dave.jiang@intel.com \
    --cc=Jonathan.Cameron@huawei.com \
    --cc=andrew+netdev@lunn.ch \
    --cc=corbet@lwn.net \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=gospo@broadcom.com \
    --cc=jgg@ziepe.ca \
    --cc=kalesh-anakkur.purayil@broadcom.com \
    --cc=kuba@kernel.org \
    --cc=leon@kernel.org \
    --cc=michael.chan@broadcom.com \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=pavan.chebbi@broadcom.com \
    --cc=saeedm@nvidia.com \
    --cc=selvin.xavier@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).