public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request()
@ 2026-03-18 17:39 Konstantin Taranov
  2026-03-18 18:26 ` Leon Romanovsky
  2026-03-18 18:27 ` Leon Romanovsky
  0 siblings, 2 replies; 3+ messages in thread
From: Konstantin Taranov @ 2026-03-18 17:39 UTC (permalink / raw)
  To: kotaranov, shirazsaleem, longli, jgg, leon; +Cc: linux-rdma, linux-kernel

From: Konstantin Taranov <kotaranov@microsoft.com>

Do not check the status of the response header returned by mana_gd_send_request(),
as the returned error code already indicates the request status.

mana_gd_send_request() may return success (no error code) while the response
status is GDMA_STATUS_MORE_ENTRIES, which also indicates a successful completion.
That status is used to check the correctness of multi-request operations, such as
the creation of a DMA region with mana_ib_gd_create_dma_region().

Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
---
 drivers/infiniband/hw/mana/main.c | 105 ++++--------------------------
 drivers/infiniband/hw/mana/mr.c   |  38 ++---------
 drivers/infiniband/hw/mana/qp.c   |   9 +--
 3 files changed, 19 insertions(+), 133 deletions(-)

diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 8d99cd00f002..47c71793793f 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -90,15 +90,8 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	err = mana_gd_send_request(gc, sizeof(req), &req,
 				   sizeof(resp), &resp);
 
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev,
-			  "Failed to get pd_id err %d status %u\n", err,
-			  resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-
+	if (err)
 		return err;
-	}
 
 	pd->pd_handle = resp.pd_handle;
 	pd->pdn = resp.pd_id;
@@ -118,7 +111,6 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 	struct gdma_destroy_pd_req req = {};
 	struct mana_ib_dev *dev;
 	struct gdma_context *gc;
-	int err;
 
 	dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
 	gc = mdev_to_gc(dev);
@@ -127,18 +119,8 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
 			     sizeof(resp));
 
 	req.pd_handle = pd->pd_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req,
-				   sizeof(resp), &resp);
 
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev,
-			  "Failed to destroy pd_handle 0x%llx err %d status %u",
-			  pd->pd_handle, err, resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-	}
-
-	return err;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
@@ -146,7 +128,6 @@ static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
 {
 	struct gdma_destroy_resource_range_req req = {};
 	struct gdma_resp_hdr resp = {};
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
 			     sizeof(req), sizeof(resp));
@@ -155,15 +136,7 @@ static int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
 	req.num_resources = 1;
 	req.allocated_resources = doorbell_page;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.status) {
-		dev_err(gc->dev,
-			"Failed to destroy doorbell page: ret %d, 0x%x\n",
-			err, resp.status);
-		return err ?: -EPROTO;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
@@ -184,12 +157,8 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
 	req.allocated_resources = 0;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		dev_err(gc->dev,
-			"Failed to allocate doorbell page: ret %d, 0x%x\n",
-			err, resp.hdr.status);
-		return err ?: -EPROTO;
-	}
+	if (err)
+		return err;
 
 	*doorbell_page = resp.allocated_resources;
 
@@ -861,20 +830,13 @@ int mana_ib_gd_destroy_rnic_adapter(struct mana_ib_dev *mdev)
 	struct mana_rnic_destroy_adapter_resp resp = {};
 	struct mana_rnic_destroy_adapter_req req = {};
 	struct gdma_context *gc;
-	int err;
 
 	gc = mdev_to_gc(mdev);
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_ADAPTER, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy RNIC adapter err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
@@ -884,7 +846,6 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
 	struct mana_rnic_config_addr_resp resp = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_config_addr_req req = {};
-	int err;
 
 	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
 		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
@@ -898,13 +859,7 @@ int mana_ib_gd_add_gid(const struct ib_gid_attr *attr, void **context)
 	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
 	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
@@ -914,7 +869,6 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
 	struct mana_rnic_config_addr_resp resp = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_config_addr_req req = {};
-	int err;
 
 	if (ntype != RDMA_NETWORK_IPV4 && ntype != RDMA_NETWORK_IPV6) {
 		ibdev_dbg(&mdev->ib_dev, "Unsupported rdma network type %d", ntype);
@@ -928,13 +882,7 @@ int mana_ib_gd_del_gid(const struct ib_gid_attr *attr, void **context)
 	req.sgid_type = (ntype == RDMA_NETWORK_IPV6) ? SGID_TYPE_IPV6 : SGID_TYPE_IPV4;
 	copy_in_reverse(req.ip_addr, attr->gid.raw, sizeof(union ib_gid));
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config IP addr err %d\n", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8 *mac)
@@ -942,7 +890,6 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
 	struct mana_rnic_config_mac_addr_resp resp = {};
 	struct mana_rnic_config_mac_addr_req req = {};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_CONFIG_MAC_ADDR, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
@@ -950,13 +897,7 @@ int mana_ib_gd_config_mac(struct mana_ib_dev *mdev, enum mana_ib_addr_op op, u8
 	req.op = op;
 	copy_in_reverse(req.mac_addr, mac, ETH_ALEN);
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to config Mac addr err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq, u32 doorbell)
@@ -996,7 +937,6 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_rnic_destroy_cq_resp resp = {};
 	struct mana_rnic_destroy_cq_req req = {};
-	int err;
 
 	if (cq->cq_handle == INVALID_MANA_HANDLE)
 		return 0;
@@ -1006,14 +946,7 @@ int mana_ib_gd_destroy_cq(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
 	req.adapter = mdev->adapter_handle;
 	req.cq_handle = cq->cq_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy cq err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
@@ -1061,18 +994,13 @@ int mana_ib_gd_destroy_rc_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
 	struct mana_rnic_destroy_rc_qp_resp resp = {0};
 	struct mana_rnic_destroy_rc_qp_req req = {0};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_RC_QP, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 	req.rc_qp_handle = qp->qp_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy rc qp err %d", err);
-		return err;
-	}
-	return 0;
+
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_gd_create_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp,
@@ -1119,16 +1047,11 @@ int mana_ib_gd_destroy_ud_qp(struct mana_ib_dev *mdev, struct mana_ib_qp *qp)
 	struct mana_rnic_destroy_udqp_resp resp = {0};
 	struct mana_rnic_destroy_udqp_req req = {0};
 	struct gdma_context *gc = mdev_to_gc(mdev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_DESTROY_UD_QP, sizeof(req), sizeof(resp));
 	req.hdr.dev_id = mdev->gdma_dev->dev_id;
 	req.adapter = mdev->adapter_handle;
 	req.qp_handle = qp->qp_handle;
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed to destroy ud qp err %d", err);
-		return err;
-	}
-	return 0;
+
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 9613b225dad4..9bae99c8e846 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -70,15 +70,8 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
 	}
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-
-	if (err || resp.hdr.status) {
-		ibdev_dbg(&dev->ib_dev, "Failed to create mr %d, %u", err,
-			  resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-
+	if (err)
 		return err;
-	}
 
 	mr->ibmr.lkey = resp.lkey;
 	mr->ibmr.rkey = resp.rkey;
@@ -92,23 +85,13 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
 	struct gdma_destroy_mr_response resp = {};
 	struct gdma_destroy_mr_request req = {};
 	struct gdma_context *gc = mdev_to_gc(dev);
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
 			     sizeof(resp));
 
 	req.mr_handle = mr_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		dev_err(gc->dev, "Failed to destroy MR: %d, 0x%x\n", err,
-			resp.hdr.status);
-		if (!err)
-			err = -EPROTO;
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
@@ -339,12 +322,8 @@ static int mana_ib_gd_alloc_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm,
 	req.flags =  attr->flags;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		if (!err)
-			err = -EPROTO;
-
+	if (err)
 		return err;
-	}
 
 	dm->dm_handle = resp.dm_handle;
 
@@ -380,20 +359,11 @@ static int mana_ib_gd_destroy_dm(struct mana_ib_dev *mdev, struct mana_ib_dm *dm
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct gdma_destroy_dm_resp resp = {};
 	struct gdma_destroy_dm_req req = {};
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DM, sizeof(req), sizeof(resp));
 	req.dm_handle = dm->dm_handle;
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status) {
-		if (!err)
-			err = -EPROTO;
-
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_dealloc_dm(struct ib_dm *ibdm, struct uverbs_attr_bundle *attrs)
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 82f84f7ad37a..7bf0753a7d28 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -731,7 +731,6 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct gdma_context *gc = mdev_to_gc(mdev);
 	struct mana_port_context *mpc;
 	struct net_device *ndev;
-	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_IB_SET_QP_STATE, sizeof(req), sizeof(resp));
 
@@ -784,13 +783,7 @@ static int mana_ib_gd_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		req.ah_attr.flow_label = attr->ah_attr.grh.flow_label;
 	}
 
-	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err) {
-		ibdev_err(&mdev->ib_dev, "Failed modify qp err %d", err);
-		return err;
-	}
-
-	return 0;
+	return mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
 }
 
 int mana_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-- 
2.43.0


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request()
  2026-03-18 17:39 [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request() Konstantin Taranov
@ 2026-03-18 18:26 ` Leon Romanovsky
  2026-03-18 18:27 ` Leon Romanovsky
  1 sibling, 0 replies; 3+ messages in thread
From: Leon Romanovsky @ 2026-03-18 18:26 UTC (permalink / raw)
  To: kotaranov, shirazsaleem, longli, jgg, Konstantin Taranov
  Cc: linux-rdma, linux-kernel


On Wed, 18 Mar 2026 10:39:39 -0700, Konstantin Taranov wrote:
> Do not check the status of the response header returned by mana_gd_send_request(),
> as the returned error code already indicates the request status.
> 
> The mana_gd_send_request() may return no error code and have the response status
> GDMA_STATUS_MORE_ENTRIES, which is a successful completion. It is used
> for checking the correctness of multi-request operations, such as creation of
> a dma region with mana_ib_gd_create_dma_region().
> 
> [...]

Applied, thanks!

[1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request()
      https://git.kernel.org/rdma/rdma/c/684603da1e1567

Best regards,
-- 
Leon Romanovsky <leon@kernel.org>


^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request()
  2026-03-18 17:39 [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request() Konstantin Taranov
  2026-03-18 18:26 ` Leon Romanovsky
@ 2026-03-18 18:27 ` Leon Romanovsky
  1 sibling, 0 replies; 3+ messages in thread
From: Leon Romanovsky @ 2026-03-18 18:27 UTC (permalink / raw)
  To: Konstantin Taranov
  Cc: kotaranov, shirazsaleem, longli, jgg, linux-rdma, linux-kernel

On Wed, Mar 18, 2026 at 10:39:39AM -0700, Konstantin Taranov wrote:
> From: Konstantin Taranov <kotaranov@microsoft.com>
> 
> Do not check the status of the response header returned by mana_gd_send_request(),
> as the returned error code already indicates the request status.
> 
> The mana_gd_send_request() may return no error code and have the response status
> GDMA_STATUS_MORE_ENTRIES, which is a successful completion. It is used
> for checking the correctness of multi-request operations, such as creation of
> a dma region with mana_ib_gd_create_dma_region().
> 
> Signed-off-by: Konstantin Taranov <kotaranov@microsoft.com>
> ---
>  drivers/infiniband/hw/mana/main.c | 105 ++++--------------------------
>  drivers/infiniband/hw/mana/mr.c   |  38 ++---------
>  drivers/infiniband/hw/mana/qp.c   |   9 +--
>  3 files changed, 19 insertions(+), 133 deletions(-)

I fixed coding style errors, removed the same debug prints from other
mana_gd_send_request() callers, together with netdev prints, which don't
belong to RDMA and applied the patch.

Thanks

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2026-03-18 18:27 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2026-03-18 17:39 [PATCH rdma-next 1/1] RDMA/mana_ib: cleanup the usage of mana_gd_send_request() Konstantin Taranov
2026-03-18 18:26 ` Leon Romanovsky
2026-03-18 18:27 ` Leon Romanovsky

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox