From: Leon Romanovsky <leon@kernel.org>
To: Doug Ledford <dledford@redhat.com>, Jason Gunthorpe <jgg@mellanox.com>
Cc: Leon Romanovsky <leonro@mellanox.com>,
	RDMA mailing list <linux-rdma@vger.kernel.org>,
	Majd Dibbiny <majd@mellanox.com>, Moni Shoua <monis@mellanox.com>,
	Saeed Mahameed <saeedm@mellanox.com>,
	linux-netdev <netdev@vger.kernel.org>
Subject: [PATCH rdma-next 01/12] IB/mlx5: Fix locking SRQ object in ODP event
Date: Tue, 22 Jan 2019 08:48:40 +0200
Message-ID: <20190122064851.6032-2-leon@kernel.org>
In-Reply-To: <20190122064851.6032-1-leon@kernel.org>

From: Moni Shoua <monis@mellanox.com>

QP and SRQ objects are stored in different containers, so the code that
gets and locks a common resource during an ODP event needs to account
for that.

While at it, get rid of the 'refcount' and 'free' fields in the
mlx5_core_srq struct and use the fields with the same semantics in the
common structure.
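
For context, the put helper on the common structure keeps the same
semantics as the open-coded pattern it replaces: drop a reference and,
on the last one, wake up whoever is waiting to destroy the resource.
A rough sketch (illustrative only, not the exact mlx5 core
implementation):

	/* Drop a reference to a common resource; the destroy path
	 * waits on common->free until the last user is gone.
	 */
	static void example_res_put(struct mlx5_core_rsc_common *common)
	{
		if (atomic_dec_and_test(&common->refcount))
			complete(&common->free);
	}

This is what lets mlx5_cmd_destroy_srq() below wait on
srq->common.free instead of the removed per-SRQ completion.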

Fixes: 032080ab43ac ("IB/mlx5: Lock QP during page fault handling")
Signed-off-by: Moni Shoua <monis@mellanox.com>
Reviewed-by: Majd Dibbiny <majd@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
---
 drivers/infiniband/hw/mlx5/cq.c      |  4 ++--
 drivers/infiniband/hw/mlx5/odp.c     | 13 ++++++++-----
 drivers/infiniband/hw/mlx5/srq.h     |  2 --
 drivers/infiniband/hw/mlx5/srq_cmd.c | 16 +++++++---------
 4 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 202f977e7092..eb149de9f156 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -187,8 +187,8 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 			wqe_ctr = be16_to_cpu(cqe->wqe_counter);
 			wc->wr_id = srq->wrid[wqe_ctr];
 			mlx5_ib_free_srq_wqe(srq, wqe_ctr);
-			if (msrq && atomic_dec_and_test(&msrq->refcount))
-				complete(&msrq->free);
+			if (msrq)
+				mlx5_core_res_put(&msrq->common);
 		}
 	} else {
 		wq	  = &qp->rq;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 82ac6cdc7130..beff8c8908b5 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -1115,22 +1115,25 @@ static int mlx5_ib_mr_responder_pfault_handler(
 static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
 						       u32 wq_num, int pf_type)
 {
-	enum mlx5_res_type res_type;
+	struct mlx5_core_rsc_common *common = NULL;
+	struct mlx5_core_srq *srq;
 
 	switch (pf_type) {
 	case MLX5_WQE_PF_TYPE_RMP:
-		res_type = MLX5_RES_SRQ;
+		srq = mlx5_cmd_get_srq(dev, wq_num);
+		if (srq)
+			common = &srq->common;
 		break;
 	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
 	case MLX5_WQE_PF_TYPE_RESP:
 	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
-		res_type = MLX5_RES_QP;
+		common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
 		break;
 	default:
-		return NULL;
+		break;
 	}
 
-	return mlx5_core_res_hold(dev->mdev, wq_num, res_type);
+	return common;
 }
 
 static inline struct mlx5_ib_qp *res_to_qp(struct mlx5_core_rsc_common *res)
diff --git a/drivers/infiniband/hw/mlx5/srq.h b/drivers/infiniband/hw/mlx5/srq.h
index 75eb5839ae95..c330af35ff10 100644
--- a/drivers/infiniband/hw/mlx5/srq.h
+++ b/drivers/infiniband/hw/mlx5/srq.h
@@ -46,8 +46,6 @@ struct mlx5_core_srq {
 	int wqe_shift;
 	void (*event)(struct mlx5_core_srq *srq, enum mlx5_event e);
 
-	atomic_t refcount;
-	struct completion free;
 	u16 uid;
 };
 
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 7aaaffbd4afa..63ac38bb3498 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -87,7 +87,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
 
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 
 	spin_unlock(&table->lock);
 
@@ -594,8 +594,8 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
 	if (err)
 		return err;
 
-	atomic_set(&srq->refcount, 1);
-	init_completion(&srq->free);
+	atomic_set(&srq->common.refcount, 1);
+	init_completion(&srq->common.free);
 
 	spin_lock_irq(&table->lock);
 	err = radix_tree_insert(&table->tree, srq->srqn, srq);
@@ -627,9 +627,8 @@ int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
 	if (err)
 		return err;
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
-	wait_for_completion(&srq->free);
+	mlx5_core_res_put(&srq->common);
+	wait_for_completion(&srq->common.free);
 
 	return 0;
 }
@@ -685,7 +684,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 
 	srq = radix_tree_lookup(&table->tree, srqn);
 	if (srq)
-		atomic_inc(&srq->refcount);
+		atomic_inc(&srq->common.refcount);
 
 	spin_unlock(&table->lock);
 
@@ -694,8 +693,7 @@ static int srq_event_notifier(struct notifier_block *nb,
 
 	srq->event(srq, eqe->type);
 
-	if (atomic_dec_and_test(&srq->refcount))
-		complete(&srq->free);
+	mlx5_core_res_put(&srq->common);
 
 	return NOTIFY_OK;
 }
-- 
2.19.1

