* [PATCH 1/2] RDMA/cxgb4: Serialize calls to cq's comp_handler by lock
@ 2011-10-24 15:50 Kumar Sanghvi
[not found] ` <1319471422-30113-1-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
0 siblings, 1 reply; 4+ messages in thread
From: Kumar Sanghvi @ 2011-10-24 15:50 UTC (permalink / raw)
To: linux-rdma-u79uwXL29TY76Z2rM5mHXA
Cc: roland-BHEL68pLQRGGvPXPguhicg,
swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW,
divy-ut6Up61K2wZBDgjK7y7TUQ, Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA,
Kumar Sanghvi
Commit 01e7da6ba53ca4d6189a1eae45607c0331c871f2 introduced a potential
problem wherein the cq's comp_handler can get called simultaneously from
different places in iw_cxgb4 driver. This does not comply with
Documentation/infiniband/core_locking.txt, which states that at any given
point of time, only one callback per CQ should be active.
This problem was reported by Parav Pandit <Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org>.
Based on discussion between Parav Pandit and Steve Wise, this patch
aims to correct the above problem by serializing the calls to cq's comp_handler
using a spin_lock.
Reported-by: Parav Pandit <Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org>
Signed-off-by: Kumar Sanghvi <kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
---
drivers/infiniband/hw/cxgb4/cq.c | 1 +
drivers/infiniband/hw/cxgb4/ev.c | 10 ++++++++--
drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 1 +
drivers/infiniband/hw/cxgb4/qp.c | 15 +++++++++++++--
4 files changed, 23 insertions(+), 4 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 901c5fb..f35a935 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -818,6 +818,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
chp->cq.size--; /* status page */
chp->ibcq.cqe = entries - 2;
spin_lock_init(&chp->lock);
+ spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait);
ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index c13041a..397cb36 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -42,6 +42,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
{
struct ib_event event;
struct c4iw_qp_attributes attrs;
+ unsigned long flag;
if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
(qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
@@ -72,7 +73,9 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
if (qhp->ibqp.event_handler)
(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
}
void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
@@ -183,11 +186,14 @@ out:
int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
{
struct c4iw_cq *chp;
+ unsigned long flag;
chp = get_chp(dev, qid);
- if (chp)
+ if (chp) {
+ spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
- else
+ spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+ } else
PDBG("%s unknown cqid 0x%x\n", __func__, qid);
return 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 62cea0e..1357c5b 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -309,6 +309,7 @@ struct c4iw_cq {
struct c4iw_dev *rhp;
struct t4_cq cq;
spinlock_t lock;
+ spinlock_t comp_handler_lock;
atomic_t refcnt;
wait_queue_head_t wait;
};
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index b59b56c..a391a4a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -945,8 +945,11 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&rchp->lock, flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+ }
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
@@ -956,13 +959,17 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
- if (flushed)
+ if (flushed) {
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+ }
}
static void flush_qp(struct c4iw_qp *qhp)
{
struct c4iw_cq *rchp, *schp;
+ unsigned long flag;
rchp = get_chp(qhp->rhp, qhp->attr.rcq);
schp = get_chp(qhp->rhp, qhp->attr.scq);
@@ -970,11 +977,15 @@ static void flush_qp(struct c4iw_qp *qhp)
if (qhp->ibqp.uobject) {
t4_set_wq_in_error(&qhp->wq);
t4_set_cq_in_error(&rchp->cq);
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
if (schp != rchp) {
t4_set_cq_in_error(&schp->cq);
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
(*schp->ibcq.comp_handler)(&schp->ibcq,
schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
}
return;
}
--
1.7.1
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
More majordomo info at http://vger.kernel.org/majordomo-info.html
^ permalink raw reply related [flat|nested] 4+ messages in thread[parent not found: <1319471422-30113-1-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>]
* [PATCH 2/2] RDMA/cxgb3: Serialize calls to cq's comp_handler by lock [not found] ` <1319471422-30113-1-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> @ 2011-10-24 15:50 ` Kumar Sanghvi [not found] ` <1319471422-30113-2-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> 2011-10-28 13:57 ` [PATCH 1/2] RDMA/cxgb4: " Steve Wise 1 sibling, 1 reply; 4+ messages in thread From: Kumar Sanghvi @ 2011-10-24 15:50 UTC (permalink / raw) To: linux-rdma-u79uwXL29TY76Z2rM5mHXA Cc: roland-BHEL68pLQRGGvPXPguhicg, swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW, divy-ut6Up61K2wZBDgjK7y7TUQ, Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA, Kumar Sanghvi iw_cxgb3 has a potential problem wherein the cq's comp_handler can get called simultaneously from different places in iw_cxgb3 driver. This does not comply with Documentation/infiniband/core_locking.txt, which states that at a given point of time, there should be only one callback per CQ should be active. Such problem was reported by Parav Pandit <Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org> for iw_cxgb4 driver. Based on discussion between Parav Pandit and Steve Wise, this patch aims to correct above problem by serializing the calls to cq's comp_handler using a spin_lock. 
Signed-off-by: Kumar Sanghvi <kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> --- drivers/infiniband/hw/cxgb3/iwch_ev.c | 6 ++++++ drivers/infiniband/hw/cxgb3/iwch_provider.c | 1 + drivers/infiniband/hw/cxgb3/iwch_provider.h | 1 + drivers/infiniband/hw/cxgb3/iwch_qp.c | 14 ++++++++++++-- 4 files changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/cxgb3/iwch_ev.c b/drivers/infiniband/hw/cxgb3/iwch_ev.c index 71e0d84..abcc9e7 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_ev.c +++ b/drivers/infiniband/hw/cxgb3/iwch_ev.c @@ -46,6 +46,7 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, struct ib_event event; struct iwch_qp_attributes attrs; struct iwch_qp *qhp; + unsigned long flag; spin_lock(&rnicp->lock); qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe)); @@ -94,7 +95,9 @@ static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp, if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); + spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + spin_unlock_irqrestore(&chp->comp_handler_lock, flag); if (atomic_dec_and_test(&qhp->refcnt)) wake_up(&qhp->wait); @@ -107,6 +110,7 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) struct iwch_cq *chp; struct iwch_qp *qhp; u32 cqid = RSPQ_CQID(rsp_msg); + unsigned long flag; rnicp = (struct iwch_dev *) rdev_p->ulp; spin_lock(&rnicp->lock); @@ -170,7 +174,9 @@ void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb) */ if (qhp->ep && SQ_TYPE(rsp_msg->cqe)) dst_confirm(qhp->ep->dst); + spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); + spin_unlock_irqrestore(&chp->comp_handler_lock, flag); break; case TPT_ERR_STAG: diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index c7d9411..37c224f 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c 
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -190,6 +190,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve chp->rhp = rhp; chp->ibcq.cqe = 1 << chp->cq.size_log2; spin_lock_init(&chp->lock); + spin_lock_init(&chp->comp_handler_lock); atomic_set(&chp->refcnt, 1); init_waitqueue_head(&chp->wait); if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) { diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index 9a342c9..87c14b0 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h @@ -103,6 +103,7 @@ struct iwch_cq { struct iwch_dev *rhp; struct t3_cq cq; spinlock_t lock; + spinlock_t comp_handler_lock; atomic_t refcnt; wait_queue_head_t wait; u32 __user *user_rptr_addr; diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index ecd313f..bea5839 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c @@ -822,8 +822,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&rchp->lock, *flag); - if (flushed) + if (flushed) { + spin_lock_irqsave(&rchp->comp_handler_lock, *flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); + } /* locking hierarchy: cq lock first, then qp lock. 
*/ spin_lock_irqsave(&schp->lock, *flag); @@ -833,8 +836,11 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp, flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); spin_unlock(&qhp->lock); spin_unlock_irqrestore(&schp->lock, *flag); - if (flushed) + if (flushed) { + spin_lock_irqsave(&schp->comp_handler_lock, *flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); + } /* deref */ if (atomic_dec_and_test(&qhp->refcnt)) @@ -853,11 +859,15 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag) if (qhp->ibqp.uobject) { cxio_set_wq_in_error(&qhp->wq); cxio_set_cq_in_error(&rchp->cq); + spin_lock_irqsave(&rchp->comp_handler_lock, *flag); (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context); + spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag); if (schp != rchp) { cxio_set_cq_in_error(&schp->cq); + spin_lock_irqsave(&schp->comp_handler_lock, *flag); (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context); + spin_unlock_irqrestore(&schp->comp_handler_lock, *flag); } return; } -- 1.7.1 -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org More majordomo info at http://vger.kernel.org/majordomo-info.html ^ permalink raw reply related [flat|nested] 4+ messages in thread
[parent not found: <1319471422-30113-2-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>]
* Re: [PATCH 2/2] RDMA/cxgb3: Serialize calls to cq's comp_handler by lock [not found] ` <1319471422-30113-2-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> @ 2011-10-28 13:57 ` Steve Wise 0 siblings, 0 replies; 4+ messages in thread From: Steve Wise @ 2011-10-28 13:57 UTC (permalink / raw) To: Kumar Sanghvi Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, roland-BHEL68pLQRGGvPXPguhicg, divy-ut6Up61K2wZBDgjK7y7TUQ, Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA On 10/24/2011 10:50 AM, Kumar Sanghvi wrote: > iw_cxgb3 has a potential problem wherein the cq's comp_handler > can get called simultaneously from different places in iw_cxgb3 > driver. This does not comply with Documentation/infiniband/core_locking.txt, > which states that at a given point of time, there should be only one > callback per CQ should be active. > > Such problem was reported by Parav Pandit<Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org> for > iw_cxgb4 driver. Based on discussion between Parav Pandit and Steve Wise, > this patch aims to correct above problem by serializing the calls to cq's > comp_handler using a spin_lock. > > Signed-off-by: Kumar Sanghvi<kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> Acked-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org More majordomo info at http://vger.kernel.org/majordomo-info.html ^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH 1/2] RDMA/cxgb4: Serialize calls to cq's comp_handler by lock [not found] ` <1319471422-30113-1-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> 2011-10-24 15:50 ` [PATCH 2/2] RDMA/cxgb3: " Kumar Sanghvi @ 2011-10-28 13:57 ` Steve Wise 1 sibling, 0 replies; 4+ messages in thread From: Steve Wise @ 2011-10-28 13:57 UTC (permalink / raw) To: Kumar Sanghvi Cc: linux-rdma-u79uwXL29TY76Z2rM5mHXA, roland-BHEL68pLQRGGvPXPguhicg, divy-ut6Up61K2wZBDgjK7y7TUQ, Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA On 10/24/2011 10:50 AM, Kumar Sanghvi wrote: > Commit 01e7da6ba53ca4d6189a1eae45607c0331c871f2 introduced a potential > problem wherein the cq's comp_handler can get called simultaneously from > different places in iw_cxgb4 driver. This does not comply with > Documentation/infiniband/core_locking.txt, which states that at a given > point of time, there should be only one callback per CQ should be active. > > This problem was reported by Parav Pandit<Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org>. > Based on discussion between Parav Pandit and Steve Wise, this patch > aims to correct above problem by serializing the calls to cq's comp_handler > using a spin_lock. > > Reported-by: Parav Pandit<Parav.Pandit-iH1Dq9VlAzfQT0dZR+AlfA@public.gmane.org> > Signed-off-by: Kumar Sanghvi<kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org> Acked-by: Steve Wise <swise-7bPotxP6k4+P2YhJcF5u+vpXobYPEAuW@public.gmane.org> -- To unsubscribe from this list: send the line "unsubscribe linux-rdma" in the body of a message to majordomo-u79uwXL29TY76Z2rM5mHXA@public.gmane.org More majordomo info at http://vger.kernel.org/majordomo-info.html ^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2011-10-28 13:57 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2011-10-24 15:50 [PATCH 1/2] RDMA/cxgb4: Serialize calls to cq's comp_handler by lock Kumar Sanghvi
[not found] ` <1319471422-30113-1-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2011-10-24 15:50 ` [PATCH 2/2] RDMA/cxgb3: " Kumar Sanghvi
[not found] ` <1319471422-30113-2-git-send-email-kumaras-ut6Up61K2wZBDgjK7y7TUQ@public.gmane.org>
2011-10-28 13:57 ` Steve Wise
2011-10-28 13:57 ` [PATCH 1/2] RDMA/cxgb4: " Steve Wise
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox