* [PATCH for-next] RDMA/rxe: Fix FIXME in rxe_udp_encap_recv()
From: Bob Pearson @ 2021-01-28 23:33 UTC
To: jgg, zyjzyj2000, linux-rdma; +Cc: Bob Pearson
rxe_udp_encap_recv() drops the reference to rxe->ib_dev taken by
rxe_get_dev_from_net() which should be held until each received
skb is freed. This patch moves the calls to ib_device_put() to
each place a received skb is freed. It also takes references to
the ib_device for each cloned skb created to process received
multicast packets.
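
In outline, the reference lifecycle this gives is the following
(an illustrative sketch only, drawn from the hunks below, with the
error handling elided; it is not the driver code itself):

	/* receive path: rxe_get_dev_from_net() returns the device with
	 * a reference held on rxe->ib_dev; that reference now travels
	 * with the skb
	 */
	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe)
		goto drop;		/* no reference was taken */
	rxe_rcv(skb);

	/* multicast path: each clone handed to a qp needs its own
	 * reference, since each copy drops one when it is freed
	 */
	per_qp_skb = skb_clone(skb, GFP_ATOMIC);
	if (per_qp_skb)
		ib_device_try_get(&rxe->ib_dev);

	/* every place a received skb is finally freed drops the
	 * matching reference
	 */
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
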
Fixes: 4c173f596b3ff ("RDMA/rxe: Use ib_device_get_by_netdev()
instead of open coding")
Signed-off-by: Bob Pearson <rpearson@hpe.com>
---
drivers/infiniband/sw/rxe/rxe_comp.c | 49 ++++++++++++----------------
drivers/infiniband/sw/rxe/rxe_net.c | 12 +++----
drivers/infiniband/sw/rxe/rxe_recv.c | 15 +++++++--
drivers/infiniband/sw/rxe/rxe_resp.c | 3 ++
4 files changed, 42 insertions(+), 37 deletions(-)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 0a1e6393250b..96d32903a3b5 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -515,6 +515,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->resp_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
while ((wqe = queue_head(qp->sq.queue))) {
@@ -527,6 +528,17 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
}
}
+void free_pkt(struct rxe_pkt_info *pkt)
+{
+ struct sk_buff *skb = PKT_TO_SKB(pkt);
+ struct rxe_qp *qp = pkt->qp;
+ struct ib_device *dev = qp->ibqp.device;
+
+ kfree_skb(skb);
+ rxe_drop_ref(qp);
+ ib_device_put(dev);
+}
+
int rxe_completer(void *arg)
{
struct rxe_qp *qp = (struct rxe_qp *)arg;
@@ -624,11 +636,8 @@ int rxe_completer(void *arg)
break;
case COMPST_DONE:
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
+ if (pkt)
+ free_pkt(pkt);
goto done;
case COMPST_EXIT:
@@ -671,12 +680,8 @@ int rxe_completer(void *arg)
*/
if (qp->comp.started_retry &&
!qp->comp.timeout_retry) {
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto done;
}
@@ -699,13 +704,8 @@ int rxe_completer(void *arg)
qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 0);
}
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto done;
} else {
@@ -726,9 +726,7 @@ int rxe_completer(void *arg)
mod_timer(&qp->rnr_nak_timer,
jiffies + rnrnak_jiffies(aeth_syn(pkt)
& ~AETH_TYPE_MASK));
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
+ free_pkt(pkt);
goto exit;
} else {
rxe_counter_inc(rxe,
@@ -742,13 +740,8 @@ int rxe_completer(void *arg)
WARN_ON_ONCE(wqe->status == IB_WC_SUCCESS);
do_complete(qp, wqe);
rxe_qp_error(qp);
-
- if (pkt) {
- rxe_drop_ref(pkt->qp);
- kfree_skb(skb);
- skb = NULL;
- }
-
+ if (pkt)
+ free_pkt(pkt);
goto exit;
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index c4b06ced30a7..1dba23e57d92 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -152,10 +152,14 @@ static struct dst_entry *rxe_find_route(struct net_device *ndev,
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct udphdr *udph;
+ struct rxe_dev *rxe;
struct net_device *ndev = skb->dev;
- struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
+ /* takes a reference on rxe->ib_dev
+ * drop when skb is freed
+ */
+ rxe = rxe_get_dev_from_net(ndev);
if (!rxe)
goto drop;
@@ -174,12 +178,6 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
rxe_rcv(skb);
- /*
- * FIXME: this is in the wrong place, it needs to be done when pkt is
- * destroyed
- */
- ib_device_put(&rxe->ib_dev);
-
return 0;
drop:
kfree_skb(skb);
diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
index c9984a28eecc..8e60d9eaf79a 100644
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -266,10 +266,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
/* for all but the last qp create a new clone of the
* skb and pass to the qp.
*/
- if (mce->qp_list.next != &mcg->qp_list)
+ if (mce->qp_list.next != &mcg->qp_list) {
per_qp_skb = skb_clone(skb, GFP_ATOMIC);
- else
+ if (!ib_device_try_get(&rxe->ib_dev)) {
+ /* shouldn't happen we already have
+ * one ref for skb.
+ */
+ pr_warn("ib_device_try_get failed\n");
+ kfree_skb(per_qp_skb);
+ continue;
+ }
+ } else {
per_qp_skb = skb;
+ }
if (unlikely(!per_qp_skb))
continue;
@@ -288,6 +297,7 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
err1:
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
/**
@@ -397,4 +407,5 @@ void rxe_rcv(struct sk_buff *skb)
rxe_drop_ref(pkt->qp);
kfree_skb(skb);
+ ib_device_put(&rxe->ib_dev);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5a098083a9d2..5fd26786d79b 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -99,6 +99,7 @@ static inline enum resp_states get_req(struct rxe_qp *qp,
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
/* go drain recv wr queue */
@@ -1012,6 +1013,7 @@ static enum resp_states cleanup(struct rxe_qp *qp,
skb = skb_dequeue(&qp->req_pkts);
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (qp->resp.mr) {
@@ -1176,6 +1178,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
while ((skb = skb_dequeue(&qp->req_pkts))) {
rxe_drop_ref(qp);
kfree_skb(skb);
+ ib_device_put(qp->ibqp.device);
}
if (notify)
--
2.27.0
* Re: [PATCH for-next] RDMA/rxe: Fix FIXME in rxe_udp_encap_recv()
From: Jason Gunthorpe @ 2021-02-05 17:59 UTC
To: Bob Pearson; +Cc: zyjzyj2000, linux-rdma, Bob Pearson
On Thu, Jan 28, 2021 at 05:33:19PM -0600, Bob Pearson wrote:
> rxe_udp_encap_recv() drops the reference to rxe->ib_dev taken by
> rxe_get_dev_from_net() which should be held until each received
> skb is freed. This patch moves the calls to ib_device_put() to
> each place a received skb is freed. It also takes references to
> the ib_device for each cloned skb created to process received
> multicast packets.
>
> Fixes: 4c173f596b3ff ("RDMA/rxe: Use ib_device_get_by_netdev()
> instead of open coding")
Do not line wrap fixes lines
> diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
> index c9984a28eecc..8e60d9eaf79a 100644
> --- a/drivers/infiniband/sw/rxe/rxe_recv.c
> +++ b/drivers/infiniband/sw/rxe/rxe_recv.c
> @@ -266,10 +266,19 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
> /* for all but the last qp create a new clone of the
> * skb and pass to the qp.
> */
> - if (mce->qp_list.next != &mcg->qp_list)
> + if (mce->qp_list.next != &mcg->qp_list) {
> per_qp_skb = skb_clone(skb, GFP_ATOMIC);
> - else
> + if (!ib_device_try_get(&rxe->ib_dev)) {
> + /* shouldn't happen we already have
> + * one ref for skb.
> + */
> + pr_warn("ib_device_try_get failed\n");
> + kfree_skb(per_qp_skb);
I fixed this to just
+ if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
+ kfree_skb(per_qp_skb);
+ continue;
+ }
> + continue;
> + }
> + } else {
> per_qp_skb = skb;
> + }
And this had a merge collision with the mcast patch, I fixed it up
Applied to for-next, thanks
Jason
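
As background for the fix-up above: WARN_ON(cond) both prints a
warning with a backtrace when cond is true and evaluates to cond, so
the pr_warn() plus separate test collapses into the if condition
itself. A minimal sketch of the resulting shape (illustrative, not
the committed hunk):

	if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
		/* WARN_ON() already printed the backtrace; just
		 * discard the clone and move on to the next qp
		 */
		kfree_skb(per_qp_skb);
		continue;
	}
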