netdev.vger.kernel.org archive mirror
* [PATCH net] bnxt: don't handle XDP in netpoll
@ 2023-07-27 17:05 Jakub Kicinski
  2023-07-27 18:04 ` Andy Gospodarek
                   ` (2 more replies)
  0 siblings, 3 replies; 8+ messages in thread
From: Jakub Kicinski @ 2023-07-27 17:05 UTC (permalink / raw)
  To: davem; +Cc: netdev, edumazet, pabeni, Jakub Kicinski, michael.chan, gospo

Similarly to other recently fixed drivers, make sure we don't
try to access XDP or page pool APIs when the NAPI budget is 0.
A NAPI budget of 0 may mean that we are in netpoll.

This may result in running softirq-only code in hard IRQ
context, leading to deadlocks or crashes.

Fixes: 322b87ca55f2 ("bnxt_en: add page_pool support")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
CC: michael.chan@broadcom.com
CC: gospo@broadcom.com

Side note - having to plumb the "budget" everywhere really makes
me wonder if we shouldn't have had those APIs accept a pointer
to napi_struct instead :S
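
For reference, the zero budget comes from netpoll's polling path,
roughly like this (abridged sketch of poll_one_napi() from
net/core/netpoll.c around this time; not part of the patch):

	static void poll_one_napi(struct napi_struct *napi)
	{
		int work;

		if (test_and_set_bit(NAPI_STATE_NPSVC, &napi->state))
			return;

		/* Budget of 0 means: clean the Tx path only, no RX/XDP
		 * work.  This may run in hard IRQ context, where the
		 * page pool and XDP APIs (which assume softirq context)
		 * must not be called.
		 */
		work = napi->poll(napi, 0);
		WARN_ONCE(work, "%pS exceeded budget in poll\n", napi->poll);

		clear_bit(NAPI_STATE_NPSVC, &napi->state);
	}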
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 19 +++++++++++--------
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |  2 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c |  6 +++++-
 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h |  3 ++-
 4 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index a3bbd13c070f..fe1d645c39d0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -687,7 +687,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts,
+			int budget)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
 	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -2595,10 +2596,11 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	return rx_pkts;
 }
 
-static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
+static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
+				  int budget)
 {
 	if (bnapi->tx_pkts && !bnapi->tx_fault) {
-		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
+		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts, budget);
 		bnapi->tx_pkts = 0;
 	}
 
@@ -2629,7 +2631,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	 */
 	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);
 
-	__bnxt_poll_work_done(bp, bnapi);
+	__bnxt_poll_work_done(bp, bnapi, budget);
 	return rx_pkts;
 }
 
@@ -2760,7 +2762,7 @@ static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 }
 
 static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
-				 u64 dbr_type)
+				 u64 dbr_type, int budget)
 {
 	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 	int i;
@@ -2776,7 +2778,7 @@ static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
 			cpr2->had_work_done = 0;
 		}
 	}
-	__bnxt_poll_work_done(bp, bnapi);
+	__bnxt_poll_work_done(bp, bnapi, budget);
 }
 
 static int bnxt_poll_p5(struct napi_struct *napi, int budget)
@@ -2806,7 +2808,8 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 			if (cpr->has_more_work)
 				break;
 
-			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL);
+			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
+					     budget);
 			cpr->cp_raw_cons = raw_cons;
 			if (napi_complete_done(napi, work_done))
 				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
@@ -2836,7 +2839,7 @@ static int bnxt_poll_p5(struct napi_struct *napi, int budget)
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
 	}
-	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ);
+	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, budget);
 	if (raw_cons != cpr->cp_raw_cons) {
 		cpr->cp_raw_cons = raw_cons;
 		BNXT_DB_NQ_P5(&cpr->cp_db, raw_cons);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9d16757e27fe..bd44a5701e5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1005,7 +1005,7 @@ struct bnxt_napi {
 	struct bnxt_tx_ring_info	*tx_ring;
 
 	void			(*tx_int)(struct bnxt *, struct bnxt_napi *,
-					  int);
+					  int tx_pkts, int budget);
 	int			tx_pkts;
 	u8			events;
 	u8			tx_fault:1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 5b6fbdc4dc40..33b7eddfbf41 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -125,7 +125,8 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
 	dma_unmap_len_set(tx_buf, len, 0);
 }
 
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts,
+		     int budget)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
 	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
@@ -135,6 +136,9 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	u16 last_tx_cons = tx_cons;
 	int i, j, frags;
 
+	if (!budget)
+		return;
+
 	for (i = 0; i < nr_pkts; i++) {
 		tx_buf = &txr->tx_buf_ring[tx_cons];
 
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index ea430d6961df..3ab47ae2f26d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -16,7 +16,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct bnxt_tx_ring_info *txr,
 				   dma_addr_t mapping, u32 len,
 				   struct xdp_buff *xdp);
-void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
+void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts,
+		     int budget);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
 		 unsigned int *len, u8 *event);
-- 
2.41.0



* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 17:05 [PATCH net] bnxt: don't handle XDP in netpoll Jakub Kicinski
@ 2023-07-27 18:04 ` Andy Gospodarek
  2023-07-27 18:26 ` Jakub Kicinski
  2023-07-27 18:52 ` Michael Chan
  2 siblings, 0 replies; 8+ messages in thread
From: Andy Gospodarek @ 2023-07-27 18:04 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: davem, netdev, edumazet, pabeni, michael.chan

On Thu, Jul 27, 2023 at 10:05:05AM -0700, Jakub Kicinski wrote:
> Similarly to other recently fixed drivers, make sure we don't
> try to access XDP or page pool APIs when the NAPI budget is 0.
> A NAPI budget of 0 may mean that we are in netpoll.
> 
> This may result in running softirq-only code in hard IRQ
> context, leading to deadlocks or crashes.
> 
> Fixes: 322b87ca55f2 ("bnxt_en: add page_pool support")
> Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Reviewed-by: Andy Gospodarek <gospo@broadcom.com>

> Side note - having to plumb the "budget" everywhere really makes
> me wonder if we shouldn't have had those APIs accept a pointer
> to napi_struct instead :S

We could also consider adding budget to struct bnxt_napi.  I'm not sure that
would work in all cases, however.
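
Roughly something like this (untested sketch; the "budget" field is
hypothetical):

	/* in struct bnxt_napi */
	int			budget;	/* saved at the top of each poll */

	/* at the top of bnxt_poll() / bnxt_poll_p5() */
	bnapi->budget = budget;

	/* helpers then test the saved value, e.g. in bnxt_tx_int_xdp() */
	if (!bnapi->budget)
		return;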

I'm good if this goes in as-is and we make enhancements from this as a starting
point.


* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 17:05 [PATCH net] bnxt: don't handle XDP in netpoll Jakub Kicinski
  2023-07-27 18:04 ` Andy Gospodarek
@ 2023-07-27 18:26 ` Jakub Kicinski
  2023-07-27 18:52 ` Michael Chan
  2 siblings, 0 replies; 8+ messages in thread
From: Jakub Kicinski @ 2023-07-27 18:26 UTC (permalink / raw)
  To: davem; +Cc: netdev, edumazet, pabeni, michael.chan, gospo

On Thu, 27 Jul 2023 10:05:05 -0700 Jakub Kicinski wrote:
>  	if (bnapi->tx_pkts && !bnapi->tx_fault) {
> -		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
> +		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts, budget);

I forgot the tx_fault detection went into net-next and not net :(
I'll have to rebase and deal with the conflict.
-- 
pw-bot: cr


* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 17:05 [PATCH net] bnxt: don't handle XDP in netpoll Jakub Kicinski
  2023-07-27 18:04 ` Andy Gospodarek
  2023-07-27 18:26 ` Jakub Kicinski
@ 2023-07-27 18:52 ` Michael Chan
  2023-07-27 19:05   ` Jakub Kicinski
  2 siblings, 1 reply; 8+ messages in thread
From: Michael Chan @ 2023-07-27 18:52 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: davem, netdev, edumazet, pabeni, gospo

On Thu, Jul 27, 2023 at 10:05 AM Jakub Kicinski <kuba@kernel.org> wrote:
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
> index 5b6fbdc4dc40..33b7eddfbf41 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
> @@ -125,7 +125,8 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
>         dma_unmap_len_set(tx_buf, len, 0);
>  }
>
> -void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
> +void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts,
> +                    int budget)
>  {
>         struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
>         struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
> @@ -135,6 +136,9 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
>         u16 last_tx_cons = tx_cons;
>         int i, j, frags;
>
> +       if (!budget)
> +               return;
> +

These TX packet completions have already been counted in
__bnxt_poll_work().  If we do nothing here, I think the TX ring will
forever be out-of-sync with the completion ring.
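
Concretely, with the patch as posted the caller still clears the saved
count even when the handler bails out (per the __bnxt_poll_work_done()
hunk above):

	if (bnapi->tx_pkts && !bnapi->tx_fault) {
		/* returns early when !budget ... */
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts, budget);
		/* ... but the count is cleared regardless, so those
		 * completions are never reconciled with the TX ring
		 */
		bnapi->tx_pkts = 0;
	}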


* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 18:52 ` Michael Chan
@ 2023-07-27 19:05   ` Jakub Kicinski
  2023-07-27 19:29     ` Michael Chan
  0 siblings, 1 reply; 8+ messages in thread
From: Jakub Kicinski @ 2023-07-27 19:05 UTC (permalink / raw)
  To: Michael Chan; +Cc: davem, netdev, edumazet, pabeni, gospo

On Thu, 27 Jul 2023 11:52:10 -0700 Michael Chan wrote:
> These TX packet completions have already been counted in
> __bnxt_poll_work().  If we do nothing here, I think the TX ring will
> forever be out-of-sync with the completion ring.

I see...

Do you prefer adding a return value to tx_int() to tell
__bnxt_poll_work_done() whether the work has been done;
or to clear tx_pkts in the handler itself rather than
the caller?
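
i.e. for the first option, something like (hypothetical sketch, with
tx_int() changed to return bool):

	if (bnapi->tx_pkts && !bnapi->tx_fault) {
		if (bnapi->tx_int(bp, bnapi, bnapi->tx_pkts, budget))
			bnapi->tx_pkts = 0;	/* only clear once consumed */
	}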


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 19:05   ` Jakub Kicinski
@ 2023-07-27 19:29     ` Michael Chan
  2023-07-27 19:40       ` Michael Chan
  2023-07-27 19:42       ` Jakub Kicinski
  0 siblings, 2 replies; 8+ messages in thread
From: Michael Chan @ 2023-07-27 19:29 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: davem, netdev, edumazet, pabeni, gospo

On Thu, Jul 27, 2023 at 12:05 PM Jakub Kicinski <kuba@kernel.org> wrote:
>
> On Thu, 27 Jul 2023 11:52:10 -0700 Michael Chan wrote:
> > These TX packet completions have already been counted in
> > __bnxt_poll_work().  If we do nothing here, I think the TX ring will
> > forever be out-of-sync with the completion ring.
>
> I see...
>
> Do you prefer adding a return value to tx_int() to tell
> __bnxt_poll_work_done() whether the work has been done;
> or to clear tx_pkts in the handler itself rather than
> the caller?
>

It's a bigger problem.  When we transmit packets, we store these
packet buffers and advance the producer index.  The completion ring
tells us how many TX packets have completed.  We then walk the TX ring
for the number of TX packets completed and free the buffers.  If we
cannot free the buffers now, we have to save this information (the
consumer index).  We won't get this information again in the
completion ring.
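
In code terms, roughly (abridged from the bnxt_tx_int_xdp() loop in the
patch above):

	tx_cons = txr->tx_cons;
	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];
		/* unmap DMA and free the buffer ... */
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;	/* only record of how far we've consumed */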


* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 19:29     ` Michael Chan
@ 2023-07-27 19:40       ` Michael Chan
  2023-07-27 19:42       ` Jakub Kicinski
  1 sibling, 0 replies; 8+ messages in thread
From: Michael Chan @ 2023-07-27 19:40 UTC (permalink / raw)
  To: Jakub Kicinski; +Cc: davem, netdev, edumazet, pabeni, gospo

On Thu, Jul 27, 2023 at 12:29 PM Michael Chan <michael.chan@broadcom.com> wrote:
>
> On Thu, Jul 27, 2023 at 12:05 PM Jakub Kicinski <kuba@kernel.org> wrote:
> >
> > I see...
> >
> > Do you prefer adding a return value to tx_int() to tell
> > __bnxt_poll_work_done() whether the work has been done;
> > or to clear tx_pkts in the handler itself rather than
> > the caller?
> >
>
> It's a bigger problem.  When we transmit packets, we store these
> packet buffers and advance the producer index.  The completion ring
> tells us how many TX packets have completed.  We then walk the TX ring
> for the number of TX packets completed and free the buffers.  If we
> cannot free the buffers now, we have to save this information (the
> consumer index).  We won't get this information again in the
> completion ring.

I reread your comments again.  Yes, if we don't clear bnapi->tx_pkts
when we do nothing, it should work.  Since we always free all the
packets or none, it might be easier to clear it in the tx_int handler.
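
i.e. roughly (sketch of what a v2 could look like):

	/* in bnxt_tx_int_xdp() */
	if (!budget)
		return;		/* keep bnapi->tx_pkts; finish on a later poll */

	/* ... free nr_pkts buffers as before ... */

	bnapi->tx_pkts = 0;	/* all-or-nothing, so clear here */

	/* and __bnxt_poll_work_done() no longer clears it */
	if (bnapi->tx_pkts && !bnapi->tx_fault)
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts, budget);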


* Re: [PATCH net] bnxt: don't handle XDP in netpoll
  2023-07-27 19:29     ` Michael Chan
  2023-07-27 19:40       ` Michael Chan
@ 2023-07-27 19:42       ` Jakub Kicinski
  1 sibling, 0 replies; 8+ messages in thread
From: Jakub Kicinski @ 2023-07-27 19:42 UTC (permalink / raw)
  To: Michael Chan; +Cc: davem, netdev, edumazet, pabeni, gospo

On Thu, 27 Jul 2023 12:29:24 -0700 Michael Chan wrote:
> > Do you prefer adding a return value to tx_int() to tell
> > __bnxt_poll_work_done() whether the work has been done;
> > or to clear tx_pkts in the handler itself rather than
> > the caller?
> 
> It's a bigger problem.  When we transmit packets, we store these
> packet buffers and advance the producer index.  The completion ring
> tells us how many TX packets have completed.  We then walk the TX ring
> for the number of TX packets completed and free the buffers.  If we
> cannot free the buffers now, we have to save this information (the
> consumer index).  We won't get this information again in the
> completion ring.

It's already saved in bnapi->tx_pkts, isn't it?

This makes me wonder if the bug we were seeing with unexpected
completions isn't tx_pkts being stale, since it's not getting
wiped on reset.

