netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
To: Michael Chan <michael.chan@broadcom.com>
Cc: davem@davemloft.net, netdev@vger.kernel.org, edumazet@google.com,
	kuba@kernel.org, pabeni@redhat.com, andrew+netdev@lunn.ch,
	pavan.chebbi@broadcom.com, andrew.gospodarek@broadcom.com,
	somnath.kotur@broadcom.com,
	Ajit Khaparde <ajit.khaparde@broadcom.com>,
	David Wei <dw@davidwei.uk>
Subject: Re: [PATCH net-next 09/10] bnxt_en: Extend queue stop/start for Tx rings
Date: Mon, 13 Jan 2025 09:40:01 +0100	[thread overview]
Message-ID: <Z4TRYfno5jCz84KD@mev-dev.igk.intel.com> (raw)
In-Reply-To: <20250113063927.4017173-10-michael.chan@broadcom.com>

On Sun, Jan 12, 2025 at 10:39:26PM -0800, Michael Chan wrote:
> From: Somnath Kotur <somnath.kotur@broadcom.com>
> 
> In order to use queue_stop/queue_start to support the new Steering
> Tags, we need to free the TX ring and TX completion ring if it is a
> combined channel with TX/RX sharing the same NAPI.  Otherwise
> TX completions will not have the updated Steering Tag.  With that
> we can now add napi_disable() and napi_enable() during queue_stop()/
> queue_start().  This will guarantee that NAPI will stop processing
> the completion entries in case there are additional pending entries
> in the completion rings after queue_stop().
> 
> There could be some NQEs sitting unprocessed while NAPI is disabled
> thereby leaving the NQ unarmed.  Explicitly re-arm the NQ after
> napi_enable() in queue start so that NAPI will resume properly.
> 
> Reviewed-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
> Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
> Reviewed-by: Michael Chan <michael.chan@broadcom.com>
> ---
> Cc: David Wei <dw@davidwei.uk>
> 
> Discussion about adding napi_disable()/napi_enable():
> 
> https://lore.kernel.org/netdev/5336d624-8d8b-40a6-b732-b020e4a119a2@davidwei.uk/#t
> ---
>  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 99 ++++++++++++++++++++++-
>  1 file changed, 98 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> index fe350d0ba99c..eddb4de959c6 100644
> --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
> @@ -7341,6 +7341,22 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
>  	return 0;
>  }
>  
> +static void bnxt_hwrm_tx_ring_free(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
> +				   bool close_path)
> +{
> +	struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
> +	u32 cmpl_ring_id;
> +
> +	if (ring->fw_ring_id == INVALID_HW_RING_ID)
> +		return;
> +
> +	cmpl_ring_id = close_path ? bnxt_cp_ring_for_tx(bp, txr) :
> +		       INVALID_HW_RING_ID;
> +	hwrm_ring_free_send_msg(bp, ring, RING_FREE_REQ_RING_TYPE_TX,
> +				cmpl_ring_id);
> +	ring->fw_ring_id = INVALID_HW_RING_ID;
> +}
> +
>  static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
>  				   struct bnxt_rx_ring_info *rxr,
>  				   bool close_path)
> @@ -11247,6 +11263,69 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
>  	return 0;
>  }
>  
> +static void bnxt_tx_queue_stop(struct bnxt *bp, int idx)
> +{
> +	struct bnxt_tx_ring_info *txr;
> +	struct netdev_queue *txq;
> +	struct bnxt_napi *bnapi;
> +	int i;
> +
> +	bnapi = bp->bnapi[idx];
> +	bnxt_for_each_napi_tx(i, bnapi, txr) {
> +		WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
> +		synchronize_net();
> +
> +		if (!(bnapi->flags & BNXT_NAPI_FLAG_XDP)) {
> +			txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> +			if (txq) {
> +				__netif_tx_lock_bh(txq);
> +				netif_tx_stop_queue(txq);
> +				__netif_tx_unlock_bh(txq);
> +			}
> +		}
> +		bnxt_hwrm_tx_ring_free(bp, txr, true);
> +		bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
> +		bnxt_free_one_tx_ring_skbs(bp, txr, txr->txq_index);
> +		bnxt_clear_one_cp_ring(bp, txr->tx_cpr);
> +	}
> +}
> +
> +static int bnxt_tx_queue_start(struct bnxt *bp, int idx)
> +{
> +	struct bnxt_tx_ring_info *txr;
> +	struct netdev_queue *txq;
> +	struct bnxt_napi *bnapi;
> +	int rc, i;
> +
> +	bnapi = bp->bnapi[idx];
> +	bnxt_for_each_napi_tx(i, bnapi, txr) {
> +		rc = bnxt_hwrm_cp_ring_alloc_p5(bp, txr->tx_cpr);
> +		if (rc)
> +			return rc;
> +
> +		rc = bnxt_hwrm_tx_ring_alloc(bp, txr, false);
> +		if (rc) {
> +			bnxt_hwrm_cp_ring_free(bp, txr->tx_cpr);
What about the rings allocated in previous iterations of this loop? Don't
you need to free them too before returning?

> +			return rc;
> +		}
> +		txr->tx_prod = 0;
> +		txr->tx_cons = 0;
> +		txr->tx_hw_cons = 0;
> +
> +		WRITE_ONCE(txr->dev_state, 0);
> +		synchronize_net();
> +
> +		if (bnapi->flags & BNXT_NAPI_FLAG_XDP)
> +			continue;
> +
> +		txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
> +		if (txq)
> +			netif_tx_start_queue(txq);
> +	}
> +
> +	return 0;
> +}
> +
>  static void bnxt_free_irq(struct bnxt *bp)
>  {
>  	struct bnxt_irq *irq;
> @@ -15647,6 +15726,16 @@ static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
>  	cpr = &rxr->bnapi->cp_ring;
>  	cpr->sw_stats->rx.rx_resets++;
>  
> +	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
> +		rc = bnxt_tx_queue_start(bp, idx);
> +		if (rc)
> +			netdev_warn(bp->dev,
> +				    "tx queue restart failed: rc=%d\n", rc);
> +	}
> +
> +	napi_enable(&rxr->bnapi->napi);
> +	bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
> +
>  	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
>  		vnic = &bp->vnic_info[i];
>  
> @@ -15675,6 +15764,7 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
>  	struct bnxt *bp = netdev_priv(dev);
>  	struct bnxt_rx_ring_info *rxr;
>  	struct bnxt_vnic_info *vnic;
> +	struct bnxt_napi *bnapi;
>  	int i;
>  
>  	for (i = 0; i <= BNXT_VNIC_NTUPLE; i++) {
> @@ -15686,15 +15776,22 @@ static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
>  	/* Make sure NAPI sees that the VNIC is disabled */
>  	synchronize_net();
>  	rxr = &bp->rx_ring[idx];
> -	cancel_work_sync(&rxr->bnapi->cp_ring.dim.work);
> +	bnapi = rxr->bnapi;
> +	cancel_work_sync(&bnapi->cp_ring.dim.work);
>  	bnxt_hwrm_rx_ring_free(bp, rxr, false);
>  	bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
>  	page_pool_disable_direct_recycling(rxr->page_pool);
>  	if (bnxt_separate_head_pool())
>  		page_pool_disable_direct_recycling(rxr->head_pool);
>  
> +	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
> +		bnxt_tx_queue_stop(bp, idx);
> +
> +	napi_disable(&bnapi->napi);
> +
>  	bnxt_hwrm_cp_ring_free(bp, rxr->rx_cpr);
>  	bnxt_clear_one_cp_ring(bp, rxr->rx_cpr);
> +	bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
>  
>  	memcpy(qmem, rxr, sizeof(*rxr));
>  	bnxt_init_rx_ring_struct(bp, qmem);
> -- 
> 2.30.1

  reply	other threads:[~2025-01-13  8:43 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-01-13  6:39 [PATCH net-next 00/10] bnxt_en: Add NPAR 1.2 and TPH support Michael Chan
2025-01-13  6:39 ` [PATCH net-next 01/10] bnxt_en: Set NPAR 1.2 support when registering with firmware Michael Chan
2025-01-13  7:56   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 02/10] bnxt_en: Refactor completion ring allocation logic for P5_PLUS chips Michael Chan
2025-01-13  7:59   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 03/10] bnxt_en: Refactor TX ring allocation logic Michael Chan
2025-01-13  8:02   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 04/10] bnxt_en: Refactor completion ring free routine Michael Chan
2025-01-13  8:06   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 05/10] bnxt_en: Refactor bnxt_free_tx_rings() to free per Tx ring Michael Chan
2025-01-13  8:17   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 06/10] bnxt_en: Refactor RX/RX AGG ring parameters setup for P5_PLUS Michael Chan
2025-01-13  8:27   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 07/10] bnxt_en: Pass NQ ID to the FW when allocating RX/RX AGG rings Michael Chan
2025-01-13  8:29   ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 08/10] bnxt_en: Reallocate Rx completion ring for TPH support Michael Chan
2025-01-13  8:35   ` Michal Swiatkowski
2025-01-14 21:42     ` Michael Chan
2025-01-15  5:58       ` Michal Swiatkowski
2025-01-13  6:39 ` [PATCH net-next 09/10] bnxt_en: Extend queue stop/start for Tx rings Michael Chan
2025-01-13  8:40   ` Michal Swiatkowski [this message]
2025-01-14  2:05     ` Somnath Kotur
2025-01-15  1:29     ` Michael Chan
2025-01-13 16:01   ` Bjorn Helgaas
2025-01-14  4:57     ` Somnath Kotur
2025-01-14  8:48   ` kernel test robot
2025-01-14 11:23   ` kernel test robot
2025-01-13  6:39 ` [PATCH net-next 10/10] bnxt_en: Add TPH support in BNXT driver Michael Chan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Z4TRYfno5jCz84KD@mev-dev.igk.intel.com \
    --to=michal.swiatkowski@linux.intel.com \
    --cc=ajit.khaparde@broadcom.com \
    --cc=andrew+netdev@lunn.ch \
    --cc=andrew.gospodarek@broadcom.com \
    --cc=davem@davemloft.net \
    --cc=dw@davidwei.uk \
    --cc=edumazet@google.com \
    --cc=kuba@kernel.org \
    --cc=michael.chan@broadcom.com \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    --cc=pavan.chebbi@broadcom.com \
    --cc=somnath.kotur@broadcom.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).