From: Jakub Kicinski <kuba@kernel.org>
To: davem@davemloft.net
Cc: netdev@vger.kernel.org, edumazet@google.com, pabeni@redhat.com,
	ecree.xilinx@gmail.com, michael.chan@broadcom.com,
	horms@kernel.org, pavan.chebbi@broadcom.com,
	przemyslaw.kitszel@intel.com, Jakub Kicinski <kuba@kernel.org>
Subject: [PATCH net-next 08/11] eth: bnxt: use the RSS context XArray instead of the local list
Date: Thu, 11 Jul 2024 15:07:10 -0700
Message-ID: <20240711220713.283778-9-kuba@kernel.org>
In-Reply-To: <20240711220713.283778-1-kuba@kernel.org>

The core already maintains all RSS contexts in an XArray, so there is
no need to keep a second list in the driver.

Remove bnxt_get_max_rss_ctx_ring() completely, since the core already
performs the same check.
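
For reference, the core stores the contexts in bp->dev->ethtool->rss_ctx,
so a lookup is a plain xa_load() and a walk is a plain xa_for_each(),
with ethtool_rxfh_context_priv() recovering the driver private struct.
A minimal sketch of both access patterns (illustrative only; "id" is a
stand-in for the context ID, e.g. fltr->base.fw_vnic_id):

	struct ethtool_rxfh_context *ctx;
	struct bnxt_rss_ctx *rss_ctx;
	unsigned long index;

	/* point lookup of one context by its ID */
	ctx = xa_load(&bp->dev->ethtool->rss_ctx, id);
	if (ctx)
		rss_ctx = ethtool_rxfh_context_priv(ctx);

	/* walk every context the core tracks */
	xa_for_each(&bp->dev->ethtool->rss_ctx, index, ctx)
		rss_ctx = ethtool_rxfh_context_priv(ctx);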

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
v2:
 - remove bnxt_get_max_rss_ctx_ring()
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 56 +++++++------------
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |  3 -
 .../net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 17 ++----
 3 files changed, 26 insertions(+), 50 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e3cc34712f33..f9554f512314 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5970,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
 			  struct hwrm_cfa_ntuple_filter_alloc_input *req,
 			  struct bnxt_ntuple_filter *fltr)
 {
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
 	u16 rxq = fltr->base.rxq;
 
 	if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
-		list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
-			if (rss_ctx->index == fltr->base.fw_vnic_id) {
-				struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+		struct ethtool_rxfh_context *ctx;
+		struct bnxt_rss_ctx *rss_ctx;
+		struct bnxt_vnic_info *vnic;
 
-				req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
-				break;
-			}
+		ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+			      fltr->base.fw_vnic_id);
+		if (ctx) {
+			rss_ctx = ethtool_rxfh_context_priv(ctx);
+			vnic = &rss_ctx->vnic;
+
+			req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
 		}
 		return;
 	}
@@ -6282,21 +6285,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 	return max_ring;
 }
 
-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp)
-{
-	u16 i, tbl_size, max_ring = 0;
-	struct bnxt_rss_ctx *rss_ctx;
-
-	tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
-
-	list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) {
-		for (i = 0; i < tbl_size; i++)
-			max_ring = max(max_ring, rss_ctx->rss_indir_tbl[i]);
-	}
-
-	return max_ring;
-}
-
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
 	if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
@@ -10237,16 +10225,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
 				  vnic->rss_table,
 				  vnic->rss_table_dma_addr);
 	kfree(rss_ctx->rss_indir_tbl);
-	list_del(&rss_ctx->list);
 	bp->num_rss_ctx--;
 }
 
 static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 {
 	bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
+	struct ethtool_rxfh_context *ctx;
+	unsigned long context;
 
-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) {
+	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
 		struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
 
 		if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
@@ -10262,16 +10251,14 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
 
 void bnxt_clear_rss_ctxs(struct bnxt *bp)
 {
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
+	struct ethtool_rxfh_context *ctx;
+	unsigned long context;
+
+	xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+		struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
 
-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
 		bnxt_del_one_rss_ctx(bp, rss_ctx, false);
-}
-
-static void bnxt_init_multi_rss_ctx(struct bnxt *bp)
-{
-	INIT_LIST_HEAD(&bp->rss_ctx_list);
-	bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+	}
 }
 
 /* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
@@ -15859,8 +15846,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	INIT_LIST_HEAD(&bp->usr_fltr_list);
 
 	if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
-		bnxt_init_multi_rss_ctx(bp);
-
+		bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
 
 	rc = register_netdev(dev);
 	if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index c5fd7a4e6681..be40e0513777 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1291,7 +1291,6 @@ struct bnxt_vnic_info {
 };
 
 struct bnxt_rss_ctx {
-	struct list_head list;
 	struct bnxt_vnic_info vnic;
 	u16	*rss_indir_tbl;
 	u8	index;
@@ -2330,7 +2329,6 @@ struct bnxt {
 	/* grp_info indexed by completion ring index */
 	struct bnxt_ring_grp_info	*grp_info;
 	struct bnxt_vnic_info	*vnic_info;
-	struct list_head	rss_ctx_list;
 	u32			num_rss_ctx;
 	int			nr_vnics;
 	u16			*rss_indir_tbl;
@@ -2812,7 +2810,6 @@ int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
 void bnxt_fill_ipv6_mask(__be32 mask[4]);
 int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
 void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx);
-u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp);
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index de8e13412151..74765583405b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev,
 		return rc;
 	}
 
-	if (req_rx_rings < bp->rx_nr_rings &&
-	    req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) {
-		netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n");
-		return -EINVAL;
-	}
-
 	if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
 	    bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
 	    netif_is_rxfh_configured(dev)) {
@@ -1216,12 +1210,12 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
 static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
 							u32 index)
 {
-	struct bnxt_rss_ctx *rss_ctx, *tmp;
+	struct ethtool_rxfh_context *ctx;
 
-	list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list)
-		if (rss_ctx->index == index)
-			return rss_ctx;
-	return NULL;
+	ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+	if (!ctx)
+		return NULL;
+	return ethtool_rxfh_context_priv(ctx);
 }
 
 static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp,
@@ -1909,7 +1903,6 @@ static int bnxt_create_rxfh_context(struct net_device *dev,
 
 	rss_ctx = ethtool_rxfh_context_priv(ctx);
 
-	list_add_tail(&rss_ctx->list, &bp->rss_ctx_list);
 	bp->num_rss_ctx++;
 
 	vnic = &rss_ctx->vnic;
-- 
2.45.2


