Intel-Wired-Lan Archive on lore.kernel.org
 help / color / mirror / Atom feed
From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: netdev@vger.kernel.org, sridhar.samudrala@intel.com
Subject: [Intel-wired-lan] [iwl-next v1 3/3] ice: allow changing SF VSI queues number
Date: Thu, 31 Oct 2024 07:00:09 +0100	[thread overview]
Message-ID: <20241031060009.38979-4-michal.swiatkowski@linux.intel.com> (raw)
In-Reply-To: <20241031060009.38979-1-michal.swiatkowski@linux.intel.com>

Move the setting of the number of Rx and Tx queues into separate
functions and use them in the SF case.

Adjust the retrieval of the maximum Rx and Tx queue counts for the SF
use case.

Reviewed-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
---
 drivers/net/ethernet/intel/ice/ice_ethtool.c | 37 +++++++-----
 drivers/net/ethernet/intel/ice/ice_lib.c     | 63 ++++++++++++--------
 2 files changed, 60 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 9e2f20ed55d5..c68f7796b83e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -3786,22 +3786,31 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
 
 /**
  * ice_get_max_txq - return the maximum number of Tx queues for in a PF
- * @pf: PF structure
+ * @vsi: VSI structure
  */
-static int ice_get_max_txq(struct ice_pf *pf)
+static int ice_get_max_txq(struct ice_vsi *vsi)
 {
-	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
-		    (u16)pf->hw.func_caps.common_cap.num_txq);
+	u16 num_queues = vsi->back->num_lan_msix;
+
+	if (vsi->max_io_eqs)
+		num_queues = vsi->max_io_eqs;
+	return min3(num_queues, (u16)num_online_cpus(),
+		    (u16)vsi->back->hw.func_caps.common_cap.num_txq);
 }
 
 /**
  * ice_get_max_rxq - return the maximum number of Rx queues for in a PF
- * @pf: PF structure
+ * @vsi: VSI structure
  */
-static int ice_get_max_rxq(struct ice_pf *pf)
+static int ice_get_max_rxq(struct ice_vsi *vsi)
 {
-	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
-		    (u16)pf->hw.func_caps.common_cap.num_rxq);
+	u16 num_queues = vsi->back->num_lan_msix;
+
+	if (vsi->max_io_eqs)
+		num_queues = vsi->max_io_eqs;
+
+	return min3(num_queues, (u16)num_online_cpus(),
+		    (u16)vsi->back->hw.func_caps.common_cap.num_rxq);
 }
 
 /**
@@ -3839,8 +3848,8 @@ ice_get_channels(struct net_device *dev, struct ethtool_channels *ch)
 	struct ice_pf *pf = vsi->back;
 
 	/* report maximum channels */
-	ch->max_rx = ice_get_max_rxq(pf);
-	ch->max_tx = ice_get_max_txq(pf);
+	ch->max_rx = ice_get_max_rxq(vsi);
+	ch->max_tx = ice_get_max_txq(vsi);
 	ch->max_combined = min_t(int, ch->max_rx, ch->max_tx);
 
 	/* report current channels */
@@ -3958,14 +3967,14 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
 			   vsi->tc_cfg.numtc);
 		return -EINVAL;
 	}
-	if (new_rx > ice_get_max_rxq(pf)) {
+	if (new_rx > ice_get_max_rxq(vsi)) {
 		netdev_err(dev, "Maximum allowed Rx channels is %d\n",
-			   ice_get_max_rxq(pf));
+			   ice_get_max_rxq(vsi));
 		return -EINVAL;
 	}
-	if (new_tx > ice_get_max_txq(pf)) {
+	if (new_tx > ice_get_max_txq(vsi)) {
 		netdev_err(dev, "Maximum allowed Tx channels is %d\n",
-			   ice_get_max_txq(pf));
+			   ice_get_max_txq(vsi));
 		return -EINVAL;
 	}
 
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 01220e21cc81..64a6152eaaef 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -157,6 +157,32 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 	}
 }
 
+static void ice_vsi_set_num_txqs(struct ice_vsi *vsi, u16 def_qs)
+{
+	if (vsi->req_txq) {
+		vsi->alloc_txq = vsi->req_txq;
+		vsi->num_txq = vsi->req_txq;
+	} else {
+		vsi->alloc_txq = min_t(u16, def_qs, (u16)num_online_cpus());
+	}
+}
+
+static void ice_vsi_set_num_rxqs(struct ice_vsi *vsi, bool rss_ena, u16 def_qs)
+{
+	/* only 1 Rx queue unless RSS is enabled */
+	if (rss_ena) {
+		vsi->alloc_rxq = 1;
+		return;
+	}
+
+	if (vsi->req_rxq) {
+		vsi->alloc_rxq = vsi->req_rxq;
+		vsi->num_rxq = vsi->req_rxq;
+	} else {
+		vsi->alloc_rxq = min_t(u16, def_qs, (u16)num_online_cpus());
+	}
+}
+
 /**
  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
  * @vsi: the VSI being configured
@@ -174,31 +200,13 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 
 	switch (vsi_type) {
 	case ICE_VSI_PF:
-		if (vsi->req_txq) {
-			vsi->alloc_txq = vsi->req_txq;
-			vsi->num_txq = vsi->req_txq;
-		} else {
-			vsi->alloc_txq = min3(pf->num_lan_msix,
-					      ice_get_avail_txq_count(pf),
-					      (u16)num_online_cpus());
-		}
-
+		ice_vsi_set_num_txqs(vsi, min(pf->num_lan_msix,
+					      ice_get_avail_txq_count(pf)));
 		pf->num_lan_tx = vsi->alloc_txq;
 
-		/* only 1 Rx queue unless RSS is enabled */
-		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
-			vsi->alloc_rxq = 1;
-		} else {
-			if (vsi->req_rxq) {
-				vsi->alloc_rxq = vsi->req_rxq;
-				vsi->num_rxq = vsi->req_rxq;
-			} else {
-				vsi->alloc_rxq = min3(pf->num_lan_msix,
-						      ice_get_avail_rxq_count(pf),
-						      (u16)num_online_cpus());
-			}
-		}
-
+		ice_vsi_set_num_rxqs(vsi, !test_bit(ICE_FLAG_RSS_ENA, pf->flags),
+				     min(pf->num_lan_msix,
+					 ice_get_avail_rxq_count(pf)));
 		pf->num_lan_rx = vsi->alloc_rxq;
 
 		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
@@ -206,9 +214,12 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
 						 vsi->alloc_txq));
 		break;
 	case ICE_VSI_SF:
-		vsi->alloc_txq = 1;
-		vsi->alloc_rxq = 1;
-		vsi->num_q_vectors = 1;
+		ice_vsi_set_num_txqs(vsi, min(vsi->max_io_eqs,
+					      ice_get_avail_txq_count(pf)));
+		ice_vsi_set_num_rxqs(vsi, !test_bit(ICE_FLAG_RSS_ENA, pf->flags),
+				     min(vsi->max_io_eqs,
+					 ice_get_avail_rxq_count(pf)));
+		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
 		vsi->irq_dyn_alloc = true;
 		break;
 	case ICE_VSI_VF:
-- 
2.42.0


      parent reply	other threads:[~2024-10-31 15:20 UTC|newest]

Thread overview: 5+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-31  6:00 [Intel-wired-lan] [iwl-next v1 0/3] ice: multiqueue on subfunction Michal Swiatkowski
2024-10-31  6:00 ` [Intel-wired-lan] [iwl-next v1 1/3] ice: support max_io_eqs for subfunction Michal Swiatkowski
2024-11-06  9:53   ` Simon Horman
2024-10-31  6:00 ` [Intel-wired-lan] [iwl-next v1 2/3] ice: ethtool support for SF Michal Swiatkowski
2024-10-31  6:00 ` Michal Swiatkowski [this message]

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241031060009.38979-4-michal.swiatkowski@linux.intel.com \
    --to=michal.swiatkowski@linux.intel.com \
    --cc=intel-wired-lan@lists.osuosl.org \
    --cc=netdev@vger.kernel.org \
    --cc=sridhar.samudrala@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox