From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: netdev@vger.kernel.org, piotr.raczynski@intel.com,
	wojciech.drewek@intel.com, marcin.szycik@intel.com,
	jacob.e.keller@intel.com, przemyslaw.kitszel@intel.com,
	jesse.brandeburg@intel.com,
	Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Subject: [PATCH iwl-next v1 15/15] ice: reserve number of CP queues
Date: Tue, 24 Oct 2023 13:09:29 +0200	[thread overview]
Message-ID: <20231024110929.19423-16-michal.swiatkowski@linux.intel.com>
In-Reply-To: <20231024110929.19423-1-michal.swiatkowski@linux.intel.com>

Rebuilding the control plane (CP) VSI each time a port representor (PR)
is created drastically increases the time needed to create the maximum
number of VFs. Add a function that reserves the required number of CP
queues up front to avoid this problem.

Use the same function to decrease the number of queues when VFs are
removed. Assume that the caller of ice_eswitch_reserve_cp_queues() will
also call ice_eswitch_attach/detach() the corresponding number of times.

Adding PRs one by one is still handy for the VF reset routine.
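
For illustration only (not part of the diff below), a minimal sketch of
the caller-side contract, loosely following what ice_ena_vfs() and
ice_free_vfs() do in this patch; num_vfs, bkt, vf and err are stand-in
locals here and error unwinding is elided:

	/* Reserve CP queues for all representors up front, so the
	 * control plane VSI is rebuilt at most once instead of once
	 * per created port representor.
	 */
	ice_eswitch_reserve_cp_queues(pf, num_vfs);

	ice_for_each_vf(pf, bkt, vf) {
		err = ice_eswitch_attach(pf, vf);
		if (err)
			break;
	}

	/* Teardown is symmetric: drop the reservation, then detach
	 * each VF.
	 */
	ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));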

Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
---
 drivers/net/ethernet/intel/ice/ice.h         |  6 +++
 drivers/net/ethernet/intel/ice/ice_eswitch.c | 52 +++++++++++++++++---
 drivers/net/ethernet/intel/ice/ice_eswitch.h |  4 ++
 drivers/net/ethernet/intel/ice/ice_sriov.c   |  3 ++
 4 files changed, 58 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 597bdb6945c6..cd7dcd0fa7f2 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -528,6 +528,12 @@ struct ice_eswitch {
 	struct ice_esw_br_offloads *br_offloads;
 	struct xarray reprs;
 	bool is_running;
+	/* struct to allow optimized management of CP queues */
+	struct {
+		int to_reach;
+		int value;
+		bool is_reaching;
+	} qs;
 };
 
 struct ice_agg_node {
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9ff4fe4fb133..3f80e2081e5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -176,7 +176,7 @@ static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
 
 		repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
 			       XA_PRESENT);
-		if (WARN_ON(!repr))
+		if (!repr)
 			break;
 
 		repr_id += 1;
@@ -455,6 +455,8 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
 		return -ENODEV;
 
 	ctrl_vsi = pf->eswitch.control_vsi;
+	/* CP VSI is created with 1 queue by default */
+	pf->eswitch.qs.value = 1;
 	pf->eswitch.uplink_vsi = uplink_vsi;
 
 	if (ice_eswitch_setup_env(pf))
@@ -487,6 +489,7 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
 	ice_vsi_release(ctrl_vsi);
 
 	pf->eswitch.is_running = false;
+	pf->eswitch.qs.is_reaching = false;
 }
 
 /**
@@ -615,15 +618,33 @@ static void
 ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
 {
 	struct ice_vsi *cp = eswitch->control_vsi;
+	int queues = 0;
+
+	if (eswitch->qs.is_reaching) {
+		if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
+			queues = eswitch->qs.to_reach;
+			eswitch->qs.is_reaching = false;
+		} else {
+			queues = 0;
+		}
+	} else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
+		   change < 0) {
+		queues = cp->alloc_txq + change;
+	}
 
-	ice_vsi_close(cp);
+	if (queues) {
+		cp->req_txq = queues;
+		cp->req_rxq = queues;
+		ice_vsi_close(cp);
+		ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
+		ice_vsi_open(cp);
+	} else if (!change) {
+		/* change == 0 means the VSI wasn't open; open it here */
+		ice_vsi_open(cp);
+	}
 
-	cp->req_txq = cp->alloc_txq + change;
-	cp->req_rxq = cp->alloc_rxq + change;
-	ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
+	eswitch->qs.value += change;
 	ice_eswitch_remap_rings_to_vectors(eswitch);
-
-	ice_vsi_open(cp);
 }
 
 int
@@ -641,6 +662,7 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
 		if (err)
 			return err;
 		/* Control plane VSI is created with 1 queue as default */
+		pf->eswitch.qs.to_reach -= 1;
 		change = 0;
 	}
 
@@ -732,3 +754,19 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
 
 	return 0;
 }
+
+/**
+ * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
+ * @pf: pointer to PF structure
+ * @change: how many more (or fewer) queues are needed
+ *
+ * Remember to call ice_eswitch_attach/detach() "change" times.
+ */
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
+{
+	if (pf->eswitch.qs.value + change < 0)
+		return;
+
+	pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
+	pf->eswitch.qs.is_reaching = true;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 59d51c0d14e5..1a288a03a79a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -26,6 +26,7 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
 				struct ice_tx_offload_params *off);
 netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
 #else /* CONFIG_ICE_SWITCHDEV */
 static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
 
@@ -76,5 +77,8 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	return NETDEV_TX_BUSY;
 }
+
+static inline void
+ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 51f5f420d632..5a45bd5ce6ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -172,6 +172,8 @@ void ice_free_vfs(struct ice_pf *pf)
 	else
 		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
+	ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
+
 	mutex_lock(&vfs->table_lock);
 
 	ice_for_each_vf(pf, bkt, vf) {
@@ -930,6 +932,7 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
 		goto err_unroll_sriov;
 	}
 
+	ice_eswitch_reserve_cp_queues(pf, num_vfs);
 	ret = ice_start_vfs(pf);
 	if (ret) {
 		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
-- 
2.41.0

