From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: netdev@vger.kernel.org, pawel.chmielewski@intel.com,
sridhar.samudrala@intel.com, jacob.e.keller@intel.com,
pio.raczynski@gmail.com, konrad.knitter@intel.com,
marcin.szycik@intel.com, wojciech.drewek@intel.com,
nex.sw.ncis.nat.hpm.dev@intel.com, przemyslaw.kitszel@intel.com,
jiri@resnulli.us
Subject: [iwl-next v4 1/8] ice: devlink PF MSI-X max and min parameter
Date: Mon, 30 Sep 2024 14:03:55 +0200
Message-ID: <20240930120402.3468-2-michal.swiatkowski@linux.intel.com>
In-Reply-To: <20240930120402.3468-1-michal.swiatkowski@linux.intel.com>
Use the generic devlink PF MSI-X parameters (msix_vec_per_pf_max and
msix_vec_per_pf_min) to allow the user to change the MSI-X range used
by the PF. Add a pf->msix structure to track the current, minimum and
maximum number of PF MSI-X vectors. When the range is not set by the
user, default to ICE_MIN_MSIX for the minimum and to half of the total
MSI-X vectors supported by the function for the maximum.
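
A possible way to change the range with the devlink CLI (the PCI
address below is only an example) is:

  devlink dev param set pci/0000:31:00.0 \
          name msix_vec_per_pf_max value 64 cmode driverinit
  devlink dev param set pci/0000:31:00.0 \
          name msix_vec_per_pf_min value 2 cmode driverinit
  devlink dev reload pci/0000:31:00.0

Both parameters use the driverinit cmode, so new values take effect
after a devlink reload.
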
Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
---
.../net/ethernet/intel/ice/devlink/devlink.c | 56 ++++++++++++++++++-
drivers/net/ethernet/intel/ice/ice.h | 8 +++
drivers/net/ethernet/intel/ice/ice_irq.c | 7 +++
3 files changed, 70 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index 415445cefdb2..55538cbcf0b0 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -1518,6 +1518,32 @@ static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
return 0;
}
+static int
+ice_devlink_msix_max_pf_validate(struct devlink *devlink, u32 id,
+				 union devlink_param_value val,
+				 struct netlink_ext_ack *extack)
+{
+	if (val.vu16 > ICE_MAX_MSIX) {
+		NL_SET_ERR_MSG_MOD(extack, "Value is too high");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int
+ice_devlink_msix_min_pf_validate(struct devlink *devlink, u32 id,
+				 union devlink_param_value val,
+				 struct netlink_ext_ack *extack)
+{
+	if (val.vu16 < ICE_MIN_MSIX) {
+		NL_SET_ERR_MSG_MOD(extack, "Value is too low");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
enum ice_param_id {
ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
@@ -1535,6 +1561,15 @@ static const struct devlink_param ice_dvl_rdma_params[] = {
ice_devlink_enable_iw_validate),
};
+static const struct devlink_param ice_dvl_msix_params[] = {
+	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
+			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+			      NULL, NULL, ice_devlink_msix_max_pf_validate),
+	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
+			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+			      NULL, NULL, ice_devlink_msix_min_pf_validate),
+};
+
static const struct devlink_param ice_dvl_sched_params[] = {
DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
"tx_scheduling_layers",
@@ -1636,6 +1671,7 @@ void ice_devlink_unregister(struct ice_pf *pf)
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
+ union devlink_param_value value;
struct ice_hw *hw = &pf->hw;
int status;
@@ -1644,11 +1680,27 @@ int ice_devlink_register_params(struct ice_pf *pf)
if (status)
return status;
+	status = devl_params_register(devlink, ice_dvl_msix_params,
+				      ARRAY_SIZE(ice_dvl_msix_params));
+	if (status)
+		return status;
+
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
status = devl_params_register(devlink, ice_dvl_sched_params,
ARRAY_SIZE(ice_dvl_sched_params));
+	if (status)
+		return status;
-	return status;
+	value.vu16 = pf->msix.max;
+	devl_param_driverinit_value_set(devlink,
+					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
+					value);
+	value.vu16 = pf->msix.min;
+	devl_param_driverinit_value_set(devlink,
+					DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
+					value);
+
+	return 0;
}
void ice_devlink_unregister_params(struct ice_pf *pf)
@@ -1658,6 +1710,8 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
devl_params_unregister(devlink, ice_dvl_rdma_params,
ARRAY_SIZE(ice_dvl_rdma_params));
+	devl_params_unregister(devlink, ice_dvl_msix_params,
+			       ARRAY_SIZE(ice_dvl_msix_params));
if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
devl_params_unregister(devlink, ice_dvl_sched_params,
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index d2235e8bfea4..cf824d041d5a 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -96,6 +96,7 @@
#define ICE_MIN_LAN_TXRX_MSIX 1
#define ICE_MIN_LAN_OICR_MSIX 1
#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX)
+#define ICE_MAX_MSIX 256
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
@@ -544,6 +545,12 @@ struct ice_agg_node {
u8 valid;
};
+struct ice_pf_msix {
+	u16 cur;
+	u16 min;
+	u16 max;
+};
+
struct ice_pf {
struct pci_dev *pdev;
struct ice_adapter *adapter;
@@ -614,6 +621,7 @@ struct ice_pf {
struct msi_map ll_ts_irq; /* LL_TS interrupt MSIX vector */
u16 max_pf_txqs; /* Total Tx queues PF wide */
u16 max_pf_rxqs; /* Total Rx queues PF wide */
+ struct ice_pf_msix msix;
u16 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num LAN Tx queues setup */
u16 num_lan_rx; /* num LAN Rx queues setup */
diff --git a/drivers/net/ethernet/intel/ice/ice_irq.c b/drivers/net/ethernet/intel/ice/ice_irq.c
index ad82ff7d1995..0659b96b9b8c 100644
--- a/drivers/net/ethernet/intel/ice/ice_irq.c
+++ b/drivers/net/ethernet/intel/ice/ice_irq.c
@@ -254,6 +254,13 @@ int ice_init_interrupt_scheme(struct ice_pf *pf)
int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
int vectors, max_vectors;
+	/* load default PF MSI-X range */
+	if (!pf->msix.min)
+		pf->msix.min = ICE_MIN_MSIX;
+
+	if (!pf->msix.max)
+		pf->msix.max = total_vectors / 2;
+
vectors = ice_ena_msix_range(pf);
if (vectors < 0)
--
2.42.0
Thread overview: 14+ messages
2024-09-30 12:03 [iwl-next v4 0/8] ice: managing MSI-X in driver Michal Swiatkowski
2024-09-30 12:03 ` Michal Swiatkowski [this message]
2024-09-30 12:03 ` [iwl-next v4 2/8] ice: remove splitting MSI-X between features Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 3/8] ice: get rid of num_lan_msix field Michal Swiatkowski
2024-10-12 15:13 ` Simon Horman
2024-10-14 18:50 ` Jacob Keller
2024-10-14 19:04 ` David Laight
2024-10-14 22:23 ` Jacob Keller
2024-10-23 7:17 ` Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 4/8] ice, irdma: move interrupts code to irdma Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 5/8] ice: treat dyn_allowed only as suggestion Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 6/8] ice: enable_rdma devlink param Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 7/8] ice: simplify VF MSI-X managing Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 8/8] ice: init flow director before RDMA Michal Swiatkowski