netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Simon Horman <horms@kernel.org>
To: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Cc: intel-wired-lan@lists.osuosl.org, netdev@vger.kernel.org,
	pawel.chmielewski@intel.com, sridhar.samudrala@intel.com,
	jacob.e.keller@intel.com, pio.raczynski@gmail.com,
	konrad.knitter@intel.com, marcin.szycik@intel.com,
	wojciech.drewek@intel.com, nex.sw.ncis.nat.hpm.dev@intel.com,
	przemyslaw.kitszel@intel.com, jiri@resnulli.us,
	David Laight <David.Laight@aculab.com>
Subject: Re: [iwl-next v4 3/8] ice: get rid of num_lan_msix field
Date: Sat, 12 Oct 2024 16:13:04 +0100	[thread overview]
Message-ID: <20241012151304.GK77519@kernel.org> (raw)
In-Reply-To: <20240930120402.3468-4-michal.swiatkowski@linux.intel.com>

+ David Laight

On Mon, Sep 30, 2024 at 02:03:57PM +0200, Michal Swiatkowski wrote:
> Remove the field to allow having more queues than MSI-X vectors on a
> VSI. By default the numbers will be the same, but if there are not
> enough MSI-X vectors available, the VSI can run with as few as one.
> 
> Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
> Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
> ---
>  drivers/net/ethernet/intel/ice/ice.h         |  1 -
>  drivers/net/ethernet/intel/ice/ice_base.c    | 10 +++-----
>  drivers/net/ethernet/intel/ice/ice_ethtool.c |  8 +++---
>  drivers/net/ethernet/intel/ice/ice_irq.c     | 11 +++------
>  drivers/net/ethernet/intel/ice/ice_lib.c     | 26 +++++++++++---------
>  5 files changed, 27 insertions(+), 29 deletions(-)
> 
> diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
> index cf824d041d5a..1e23aec2634f 100644
> --- a/drivers/net/ethernet/intel/ice/ice.h
> +++ b/drivers/net/ethernet/intel/ice/ice.h
> @@ -622,7 +622,6 @@ struct ice_pf {
>  	u16 max_pf_txqs;	/* Total Tx queues PF wide */
>  	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
>  	struct ice_pf_msix msix;
> -	u16 num_lan_msix;	/* Total MSIX vectors for base driver */
>  	u16 num_lan_tx;		/* num LAN Tx queues setup */
>  	u16 num_lan_rx;		/* num LAN Rx queues setup */
>  	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */

...

> diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
> index 85a3b2326e7b..e5c56ec8bbda 100644
> --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
> +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
> @@ -3811,8 +3811,8 @@ ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
>   */
>  static int ice_get_max_txq(struct ice_pf *pf)
>  {
> -	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
> -		    (u16)pf->hw.func_caps.common_cap.num_txq);
> +	return min_t(u16, num_online_cpus(),
> +		     pf->hw.func_caps.common_cap.num_txq);

It is unclear why min_t() is used here, or elsewhere in this patch,
instead of min(), as all of the entities being compared appear to be
unsigned. Are you concerned about overflowing u16? If so, perhaps
clamp(), or some explicit error handling, would be a better approach.

I am concerned that the casting that min_t() brings will hide
any problems that may exist.

>  }
>  
>  /**
> @@ -3821,8 +3821,8 @@ static int ice_get_max_txq(struct ice_pf *pf)
>   */
>  static int ice_get_max_rxq(struct ice_pf *pf)
>  {
> -	return min3(pf->num_lan_msix, (u16)num_online_cpus(),
> -		    (u16)pf->hw.func_caps.common_cap.num_rxq);
> +	return min_t(u16, num_online_cpus(),
> +		     pf->hw.func_caps.common_cap.num_rxq);
>  }
>  
>  /**

...

> diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
> index d4e74f96a8ad..26cfb4b67972 100644
> --- a/drivers/net/ethernet/intel/ice/ice_lib.c
> +++ b/drivers/net/ethernet/intel/ice/ice_lib.c
> @@ -157,6 +157,16 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
>  	}
>  }
>  
> +static u16 ice_get_rxq_count(struct ice_pf *pf)
> +{
> +	return min_t(u16, ice_get_avail_rxq_count(pf), num_online_cpus());
> +}
> +
> +static u16 ice_get_txq_count(struct ice_pf *pf)
> +{
> +	return min_t(u16, ice_get_avail_txq_count(pf), num_online_cpus());
> +}
> +
>  /**
>   * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
>   * @vsi: the VSI being configured
> @@ -178,9 +188,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
>  			vsi->alloc_txq = vsi->req_txq;
>  			vsi->num_txq = vsi->req_txq;
>  		} else {
> -			vsi->alloc_txq = min3(pf->num_lan_msix,
> -					      ice_get_avail_txq_count(pf),
> -					      (u16)num_online_cpus());
> +			vsi->alloc_txq = ice_get_txq_count(pf);
>  		}
>  
>  		pf->num_lan_tx = vsi->alloc_txq;
> @@ -193,17 +201,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
>  				vsi->alloc_rxq = vsi->req_rxq;
>  				vsi->num_rxq = vsi->req_rxq;
>  			} else {
> -				vsi->alloc_rxq = min3(pf->num_lan_msix,
> -						      ice_get_avail_rxq_count(pf),
> -						      (u16)num_online_cpus());
> +				vsi->alloc_rxq = ice_get_rxq_count(pf);
>  			}
>  		}
>  
>  		pf->num_lan_rx = vsi->alloc_rxq;
>  
> -		vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
> -					   max_t(int, vsi->alloc_rxq,
> -						 vsi->alloc_txq));
> +		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq,
> +					   vsi->alloc_txq);
>  		break;
>  	case ICE_VSI_SF:
>  		vsi->alloc_txq = 1;
> @@ -1173,12 +1178,11 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
>  static void
>  ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
>  {
> -	struct ice_pf *pf = vsi->back;
>  	u16 qcount, qmap;
>  	u8 offset = 0;
>  	int pow;
>  
> -	qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
> +	qcount = vsi->num_rxq;
>  
>  	pow = order_base_2(qcount);
>  	qmap = FIELD_PREP(ICE_AQ_VSI_TC_Q_OFFSET_M, offset);
> -- 
> 2.42.0
> 
> 

  reply	other threads:[~2024-10-12 15:13 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-09-30 12:03 [iwl-next v4 0/8] ice: managing MSI-X in driver Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 1/8] ice: devlink PF MSI-X max and min parameter Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 2/8] ice: remove splitting MSI-X between features Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 3/8] ice: get rid of num_lan_msix field Michal Swiatkowski
2024-10-12 15:13   ` Simon Horman [this message]
2024-10-14 18:50     ` Jacob Keller
2024-10-14 19:04       ` David Laight
2024-10-14 22:23         ` Jacob Keller
2024-10-23  7:17           ` Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 4/8] ice, irdma: move interrupts code to irdma Michal Swiatkowski
2024-09-30 12:03 ` [iwl-next v4 5/8] ice: treat dyn_allowed only as suggestion Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 6/8] ice: enable_rdma devlink param Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 7/8] ice: simplify VF MSI-X managing Michal Swiatkowski
2024-09-30 12:04 ` [iwl-next v4 8/8] ice: init flow director before RDMA Michal Swiatkowski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241012151304.GK77519@kernel.org \
    --to=horms@kernel.org \
    --cc=David.Laight@aculab.com \
    --cc=intel-wired-lan@lists.osuosl.org \
    --cc=jacob.e.keller@intel.com \
    --cc=jiri@resnulli.us \
    --cc=konrad.knitter@intel.com \
    --cc=marcin.szycik@intel.com \
    --cc=michal.swiatkowski@linux.intel.com \
    --cc=netdev@vger.kernel.org \
    --cc=nex.sw.ncis.nat.hpm.dev@intel.com \
    --cc=pawel.chmielewski@intel.com \
    --cc=pio.raczynski@gmail.com \
    --cc=przemyslaw.kitszel@intel.com \
    --cc=sridhar.samudrala@intel.com \
    --cc=wojciech.drewek@intel.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).