BPF List
 help / color / mirror / Atom feed
From: "Daniel Xu" <dxu@dxuuu.xyz>
To: "Alexander Lobakin" <aleksander.lobakin@intel.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Andrii Nakryiko" <andrii@kernel.org>
Cc: "Lorenzo Bianconi" <lorenzo@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Martin KaFai Lau" <martin.lau@linux.dev>,
	"David Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"bpf@vger.kernel.org" <bpf@vger.kernel.org>,
	netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH bpf-next 3/9] net: napi: add ability to create CPU-pinned threaded NAPI
Date: Fri, 30 Aug 2024 17:19:17 -0700	[thread overview]
Message-ID: <60cbe452-e1f3-4507-9b3b-563906eccb15@app.fastmail.com> (raw)
In-Reply-To: <20240830162508.1009458-4-aleksander.lobakin@intel.com>



On Fri, Aug 30, 2024, at 9:25 AM, Alexander Lobakin wrote:
> From: Lorenzo Bianconi <lorenzo@kernel.org>
>
> Add netif_napi_add_percpu() to pin NAPI in threaded mode to a particular
> CPU. This means, if the NAPI is not threaded, it will be run as usually,
> but when switching to threaded mode, it will always be run on the
> specified CPU.
> It's not meant to be used in drivers, but might be useful when creating
> percpu threaded NAPIs, for example, to replace percpu kthreads or
> workers where a NAPI context is needed.
> The already existing netif_napi_add*() are not anyhow affected.
>
> Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
> Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
> ---
>  include/linux/netdevice.h | 35 +++++++++++++++++++++++++++++++++--
>  net/core/dev.c            | 18 +++++++++++++-----
>  2 files changed, 46 insertions(+), 7 deletions(-)
>
> diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
> index ca5f0dda733b..4d6fb0ccdea1 100644
> --- a/include/linux/netdevice.h
> +++ b/include/linux/netdevice.h
> @@ -377,6 +377,7 @@ struct napi_struct {
>  	struct list_head	dev_list;
>  	struct hlist_node	napi_hash_node;
>  	int			irq;
> +	int			thread_cpuid;
>  };
> 
>  enum {
> @@ -2619,8 +2620,18 @@ static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
>   */
>  #define NAPI_POLL_WEIGHT 64
> 
> -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
> -			   int (*poll)(struct napi_struct *, int), int weight);
> +void netif_napi_add_weight_percpu(struct net_device *dev,
> +				  struct napi_struct *napi,
> +				  int (*poll)(struct napi_struct *, int),
> +				  int weight, int thread_cpuid);
> +
> +static inline void netif_napi_add_weight(struct net_device *dev,
> +					 struct napi_struct *napi,
> +					 int (*poll)(struct napi_struct *, int),
> +					 int weight)
> +{
> +	netif_napi_add_weight_percpu(dev, napi, poll, weight, -1);
> +}
> 
>  /**
>   * netif_napi_add() - initialize a NAPI context
> @@ -2665,6 +2676,26 @@ static inline void netif_napi_add_tx(struct net_device *dev,
>  	netif_napi_add_tx_weight(dev, napi, poll, NAPI_POLL_WEIGHT);
>  }
> 
> +/**
> + * netif_napi_add_percpu() - initialize a CPU-pinned threaded NAPI context
> + * @dev:  network device
> + * @napi: NAPI context
> + * @poll: polling function
> + * @thread_cpuid: CPU which this NAPI will be pinned to
> + *
> + * Variant of netif_napi_add() which pins the NAPI to the specified CPU. No
> + * changes in the "standard" mode, but in case with the threaded one, this
> + * NAPI will always be run on the passed CPU no matter where scheduled.
> + */
> +static inline void netif_napi_add_percpu(struct net_device *dev,
> +					 struct napi_struct *napi,
> +					 int (*poll)(struct napi_struct *, int),
> +					 int thread_cpuid)
> +{
> +	netif_napi_add_weight_percpu(dev, napi, poll, NAPI_POLL_WEIGHT,
> +				     thread_cpuid);
> +}
> +
>  /**
>   *  __netif_napi_del - remove a NAPI context
>   *  @napi: NAPI context
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 98bb5f890b88..93ca3df8e9dd 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -1428,8 +1428,13 @@ static int napi_kthread_create(struct napi_struct *n)
>  	 * TASK_INTERRUPTIBLE mode to avoid the blocked task
>  	 * warning and work with loadavg.
>  	 */
> -	n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
> -				n->dev->name, n->napi_id);
> +	if (n->thread_cpuid >= 0)
> +		n->thread = kthread_run_on_cpu(napi_threaded_poll, n,
> +					       n->thread_cpuid, "napi/%s-%u",
> +					       n->dev->name);
> +	else
> +		n->thread = kthread_run(napi_threaded_poll, n, "napi/%s-%d",
> +					n->dev->name, n->napi_id);
>  	if (IS_ERR(n->thread)) {
>  		err = PTR_ERR(n->thread);
>  		pr_err("kthread_run failed with err %d\n", err);
> @@ -6640,8 +6645,10 @@ void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
>  }
>  EXPORT_SYMBOL(netif_queue_set_napi);
> 
> -void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
> -			   int (*poll)(struct napi_struct *, int), int weight)
> +void netif_napi_add_weight_percpu(struct net_device *dev,
> +				  struct napi_struct *napi,
> +				  int (*poll)(struct napi_struct *, int),
> +				  int weight, int thread_cpuid)
>  {
>  	if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
>  		return;
> @@ -6664,6 +6671,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  	napi->poll_owner = -1;
>  #endif
>  	napi->list_owner = -1;
> +	napi->thread_cpuid = thread_cpuid;
>  	set_bit(NAPI_STATE_SCHED, &napi->state);
>  	set_bit(NAPI_STATE_NPSVC, &napi->state);
>  	list_add_rcu(&napi->dev_list, &dev->napi_list);
> @@ -6677,7 +6685,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
>  		dev->threaded = false;
>  	netif_napi_set_irq(napi, -1);
>  }
> -EXPORT_SYMBOL(netif_napi_add_weight);
> +EXPORT_SYMBOL(netif_napi_add_weight_percpu);
> 
>  void napi_disable(struct napi_struct *n)
>  {
> -- 
> 2.46.0

Acked-by: Daniel Xu <dxu@dxuuu.xyz>

  reply	other threads:[~2024-08-31  0:19 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-08-30 16:24 [PATCH bpf-next 0/9] bpf: cpumap: enable GRO for XDP_PASS frames Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 1/9] firmware/psci: fix missing '%u' format literal in kthread_create_on_cpu() Alexander Lobakin
2024-08-30 23:31   ` Daniel Xu
2024-08-30 16:25 ` [PATCH bpf-next 2/9] kthread: allow vararg kthread_{create,run}_on_cpu() Alexander Lobakin
2024-08-30 22:56   ` Stanislav Fomichev
2024-09-03 12:25     ` Alexander Lobakin
2024-09-03 17:04       ` Stanislav Fomichev
2024-08-30 16:25 ` [PATCH bpf-next 3/9] net: napi: add ability to create CPU-pinned threaded NAPI Alexander Lobakin
2024-08-31  0:19   ` Daniel Xu [this message]
2024-08-30 16:25 ` [PATCH bpf-next 4/9] bpf: cpumap: use CPU-pinned threaded NAPI w/GRO instead of kthread Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 5/9] bpf: cpumap: reuse skb array instead of a linked list to chain skbs Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 6/9] net: skbuff: introduce napi_skb_cache_get_bulk() Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 7/9] bpf: cpumap: switch to napi_skb_cache_get_bulk() Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 8/9] veth: use napi_skb_cache_get_bulk() instead of xdp_alloc_skb_bulk() Alexander Lobakin
2024-08-30 16:25 ` [PATCH bpf-next 9/9] xdp: remove xdp_alloc_skb_bulk() Alexander Lobakin
2024-09-03 20:51 ` [PATCH bpf-next 0/9] bpf: cpumap: enable GRO for XDP_PASS frames Jakub Kicinski
2024-09-03 21:33   ` Lorenzo Bianconi
2024-09-05 11:53     ` Jesper Dangaard Brouer
2024-09-05 17:01     ` Lorenzo Bianconi
2024-09-06  0:20       ` Jakub Kicinski
2024-09-06  8:15         ` Lorenzo Bianconi
2024-09-07 13:22           ` Lorenzo Bianconi
2024-09-04 13:13   ` Alexander Lobakin
2024-09-04 14:50     ` Jakub Kicinski
2024-09-04 15:13       ` Alexander Lobakin
2024-09-04 18:29         ` Jakub Kicinski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=60cbe452-e1f3-4507-9b3b-563906eccb15@app.fastmail.com \
    --to=dxu@dxuuu.xyz \
    --cc=aleksander.lobakin@intel.com \
    --cc=andrii@kernel.org \
    --cc=ast@kernel.org \
    --cc=bpf@vger.kernel.org \
    --cc=daniel@iogearbox.net \
    --cc=davem@davemloft.net \
    --cc=edumazet@google.com \
    --cc=hawk@kernel.org \
    --cc=john.fastabend@gmail.com \
    --cc=kuba@kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=lorenzo@kernel.org \
    --cc=martin.lau@linux.dev \
    --cc=netdev@vger.kernel.org \
    --cc=pabeni@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox