* [PATCH RFC net-next 0/2] net: napi: add CPU affinity to napi->config
@ 2024-12-06 0:12 Ahmed Zaki
2024-12-06 0:12 ` [PATCH RFC net-next 1/2] " Ahmed Zaki
2024-12-06 0:12 ` [PATCH RFC net-next 2/2] idpf: use napi's irq affinity Ahmed Zaki
0 siblings, 2 replies; 6+ messages in thread
From: Ahmed Zaki @ 2024-12-06 0:12 UTC (permalink / raw)
To: netdev; +Cc: Ahmed Zaki
As suggested by Jakub, move the CPU affinity mask from the driver to
napi->config.
Tested on idpf.
---
If accepted, I will convert all drivers that already use netif_napi_set_irq().
Ahmed Zaki (2):
net: napi: add CPU affinity to napi->config
idpf: use napi's irq affinity
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18 ++++-------------
drivers/net/ethernet/intel/idpf/idpf_txrx.h | 6 ++----
include/linux/netdevice.h | 22 +++++++++++++++++++++
net/core/dev.c | 7 ++++++-
4 files changed, 34 insertions(+), 19 deletions(-)
--
2.47.0
* [PATCH RFC net-next 1/2] net: napi: add CPU affinity to napi->config
2024-12-06 0:12 [PATCH RFC net-next 0/2] net: napi: add CPU affinity to napi->config Ahmed Zaki
@ 2024-12-06 0:12 ` Ahmed Zaki
2024-12-08 2:24 ` Jakub Kicinski
2024-12-06 0:12 ` [PATCH RFC net-next 2/2] idpf: use napi's irq affinity Ahmed Zaki
1 sibling, 1 reply; 6+ messages in thread
From: Ahmed Zaki @ 2024-12-06 0:12 UTC (permalink / raw)
To: netdev; +Cc: Ahmed Zaki, Jakub Kicinski
A common task for most drivers is to remember the user-set CPU affinity
of their IRQs. On each netdev reset, the driver must then re-assign the
user's settings to the IRQs.
Add a CPU affinity mask to napi->config. To delegate CPU affinity
management to the core, drivers must:
1 - add a persistent napi config: netif_napi_add_config()
2 - bind an IRQ to the napi instance: netif_napi_set_irq()
The core will then make sure the stored affinity is re-assigned to the
napi's IRQ.
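As an illustrative, driver-agnostic sketch (q_vector, idx and irq_num are
placeholders for whatever bookkeeping the driver already has, not part of
this patch; only the two netif_* calls are the new API usage):

	/* Hypothetical driver init path. */
	netif_napi_add_config(netdev, &q_vector->napi, drv_napi_poll, idx);
	netif_napi_set_irq(&q_vector->napi, irq_num);

	/* From here on, the core copies any user affinity change into
	 * napi->config->affinity_mask via the IRQ affinity notifier and
	 * re-applies it the next time netif_napi_set_irq() is called,
	 * e.g. after a reset.
	 */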
Suggested-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
---
include/linux/netdevice.h | 22 ++++++++++++++++++++++
net/core/dev.c | 7 ++++++-
2 files changed, 28 insertions(+), 1 deletion(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index ecc686409161..8660de791a1a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -350,6 +350,7 @@ struct napi_config {
u64 gro_flush_timeout;
u64 irq_suspend_timeout;
u32 defer_hard_irqs;
+ cpumask_t affinity_mask;
unsigned int napi_id;
};
@@ -393,6 +394,7 @@ struct napi_struct {
int irq;
int index;
struct napi_config *config;
+ struct irq_affinity_notify affinity_notify;
};
enum {
@@ -2666,10 +2668,30 @@ static inline void *netdev_priv(const struct net_device *dev)
void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
enum netdev_queue_type type,
struct napi_struct *napi);
+static inline void
+netif_napi_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct napi_struct *napi =
+ container_of(notify, struct napi_struct, affinity_notify);
+
+ if (napi->config)
+ cpumask_copy(&napi->config->affinity_mask, mask);
+}
+
+static inline void
+netif_napi_affinity_release(struct kref __always_unused *ref) {}
static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
{
napi->irq = irq;
+
+ if (irq > 0 && napi->config) {
+ napi->affinity_notify.notify = netif_napi_affinity_notify;
+ napi->affinity_notify.release = netif_napi_affinity_release;
+ irq_set_affinity_notifier(irq, &napi->affinity_notify);
+ irq_set_affinity(irq, &napi->config->affinity_mask);
+ }
}
/* Default NAPI poll() weight
diff --git a/net/core/dev.c b/net/core/dev.c
index 13d00fc10f55..d58779d57994 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6843,6 +6843,8 @@ void __netif_napi_del(struct napi_struct *napi)
return;
if (napi->config) {
+ if (napi->irq > 0)
+ irq_set_affinity_notifier(napi->irq, NULL);
napi->index = -1;
napi->config = NULL;
}
@@ -11184,7 +11186,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
{
struct net_device *dev;
size_t napi_config_sz;
- unsigned int maxqs;
+ unsigned int maxqs, i;
BUG_ON(strlen(name) >= sizeof(dev->name));
@@ -11280,6 +11282,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
dev->napi_config = kvzalloc(napi_config_sz, GFP_KERNEL_ACCOUNT);
if (!dev->napi_config)
goto free_all;
+ for (i = 0; i < maxqs; i++)
+ cpumask_copy(&dev->napi_config[i].affinity_mask,
+ cpu_online_mask);
strscpy(dev->name, name);
dev->name_assign_type = name_assign_type;
--
2.47.0
* [PATCH RFC net-next 2/2] idpf: use napi's irq affinity
2024-12-06 0:12 [PATCH RFC net-next 0/2] net: napi: add CPU affinity to napi->config Ahmed Zaki
2024-12-06 0:12 ` [PATCH RFC net-next 1/2] " Ahmed Zaki
@ 2024-12-06 0:12 ` Ahmed Zaki
2024-12-06 8:29 ` [Intel-wired-lan] " Paul Menzel
1 sibling, 1 reply; 6+ messages in thread
From: Ahmed Zaki @ 2024-12-06 0:12 UTC (permalink / raw)
To: netdev; +Cc: Ahmed Zaki, intel-wired-lan
Delete the driver CPU affinity info and use the core's napi config
instead.
Cc: intel-wired-lan@lists.osuosl.org
Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
---
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18 ++++--------------
drivers/net/ethernet/intel/idpf/idpf_txrx.h | 6 ++----
2 files changed, 6 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index da2a5becf62f..ffa75a98cdc6 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3553,8 +3553,6 @@ void idpf_vport_intr_rel(struct idpf_vport *vport)
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
-
- free_cpumask_var(q_vector->affinity_mask);
}
kfree(vport->q_vectors);
@@ -3581,8 +3579,6 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
kfree(free_irq(irq_num, q_vector));
}
}
@@ -3761,8 +3757,6 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
- /* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -4183,12 +4177,11 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ int irq_num = vport->adapter->msix_entries[v_idx].vector;
- netif_napi_add(vport->netdev, &q_vector->napi, napi_poll);
-
- /* only set affinity_mask if the CPU is online */
- if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, q_vector->affinity_mask);
+ netif_napi_add_config(vport->netdev, &q_vector->napi,
+ napi_poll, v_idx);
+ netif_napi_set_irq(&q_vector->napi, irq_num);
}
}
@@ -4232,9 +4225,6 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
- goto error;
-
q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
if (!q_vector->tx)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 9c1fe84108ed..5efb3402b378 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -397,7 +397,6 @@ struct idpf_intr_reg {
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
* @v_idx: Vector index
- * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
__cacheline_group_begin_aligned(read_mostly);
@@ -434,13 +433,12 @@ struct idpf_q_vector {
__cacheline_group_begin_aligned(cold);
u16 v_idx;
- cpumask_var_t affinity_mask;
__cacheline_group_end_aligned(cold);
};
libeth_cacheline_set_assert(struct idpf_q_vector, 112,
24 + sizeof(struct napi_struct) +
2 * sizeof(struct dim),
- 8 + sizeof(cpumask_var_t));
+ 8);
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -934,7 +932,7 @@ static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
if (!q_vector)
return NUMA_NO_NODE;
- cpu = cpumask_first(q_vector->affinity_mask);
+ cpu = cpumask_first(&q_vector->napi.config->affinity_mask);
return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
}
--
2.47.0
* Re: [Intel-wired-lan] [PATCH RFC net-next 2/2] idpf: use napi's irq affinity
2024-12-06 0:12 ` [PATCH RFC net-next 2/2] idpf: use napi's irq affinity Ahmed Zaki
@ 2024-12-06 8:29 ` Paul Menzel
2024-12-06 17:58 ` Ahmed Zaki
0 siblings, 1 reply; 6+ messages in thread
From: Paul Menzel @ 2024-12-06 8:29 UTC (permalink / raw)
To: Ahmed Zaki; +Cc: intel-wired-lan, netdev
Dear Ahmed,
Thank you for your patch.
Am 06.12.24 um 01:12 schrieb Ahmed Zaki:
> Delete the driver CPU affinity info and use the core's napi config
> instead.
Excuse my ignorance, but could you please state why? (Is the core’s napi
config the successor?)
> Cc: intel-wired-lan@lists.osuosl.org
> Signed-off-by: Ahmed Zaki <ahmed.zaki@intel.com>
> ---
> drivers/net/ethernet/intel/idpf/idpf_txrx.c | 18 ++++--------------
> drivers/net/ethernet/intel/idpf/idpf_txrx.h | 6 ++----
> 2 files changed, 6 insertions(+), 18 deletions(-)
[…]
Kind regards,
Paul
* Re: [Intel-wired-lan] [PATCH RFC net-next 2/2] idpf: use napi's irq affinity
2024-12-06 8:29 ` [Intel-wired-lan] " Paul Menzel
@ 2024-12-06 17:58 ` Ahmed Zaki
0 siblings, 0 replies; 6+ messages in thread
From: Ahmed Zaki @ 2024-12-06 17:58 UTC (permalink / raw)
To: Paul Menzel; +Cc: intel-wired-lan, netdev
On 2024-12-06 1:29 a.m., Paul Menzel wrote:
> Dear Ahmed,
>
>
> Thank you for your patch.
>
> Am 06.12.24 um 01:12 schrieb Ahmed Zaki:
>> Delete the driver CPU affinity info and use the core's napi config
>> instead.
>
> Excuse my ignorance, but could you please state why? (Is the core’s napi
> config the successor?)
>
Hi Paul
I am not sure I understand what you mean by "successor".
This patch is an example of how the new "napi_config->affinity_mask"
defined in patch 1/2 of the series can be used instead of keeping the
affinity mask in the driver.
It is essentially a code cleanup, since the task can now be done in the
core for all drivers.
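As a simplified before/after sketch (the drv_* and q_vector names are
hypothetical; the idpf diff in this patch is the real example):

	/* Before: the driver allocates and applies the mask itself. */
	if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
		goto error;
	netif_napi_add(netdev, &q_vector->napi, drv_napi_poll);
	irq_set_affinity_hint(irq_num, q_vector->affinity_mask);

	/* After: the core owns the mask in napi->config. */
	netif_napi_add_config(netdev, &q_vector->napi, drv_napi_poll, v_idx);
	netif_napi_set_irq(&q_vector->napi, irq_num);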
* Re: [PATCH RFC net-next 1/2] net: napi: add CPU affinity to napi->config
2024-12-06 0:12 ` [PATCH RFC net-next 1/2] " Ahmed Zaki
@ 2024-12-08 2:24 ` Jakub Kicinski
0 siblings, 0 replies; 6+ messages in thread
From: Jakub Kicinski @ 2024-12-08 2:24 UTC (permalink / raw)
To: Ahmed Zaki; +Cc: netdev
On Thu, 5 Dec 2024 17:12:08 -0700 Ahmed Zaki wrote:
> +static inline void
> +netif_napi_affinity_notify(struct irq_affinity_notify *notify,
> + const cpumask_t *mask)
> +{
> + struct napi_struct *napi =
> + container_of(notify, struct napi_struct, affinity_notify);
> +
> + if (napi->config)
> + cpumask_copy(&napi->config->affinity_mask, mask);
> +}
> +
> +static inline void
> +netif_napi_affinity_release(struct kref __always_unused *ref) {}
>
> static inline void netif_napi_set_irq(struct napi_struct *napi, int irq)
> {
> napi->irq = irq;
> +
> + if (irq > 0 && napi->config) {
> + napi->affinity_notify.notify = netif_napi_affinity_notify;
> + napi->affinity_notify.release = netif_napi_affinity_release;
> + irq_set_affinity_notifier(irq, &napi->affinity_notify);
> + irq_set_affinity(irq, &napi->config->affinity_mask);
> + }
> }
Nice, thanks for following up!
Let's move this code to net/core/dev.c or a new file; it's getting
complex for a static inline, and there's no perf implication that would
justify keeping it inline.
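A rough sketch of what that move could look like (placement and the
export are assumptions for illustration, not part of this thread; the
function bodies simply mirror the patch):

	/* include/linux/netdevice.h: keep only the declaration */
	void netif_napi_set_irq(struct napi_struct *napi, int irq);

	/* net/core/dev.c: out-of-line definitions */
	static void netif_napi_affinity_notify(struct irq_affinity_notify *notify,
					       const cpumask_t *mask)
	{
		struct napi_struct *napi =
			container_of(notify, struct napi_struct, affinity_notify);

		if (napi->config)
			cpumask_copy(&napi->config->affinity_mask, mask);
	}

	static void netif_napi_affinity_release(struct kref *ref)
	{
	}

	void netif_napi_set_irq(struct napi_struct *napi, int irq)
	{
		napi->irq = irq;

		if (irq > 0 && napi->config) {
			napi->affinity_notify.notify = netif_napi_affinity_notify;
			napi->affinity_notify.release = netif_napi_affinity_release;
			irq_set_affinity_notifier(irq, &napi->affinity_notify);
			irq_set_affinity(irq, &napi->config->affinity_mask);
		}
	}
	EXPORT_SYMBOL(netif_napi_set_irq);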