From: Alexander Lobakin <aleksander.lobakin@intel.com>
To: intel-wired-lan@lists.osuosl.org
Cc: Alexander Lobakin <aleksander.lobakin@intel.com>,
Michal Kubiak <michal.kubiak@intel.com>,
Maciej Fijalkowski <maciej.fijalkowski@intel.com>,
Tony Nguyen <anthony.l.nguyen@intel.com>,
Przemek Kitszel <przemyslaw.kitszel@intel.com>,
Andrew Lunn <andrew+netdev@lunn.ch>,
"David S. Miller" <davem@davemloft.net>,
Eric Dumazet <edumazet@google.com>,
Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>,
Alexei Starovoitov <ast@kernel.org>,
Daniel Borkmann <daniel@iogearbox.net>,
Simon Horman <horms@kernel.org>,
nxne.cnse.osdt.itp.upstreaming@intel.com, bpf@vger.kernel.org,
netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [PATCH iwl-next v3 14/18] idpf: implement XDP_SETUP_PROG in ndo_bpf for splitq
Date: Wed, 30 Jul 2025 18:07:13 +0200
Message-ID: <20250730160717.28976-15-aleksander.lobakin@intel.com>
In-Reply-To: <20250730160717.28976-1-aleksander.lobakin@intel.com>
From: Michal Kubiak <michal.kubiak@intel.com>
Implement loading/removing an XDP program via the .ndo_bpf callback
in the split queue mode. Reconfigure and restart the queues if needed,
i.e. when XDP is being enabled or disabled (!!old_prog != !!new_prog);
otherwise, just update the program pointers.
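For reference, the condition that decides between a full queue
reconfiguration and a plain pointer swap can be modelled with the
following standalone sketch (illustrative only; the helper name and
the test values are made up and are not part of the driver):

/*
 * Queues only need to be rebuilt when XDP is turned on or off, i.e.
 * when the "program attached" state flips; replacing one program
 * with another reuses the already configured queues.
 */
#include <stdbool.h>
#include <stdio.h>

static bool xdp_needs_queue_reconfig(const void *old_prog,
				     const void *new_prog)
{
	return !!old_prog != !!new_prog;
}

int main(void)
{
	int a, b;

	printf("%d\n", xdp_needs_queue_reconfig(NULL, &a)); /* 1: enable XDP, restart queues */
	printf("%d\n", xdp_needs_queue_reconfig(&a, &b));   /* 0: swap programs, keep queues */
	printf("%d\n", xdp_needs_queue_reconfig(&a, NULL)); /* 1: disable XDP, restart queues */

	return 0;
}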
Signed-off-by: Michal Kubiak <michal.kubiak@intel.com>
Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
---
drivers/net/ethernet/intel/idpf/idpf_txrx.h | 4 +-
drivers/net/ethernet/intel/idpf/xdp.h | 7 ++
drivers/net/ethernet/intel/idpf/idpf_lib.c | 1 +
drivers/net/ethernet/intel/idpf/idpf_txrx.c | 4 +
drivers/net/ethernet/intel/idpf/xdp.c | 98 +++++++++++++++++++++
5 files changed, 113 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 6bc204b68d9e..f898a9c8de1d 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -467,6 +467,7 @@ struct idpf_tx_queue_stats {
* @desc_ring: virtual descriptor ring address
* @bufq_sets: Pointer to the array of buffer queues in splitq mode
* @napi: NAPI instance corresponding to this queue (splitq)
+ * @xdp_prog: attached XDP program
* @rx_buf: See struct &libeth_fqe
* @pp: Page pool pointer in singleq mode
* @tail: Tail offset. Used for both queue models single and split.
@@ -508,13 +509,14 @@ struct idpf_rx_queue {
struct {
struct idpf_bufq_set *bufq_sets;
struct napi_struct *napi;
+ struct bpf_prog __rcu *xdp_prog;
};
struct {
struct libeth_fqe *rx_buf;
struct page_pool *pp;
+ void __iomem *tail;
};
};
- void __iomem *tail;
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
u16 idx;
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index cf6823b24ba5..47553ce5f81a 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -6,12 +6,19 @@
#include <linux/types.h>
+struct bpf_prog;
struct idpf_vport;
+struct net_device;
+struct netdev_bpf;
int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog);
int idpf_xdpsqs_get(const struct idpf_vport *vport);
void idpf_xdpsqs_put(const struct idpf_vport *vport);
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
+
#endif /* _IDPF_XDP_H_ */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index e0ceb791f5c2..8f2ea790fdf3 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2521,4 +2521,5 @@ static const struct net_device_ops idpf_netdev_ops = {
.ndo_tx_timeout = idpf_tx_timeout,
.ndo_hwtstamp_get = idpf_hwtstamp_get,
.ndo_hwtstamp_set = idpf_hwtstamp_set,
+ .ndo_bpf = idpf_xdp,
};
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 0ef35be0421a..385aa769d85e 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -741,6 +741,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
+ idpf_xdp_copy_prog_to_rqs(vport, vport->xdp_prog);
+
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
u32 truesize = 0;
@@ -1019,6 +1021,8 @@ static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport)
*/
void idpf_vport_queues_rel(struct idpf_vport *vport)
{
+ idpf_xdp_copy_prog_to_rqs(vport, NULL);
+
idpf_tx_desc_rel_all(vport);
idpf_rx_desc_rel_all(vport);
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 08d63462dca4..09e84fe80d4e 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -4,6 +4,7 @@
#include <net/libeth/xdp.h>
#include "idpf.h"
+#include "idpf_virtchnl.h"
#include "xdp.h"
static int idpf_rxq_for_each(const struct idpf_vport *vport,
@@ -91,6 +92,28 @@ void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
(void *)(size_t)vport->rxq_model);
}
+static int idpf_xdp_rxq_assign_prog(struct idpf_rx_queue *rxq, void *arg)
+{
+ struct mutex *lock = &rxq->q_vector->vport->adapter->vport_ctrl_lock;
+ struct bpf_prog *prog = arg;
+ struct bpf_prog *old;
+
+ if (prog)
+ bpf_prog_inc(prog);
+
+ old = rcu_replace_pointer(rxq->xdp_prog, prog, lockdep_is_held(lock));
+ if (old)
+ bpf_prog_put(old);
+
+ return 0;
+}
+
+void idpf_xdp_copy_prog_to_rqs(const struct idpf_vport *vport,
+ struct bpf_prog *xdp_prog)
+{
+ idpf_rxq_for_each(vport, idpf_xdp_rxq_assign_prog, xdp_prog);
+}
+
int idpf_xdpsqs_get(const struct idpf_vport *vport)
{
struct libeth_xdpsq_timer **timers __free(kvfree) = NULL;
@@ -163,3 +186,78 @@ void idpf_xdpsqs_put(const struct idpf_vport *vport)
idpf_queue_clear(NOIRQ, xdpsq);
}
}
+
+static int idpf_xdp_setup_prog(struct idpf_vport *vport,
+ const struct netdev_bpf *xdp)
+{
+ const struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ struct bpf_prog *old, *prog = xdp->prog;
+ struct idpf_vport_config *cfg;
+ int ret;
+
+ cfg = vport->adapter->vport_config[vport->idx];
+
+ if (test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags) ||
+ !test_bit(IDPF_VPORT_REG_NETDEV, cfg->flags) ||
+ !!vport->xdp_prog == !!prog) {
+ if (np->state == __IDPF_VPORT_UP)
+ idpf_xdp_copy_prog_to_rqs(vport, prog);
+
+ old = xchg(&vport->xdp_prog, prog);
+ if (old)
+ bpf_prog_put(old);
+
+ cfg->user_config.xdp_prog = prog;
+
+ return 0;
+ }
+
+ if (!vport->num_xdp_txq && vport->num_txq == cfg->max_q.max_txq) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "No Tx queues available for XDP, please decrease the number of regular SQs");
+ return -ENOSPC;
+ }
+
+ old = cfg->user_config.xdp_prog;
+ cfg->user_config.xdp_prog = prog;
+
+ ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
+ if (ret) {
+ NL_SET_ERR_MSG_MOD(xdp->extack,
+ "Could not reopen the vport after XDP setup");
+
+ cfg->user_config.xdp_prog = old;
+ old = prog;
+ }
+
+ if (old)
+ bpf_prog_put(old);
+
+ return ret;
+}
+
+int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ struct idpf_vport *vport;
+ int ret;
+
+ idpf_vport_ctrl_lock(dev);
+ vport = idpf_netdev_to_vport(dev);
+
+ if (!idpf_is_queue_model_split(vport->txq_model))
+ goto notsupp;
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ ret = idpf_xdp_setup_prog(vport, xdp);
+ break;
+ default:
+notsupp:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ idpf_vport_ctrl_unlock(dev);
+
+ return ret;
+}
--
2.50.1