From: Denis Kirjanov <kda@linux-powerpc.org>
To: netdev@vger.kernel.org
Cc: brouer@redhat.com, jgross@suse.com, wei.liu@kernel.org,
paul@xen.org, ilias.apalodimas@linaro.org
Subject: [PATCH net-next v8 2/3] xen networking: add XDP offset adjustment to xen-netback
Date: Thu, 7 May 2020 15:40:40 +0300 [thread overview]
Message-ID: <1588855241-29141-2-git-send-email-kda@linux-powerpc.org> (raw)
In-Reply-To: <1588855241-29141-1-git-send-email-kda@linux-powerpc.org>
The patch adds the XDP offset adjustment on the receive path and reads
the frontend's XDP state over xenbus, so that XDP works on the netfront side.
Signed-off-by: Denis Kirjanov <denis.kirjanov@suse.com>
---
drivers/net/xen-netback/common.h | 2 ++
drivers/net/xen-netback/netback.c | 7 +++++++
drivers/net/xen-netback/rx.c | 7 ++++++-
drivers/net/xen-netback/xenbus.c | 28 ++++++++++++++++++++++++++++
4 files changed, 43 insertions(+), 1 deletion(-)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 05847eb..4a148d6 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -280,6 +280,7 @@ struct xenvif {
u8 ip_csum:1;
u8 ipv6_csum:1;
u8 multicast_control:1;
+ u8 xdp_enabled:1;
/* Is this interface disabled? True when backend discovers
* frontend is rogue.
@@ -395,6 +396,7 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
irqreturn_t xenvif_interrupt(int irq, void *dev_id);
extern bool separate_tx_rx_irq;
+extern bool provides_xdp_headroom;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 315dfc6..6dfca72 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -96,6 +96,13 @@
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
+/* The module parameter tells that we have to put data
+ * for xen-netfront with the XDP_PACKET_HEADROOM offset
+ * needed for XDP processing
+ */
+bool provides_xdp_headroom = true;
+module_param(provides_xdp_headroom, bool, 0644);
+
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index ef58870..1c0cf8a 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -33,6 +33,11 @@
#include <xen/xen.h>
#include <xen/events.h>
+static inline int xenvif_rx_xdp_offset(struct xenvif *vif)
+{
+ return vif->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
+}
+
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
RING_IDX prod, cons;
@@ -356,7 +361,7 @@ static void xenvif_rx_data_slot(struct xenvif_queue *queue,
struct xen_netif_rx_request *req,
struct xen_netif_rx_response *rsp)
{
- unsigned int offset = 0;
+ unsigned int offset = xenvif_rx_xdp_offset(queue->vif);
unsigned int flags;
do {
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 286054b..9c1b837 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -393,6 +393,20 @@ static void set_backend_state(struct backend_info *be,
}
}
+static void read_xenbus_frontend_xdp(struct backend_info *be,
+ struct xenbus_device *dev)
+{
+ struct xenvif *vif = be->vif;
+ unsigned int val;
+ int err;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend,
+ "feature-xdp", "%u", &val);
+ if (err != 1)
+ return;
+ vif->xdp_enabled = val;
+}
+
/**
* Callback received when the frontend's state changes.
*/
@@ -417,6 +431,11 @@ static void frontend_changed(struct xenbus_device *dev,
set_backend_state(be, XenbusStateConnected);
break;
+ case XenbusStateReconfiguring:
+ read_xenbus_frontend_xdp(be, dev);
+ xenbus_switch_state(dev, XenbusStateReconfigured);
+ break;
+
case XenbusStateClosing:
set_backend_state(be, XenbusStateClosing);
break;
@@ -1036,6 +1055,15 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction;
}
+ /* we can adjust a headroom for netfront XDP processing */
+ err = xenbus_printf(xbt, dev->nodename,
+ "feature-xdp-headroom", "%d",
+ provides_xdp_headroom);
+ if (err) {
+ message = "writing feature-xdp-headroom";
+ goto abort_transaction;
+ }
+
/* We don't support rx-flip path (except old guests who
* don't grok this feature flag).
*/
--
1.8.3.1
next prev parent reply other threads:[~2020-05-07 12:41 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-07 12:40 [PATCH net-next v8 1/3] xen networking: add basic XDP support for xen-netfront Denis Kirjanov
2020-05-07 12:40 ` Denis Kirjanov [this message]
2020-05-08 21:07 ` [PATCH net-next v8 2/3] xen networking: add XDP offset adjustment to xen-netback Jakub Kicinski
2020-05-07 12:40 ` [PATCH net-next v8 3/3] net: xen: select PAGE_POOL for xen-netfront Denis Kirjanov
2020-05-08 21:08 ` Jakub Kicinski
2020-05-08 21:07 ` [PATCH net-next v8 1/3] xen networking: add basic XDP support " Jakub Kicinski
2020-05-09 10:47 ` Denis Kirjanov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1588855241-29141-2-git-send-email-kda@linux-powerpc.org \
--to=kda@linux-powerpc.org \
--cc=brouer@redhat.com \
--cc=ilias.apalodimas@linaro.org \
--cc=jgross@suse.com \
--cc=netdev@vger.kernel.org \
--cc=paul@xen.org \
--cc=wei.liu@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).