From: Stanislav Fomichev <sdf@google.com>
To: bpf@vger.kernel.org
Cc: ast@kernel.org, daniel@iogearbox.net, andrii@kernel.org,
martin.lau@linux.dev, song@kernel.org, yhs@fb.com,
john.fastabend@gmail.com, kpsingh@kernel.org, sdf@google.com,
haoluo@google.com, jolsa@kernel.org, netdev@vger.kernel.org
Subject: [RFC bpf-next 5/7] net: veth: implement devtx timestamp kfuncs
Date: Mon, 12 Jun 2023 10:23:05 -0700 [thread overview]
Message-ID: <20230612172307.3923165-6-sdf@google.com> (raw)
In-Reply-To: <20230612172307.3923165-1-sdf@google.com>
Have a software-based example of these kfuncs to showcase how they
can be used in real devices and to have something to
test against in the selftests.
Both paths (skb & xdp) are covered. Only the skb path is really
tested though.
Cc: netdev@vger.kernel.org
Signed-off-by: Stanislav Fomichev <sdf@google.com>
---
drivers/net/veth.c | 94 ++++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 90 insertions(+), 4 deletions(-)
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 614f3e3efab0..eb78d51d8352 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,6 +27,7 @@
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
#include <net/page_pool.h>
+#include <net/devtx.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
@@ -123,6 +124,13 @@ struct veth_xdp_buff {
struct sk_buff *skb;
};
+struct veth_devtx_frame {
+ struct devtx_frame frame;
+ bool request_timestamp;
+ ktime_t xdp_tx_timestamp;
+ struct sk_buff *skb;
+};
+
static int veth_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -314,9 +322,29 @@ static int veth_xdp_rx(struct veth_rq *rq, struct sk_buff *skb)
}
static int veth_forward_skb(struct net_device *dev, struct sk_buff *skb,
- struct veth_rq *rq, bool xdp)
+ struct veth_rq *rq, bool xdp, bool request_timestamp)
{
- return __dev_forward_skb(dev, skb) ?: xdp ?
+ struct net_device *src_dev = skb->dev;
+ int ret;
+
+ ret = __dev_forward_skb(dev, skb);
+ if (ret)
+ return ret;
+
+ if (devtx_complete_enabled(src_dev)) {
+ struct veth_devtx_frame ctx;
+
+ if (unlikely(request_timestamp))
+ __net_timestamp(skb);
+
+ devtx_frame_from_skb(&ctx.frame, skb);
+ ctx.frame.data -= ETH_HLEN; /* undo eth_type_trans pull */
+ ctx.frame.len += ETH_HLEN;
+ ctx.skb = skb;
+ devtx_complete(src_dev, &ctx.frame);
+ }
+
+ return xdp ?
veth_xdp_rx(rq, skb) :
__netif_rx(skb);
}
@@ -343,6 +371,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev,
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+ bool request_timestamp = false;
struct veth_rq *rq = NULL;
struct net_device *rcv;
int length = skb->len;
@@ -356,6 +385,15 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
}
+ if (devtx_submit_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_skb(&ctx.frame, skb);
+ ctx.request_timestamp = false;
+ devtx_submit(dev, &ctx.frame);
+ request_timestamp = ctx.request_timestamp;
+ }
+
rcv_priv = netdev_priv(rcv);
rxq = skb_get_queue_mapping(skb);
if (rxq < rcv->real_num_rx_queues) {
@@ -370,7 +408,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
- if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) {
+ if (likely(veth_forward_skb(rcv, skb, rq, use_napi, request_timestamp) == NET_RX_SUCCESS)) {
if (!use_napi)
dev_lstats_add(dev, length);
} else {
@@ -483,6 +521,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
int i, ret = -ENXIO, nxmit = 0;
+ ktime_t tx_timestamp = 0;
struct net_device *rcv;
unsigned int max_len;
struct veth_rq *rq;
@@ -511,9 +550,32 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
void *ptr = veth_xdp_to_ptr(frame);
if (unlikely(xdp_get_frame_len(frame) > max_len ||
- __ptr_ring_produce(&rq->xdp_ring, ptr)))
+ __ptr_ring_full(&rq->xdp_ring)))
+ break;
+
+ if (devtx_submit_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_xdp(&ctx.frame, frame);
+ ctx.request_timestamp = false;
+ devtx_submit(dev, &ctx.frame);
+
+ if (unlikely(ctx.request_timestamp))
+ tx_timestamp = ktime_get_real();
+ }
+
+ if (unlikely(__ptr_ring_produce(&rq->xdp_ring, ptr)))
break;
nxmit++;
+
+ if (devtx_complete_enabled(dev)) {
+ struct veth_devtx_frame ctx;
+
+ devtx_frame_from_xdp(&ctx.frame, frame);
+ ctx.xdp_tx_timestamp = tx_timestamp;
+ ctx.skb = NULL;
+ devtx_complete(dev, &ctx.frame);
+ }
}
spin_unlock(&rq->xdp_ring.producer_lock);
@@ -1732,6 +1794,28 @@ static int veth_xdp_rx_hash(const struct xdp_md *ctx, u32 *hash,
return 0;
}
+static int veth_devtx_sb_request_timestamp(const struct devtx_frame *_ctx)
+{
+ struct veth_devtx_frame *ctx = (struct veth_devtx_frame *)_ctx;
+
+ ctx->request_timestamp = true;
+
+ return 0;
+}
+
+static int veth_devtx_cp_timestamp(const struct devtx_frame *_ctx, u64 *timestamp)
+{
+ struct veth_devtx_frame *ctx = (struct veth_devtx_frame *)_ctx;
+
+ if (ctx->skb) {
+ *timestamp = ctx->skb->tstamp;
+ return 0;
+ }
+
+ *timestamp = ctx->xdp_tx_timestamp;
+ return 0;
+}
+
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
@@ -1756,6 +1840,8 @@ static const struct net_device_ops veth_netdev_ops = {
static const struct xdp_metadata_ops veth_xdp_metadata_ops = {
.xmo_rx_timestamp = veth_xdp_rx_timestamp,
.xmo_rx_hash = veth_xdp_rx_hash,
+ .xmo_sb_request_timestamp = veth_devtx_sb_request_timestamp,
+ .xmo_cp_timestamp = veth_devtx_cp_timestamp,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
--
2.41.0.162.gfafddb0af9-goog
next prev parent reply other threads:[~2023-06-12 17:23 UTC|newest]
Thread overview: 47+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-06-12 17:23 [RFC bpf-next 0/7] bpf: netdev TX metadata Stanislav Fomichev
2023-06-12 17:23 ` [RFC bpf-next 1/7] bpf: rename some xdp-metadata functions into dev-bound Stanislav Fomichev
2023-06-12 17:23 ` [RFC bpf-next 2/7] bpf: resolve single typedef when walking structs Stanislav Fomichev
2023-06-12 17:23 ` [RFC bpf-next 3/7] bpf: implement devtx hook points Stanislav Fomichev
2023-06-13 14:54 ` Willem de Bruijn
2023-06-13 19:00 ` Stanislav Fomichev
2023-06-13 19:29 ` Willem de Bruijn
2023-06-13 15:08 ` Simon Horman
2023-06-13 19:00 ` Stanislav Fomichev
2023-06-14 7:02 ` Simon Horman
2023-06-14 17:18 ` Stanislav Fomichev
2023-06-16 5:46 ` Kui-Feng Lee
2023-06-16 17:32 ` Stanislav Fomichev
2023-06-12 17:23 ` [RFC bpf-next 4/7] bpf: implement devtx timestamp kfunc Stanislav Fomichev
2023-06-13 15:14 ` Simon Horman
2023-06-13 18:39 ` Stanislav Fomichev
2023-06-12 17:23 ` Stanislav Fomichev [this message]
2023-06-12 17:23 ` [RFC bpf-next 6/7] selftests/bpf: extend xdp_metadata with devtx kfuncs Stanislav Fomichev
2023-06-13 14:47 ` Willem de Bruijn
2023-06-13 19:00 ` Stanislav Fomichev
2023-06-12 17:23 ` [RFC bpf-next 7/7] selftests/bpf: extend xdp_hw_metadata " Stanislav Fomichev
2023-06-13 15:03 ` Willem de Bruijn
2023-06-13 19:00 ` Stanislav Fomichev
2023-06-12 21:00 ` [RFC bpf-next 0/7] bpf: netdev TX metadata Toke Høiland-Jørgensen
2023-06-13 16:32 ` Stanislav Fomichev
2023-06-13 17:18 ` Toke Høiland-Jørgensen
2023-06-13 18:39 ` Stanislav Fomichev
2023-06-13 19:10 ` Toke Høiland-Jørgensen
2023-06-13 21:17 ` Stanislav Fomichev
2023-06-13 22:32 ` Alexei Starovoitov
2023-06-13 23:16 ` Stanislav Fomichev
2023-06-14 4:19 ` Alexei Starovoitov
2023-06-14 11:59 ` Toke Høiland-Jørgensen
2023-06-14 16:27 ` Alexei Starovoitov
2023-06-15 12:36 ` Toke Høiland-Jørgensen
2023-06-15 16:10 ` Alexei Starovoitov
2023-06-15 16:31 ` Stanislav Fomichev
2023-06-16 1:50 ` Jakub Kicinski
2023-06-16 0:09 ` Stanislav Fomichev
2023-06-16 8:12 ` Magnus Karlsson
2023-06-16 17:32 ` Stanislav Fomichev
2023-06-16 23:10 ` Stanislav Fomichev
2023-06-19 7:15 ` Magnus Karlsson
2023-06-14 3:31 ` Jakub Kicinski
2023-06-14 3:54 ` David Ahern
2023-06-14 5:05 ` Jakub Kicinski
2023-06-14 17:17 ` Stanislav Fomichev
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230612172307.3923165-6-sdf@google.com \
--to=sdf@google.com \
--cc=andrii@kernel.org \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=haoluo@google.com \
--cc=john.fastabend@gmail.com \
--cc=jolsa@kernel.org \
--cc=kpsingh@kernel.org \
--cc=martin.lau@linux.dev \
--cc=netdev@vger.kernel.org \
--cc=song@kernel.org \
--cc=yhs@fb.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox