* [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
@ 2025-12-19 20:29 Mina Almasry
2025-12-20 5:59 ` [Intel-wired-lan] " Paul Menzel
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Mina Almasry @ 2025-12-19 20:29 UTC (permalink / raw)
To: netdev, bpf, linux-kernel
Cc: YiFei Zhu, Alexei Starovoitov, Daniel Borkmann, David S. Miller,
Jakub Kicinski, Jesper Dangaard Brouer, John Fastabend,
Stanislav Fomichev, Tony Nguyen, Przemek Kitszel, Andrew Lunn,
Eric Dumazet, Paolo Abeni, Alexander Lobakin, Richard Cochran,
intel-wired-lan, Mina Almasry, Aleksandr Loktionov
From: YiFei Zhu <zhuyifei@google.com>
The logic is similar to idpf_rx_hwtstamp, but the data is exported
as a BPF kfunc instead of appended to an skb.
An idpf_queue_has(PTP, rxq) condition is added to check the queue
supports PTP, similar to idpf_rx_process_skb_fields.
Cc: intel-wired-lan@lists.osuosl.org
Signed-off-by: YiFei Zhu <zhuyifei@google.com>
Signed-off-by: Mina Almasry <almasrymina@google.com>
Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
---
v3: https://lore.kernel.org/netdev/20251218022948.3288897-1-almasrymina@google.com/
- Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
- Fix _qw1 not copying over ts_low on !__LIBETH_WORD_ACCESS systems
(AI)
v2: https://lore.kernel.org/netdev/20251122140839.3922015-1-almasrymina@google.com/
- Fixed alphabetical ordering
- Use the xdp desc type instead of virtchnl one (required some added
helpers)
---
drivers/net/ethernet/intel/idpf/xdp.c | 31 +++++++++++++++++++++++++++
drivers/net/ethernet/intel/idpf/xdp.h | 22 ++++++++++++++++++-
2 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 958d16f87424..0916d201bf98 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2025 Intel Corporation */
#include "idpf.h"
+#include "idpf_ptp.h"
#include "idpf_virtchnl.h"
#include "xdp.h"
#include "xsk.h"
@@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
pt);
}
+static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
+{
+ const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
+ struct idpf_xdp_rx_desc desc __uninitialized;
+ const struct idpf_rx_queue *rxq;
+ u64 cached_time, ts_ns;
+ u32 ts_high;
+
+ rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
+
+ if (!idpf_queue_has(PTP, rxq))
+ return -ENODATA;
+
+ idpf_xdp_get_qw1(&desc, xdp->desc);
+
+ if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
+ return -ENODATA;
+
+ cached_time = READ_ONCE(rxq->cached_phc_time);
+
+ idpf_xdp_get_qw3(&desc, xdp->desc);
+
+ ts_high = idpf_xdp_rx_ts_high(&desc);
+ ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
+
+ *timestamp = ts_ns;
+ return 0;
+}
+
static const struct xdp_metadata_ops idpf_xdpmo = {
.xmo_rx_hash = idpf_xdpmo_rx_hash,
+ .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
};
void idpf_xdp_set_features(const struct idpf_vport *vport)
diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
index 479f5ef3c604..9daae445bde4 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.h
+++ b/drivers/net/ethernet/intel/idpf/xdp.h
@@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
aligned_u64 qw1;
#define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
#define IDPF_XDP_RX_EOP BIT_ULL(1)
+#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
aligned_u64 qw2;
#define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
aligned_u64 qw3;
+#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
} __aligned(4 * sizeof(u64));
static_assert(sizeof(struct idpf_xdp_rx_desc) ==
sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
@@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
#define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
#define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
#define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
+#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
+#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
static inline void
idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
@@ -149,7 +153,10 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
desc->qw1 = ((const typeof(desc))rxd)->qw1;
#else
desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
- rxd->status_err0_qw1;
+ ((u64)rxd->ts_low << 24) |
+ ((u64)rxd->fflags1 << 16) |
+ ((u64)rxd->status_err1 << 8) |
+ rxd->status_err0_qw1;
#endif
}
@@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
#endif
}
+static inline void
+idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
+{
+#ifdef __LIBETH_WORD_ACCESS
+ desc->qw3 = ((const typeof(desc))rxd)->qw3;
+#else
+ desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
+ ((u64)le16_to_cpu(rxd->fmd6) << 16) |
+ le16_to_cpu(rxd->l2tag1);
+#endif
+}
+
void idpf_xdp_set_features(const struct idpf_vport *vport);
int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
base-commit: 7b8e9264f55a9c320f398e337d215e68cca50131
--
2.52.0.322.g1dd061c0dc-goog
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [Intel-wired-lan] [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
2025-12-19 20:29 [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP Mina Almasry
@ 2025-12-20 5:59 ` Paul Menzel
2025-12-22 11:48 ` Paolo Abeni
2025-12-22 12:54 ` Alexander Lobakin
2 siblings, 0 replies; 6+ messages in thread
From: Paul Menzel @ 2025-12-20 5:59 UTC (permalink / raw)
To: Mina Almasry
Cc: netdev, bpf, linux-kernel, YiFei Zhu, Alexei Starovoitov,
Daniel Borkmann, David S. Miller, Jakub Kicinski,
Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
Tony Nguyen, Przemek Kitszel, Andrew Lunn, Eric Dumazet,
Paolo Abeni, Alexander Lobakin, Richard Cochran, intel-wired-lan,
Aleksandr Loktionov
Dear Mina,
Thank you for the patch.
Am 19.12.25 um 21:29 schrieb Mina Almasry via Intel-wired-lan:
> From: YiFei Zhu <zhuyifei@google.com>
>
> The logic is similar to idpf_rx_hwtstamp, but the data is exported
> as a BPF kfunc instead of appended to an skb.
Could you add the reason why it’s done this way?
> A idpf_queue_has(PTP, rxq) condition is added to check the queue
> supports PTP similar to idpf_rx_process_skb_fields.
It’d be great if you added test information.
> Cc: intel-wired-lan@lists.osuosl.org
>
Remove the blank line.
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> Signed-off-by: Mina Almasry <almasrymina@google.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
>
> ---
>
> v3: https://lore.kernel.org/netdev/20251218022948.3288897-1-almasrymina@google.com/
> - Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
> - Fix _qw1 not copying over ts_low on on !__LIBETH_WORD_ACCESS systems
> (AI)
>
> v2: https://lore.kernel.org/netdev/20251122140839.3922015-1-almasrymina@google.com/
> - Fixed alphabetical ordering
> - Use the xdp desc type instead of virtchnl one (required some added
> helpers)
>
> ---
> drivers/net/ethernet/intel/idpf/xdp.c | 31 +++++++++++++++++++++++++++
> drivers/net/ethernet/intel/idpf/xdp.h | 22 ++++++++++++++++++-
> 2 files changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
> index 958d16f87424..0916d201bf98 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.c
> +++ b/drivers/net/ethernet/intel/idpf/xdp.c
> @@ -2,6 +2,7 @@
> /* Copyright (C) 2025 Intel Corporation */
>
> #include "idpf.h"
> +#include "idpf_ptp.h"
> #include "idpf_virtchnl.h"
> #include "xdp.h"
> #include "xsk.h"
> @@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
> pt);
> }
>
> +static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
> +{
> + const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
> + struct idpf_xdp_rx_desc desc __uninitialized;
> + const struct idpf_rx_queue *rxq;
> + u64 cached_time, ts_ns;
> + u32 ts_high;
> +
> + rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
> +
> + if (!idpf_queue_has(PTP, rxq))
> + return -ENODATA;
> +
> + idpf_xdp_get_qw1(&desc, xdp->desc);
> +
> + if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> + return -ENODATA;
> +
> + cached_time = READ_ONCE(rxq->cached_phc_time);
> +
> + idpf_xdp_get_qw3(&desc, xdp->desc);
> +
> + ts_high = idpf_xdp_rx_ts_high(&desc);
> + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> +
> + *timestamp = ts_ns;
> + return 0;
> +}
> +
> static const struct xdp_metadata_ops idpf_xdpmo = {
> .xmo_rx_hash = idpf_xdpmo_rx_hash,
> + .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
> };
>
> void idpf_xdp_set_features(const struct idpf_vport *vport)
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
> index 479f5ef3c604..9daae445bde4 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.h
> +++ b/drivers/net/ethernet/intel/idpf/xdp.h
> @@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
> aligned_u64 qw1;
> #define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
> #define IDPF_XDP_RX_EOP BIT_ULL(1)
> +#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
>
> aligned_u64 qw2;
> #define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
>
> aligned_u64 qw3;
> +#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
> } __aligned(4 * sizeof(u64));
> static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
> @@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> #define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
> #define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
> #define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
> +#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
> +#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
>
> static inline void
> idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
> @@ -149,7 +153,10 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
> desc->qw1 = ((const typeof(desc))rxd)->qw1;
> #else
> desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
> - rxd->status_err0_qw1;
> + ((u64)rxd->ts_low << 24) |
> + ((u64)rxd->fflags1 << 16) |
> + ((u64)rxd->status_err1 << 8) |
> + rxd->status_err0_qw1;
> #endif
> }
>
> @@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
> #endif
> }
>
> +static inline void
> +idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
> + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
> +{
> +#ifdef __LIBETH_WORD_ACCESS
> + desc->qw3 = ((const typeof(desc))rxd)->qw3;
> +#else
> + desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
> + ((u64)le16_to_cpu(rxd->fmd6) << 16) |
> + le16_to_cpu(rxd->l2tag1);
> +#endif
> +}
> +
> void idpf_xdp_set_features(const struct idpf_vport *vport);
>
> int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
The diff looks fine.
Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
Kind regards,
Paul
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
2025-12-19 20:29 [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP Mina Almasry
2025-12-20 5:59 ` [Intel-wired-lan] " Paul Menzel
@ 2025-12-22 11:48 ` Paolo Abeni
2025-12-22 12:54 ` Alexander Lobakin
2 siblings, 0 replies; 6+ messages in thread
From: Paolo Abeni @ 2025-12-22 11:48 UTC (permalink / raw)
To: Mina Almasry, netdev, bpf, linux-kernel
Cc: YiFei Zhu, Alexei Starovoitov, Daniel Borkmann, David S. Miller,
Jakub Kicinski, Jesper Dangaard Brouer, John Fastabend,
Stanislav Fomichev, Tony Nguyen, Przemek Kitszel, Andrew Lunn,
Eric Dumazet, Alexander Lobakin, Richard Cochran, intel-wired-lan,
Aleksandr Loktionov
On 12/19/25 9:29 PM, Mina Almasry wrote:
> From: YiFei Zhu <zhuyifei@google.com>
>
> The logic is similar to idpf_rx_hwtstamp, but the data is exported
> as a BPF kfunc instead of appended to an skb.
>
> A idpf_queue_has(PTP, rxq) condition is added to check the queue
> supports PTP similar to idpf_rx_process_skb_fields.
>
> Cc: intel-wired-lan@lists.osuosl.org
>
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> Signed-off-by: Mina Almasry <almasrymina@google.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
@YiFei and Mina: I believe this patch should go first via the intel
tree: please replace the 'net-next' tag prefix with 'iwl-next' on later
revision, if any.
Thanks,
Paolo
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
2025-12-19 20:29 [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP Mina Almasry
2025-12-20 5:59 ` [Intel-wired-lan] " Paul Menzel
2025-12-22 11:48 ` Paolo Abeni
@ 2025-12-22 12:54 ` Alexander Lobakin
2025-12-22 23:00 ` Mina Almasry
2 siblings, 1 reply; 6+ messages in thread
From: Alexander Lobakin @ 2025-12-22 12:54 UTC (permalink / raw)
To: Mina Almasry
Cc: netdev, bpf, linux-kernel, YiFei Zhu, Alexei Starovoitov,
Daniel Borkmann, David S. Miller, Jakub Kicinski,
Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
Tony Nguyen, Przemek Kitszel, Andrew Lunn, Eric Dumazet,
Paolo Abeni, Richard Cochran, intel-wired-lan,
Aleksandr Loktionov
From: Mina Almasry <almasrymina@google.com>
Date: Fri, 19 Dec 2025 20:29:54 +0000
> From: YiFei Zhu <zhuyifei@google.com>
>
> The logic is similar to idpf_rx_hwtstamp, but the data is exported
> as a BPF kfunc instead of appended to an skb.
>
> A idpf_queue_has(PTP, rxq) condition is added to check the queue
> supports PTP similar to idpf_rx_process_skb_fields.
>
> Cc: intel-wired-lan@lists.osuosl.org
>
> Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> Signed-off-by: Mina Almasry <almasrymina@google.com>
> Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
>
> ---
>
> v3: https://lore.kernel.org/netdev/20251218022948.3288897-1-almasrymina@google.com/
> - Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
> - Fix _qw1 not copying over ts_low on on !__LIBETH_WORD_ACCESS systems
> (AI)
>
> v2: https://lore.kernel.org/netdev/20251122140839.3922015-1-almasrymina@google.com/
> - Fixed alphabetical ordering
> - Use the xdp desc type instead of virtchnl one (required some added
> helpers)
>
> ---
> drivers/net/ethernet/intel/idpf/xdp.c | 31 +++++++++++++++++++++++++++
> drivers/net/ethernet/intel/idpf/xdp.h | 22 ++++++++++++++++++-
> 2 files changed, 52 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
> index 958d16f87424..0916d201bf98 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.c
> +++ b/drivers/net/ethernet/intel/idpf/xdp.c
> @@ -2,6 +2,7 @@
> /* Copyright (C) 2025 Intel Corporation */
>
> #include "idpf.h"
> +#include "idpf_ptp.h"
> #include "idpf_virtchnl.h"
> #include "xdp.h"
> #include "xsk.h"
> @@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
> pt);
> }
>
> +static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
> +{
> + const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
> + struct idpf_xdp_rx_desc desc __uninitialized;
> + const struct idpf_rx_queue *rxq;
> + u64 cached_time, ts_ns;
> + u32 ts_high;
> +
> + rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
> +
> + if (!idpf_queue_has(PTP, rxq))
> + return -ENODATA;
> +
> + idpf_xdp_get_qw1(&desc, xdp->desc);
> +
> + if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> + return -ENODATA;
> +
> + cached_time = READ_ONCE(rxq->cached_phc_time);
> +
> + idpf_xdp_get_qw3(&desc, xdp->desc);
> +
> + ts_high = idpf_xdp_rx_ts_high(&desc);
> + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> +
> + *timestamp = ts_ns;
> + return 0;
> +}
> +
> static const struct xdp_metadata_ops idpf_xdpmo = {
> .xmo_rx_hash = idpf_xdpmo_rx_hash,
> + .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
> };
>
> void idpf_xdp_set_features(const struct idpf_vport *vport)
> diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
> index 479f5ef3c604..9daae445bde4 100644
> --- a/drivers/net/ethernet/intel/idpf/xdp.h
> +++ b/drivers/net/ethernet/intel/idpf/xdp.h
> @@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
> aligned_u64 qw1;
> #define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
> #define IDPF_XDP_RX_EOP BIT_ULL(1)
> +#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
>
> aligned_u64 qw2;
> #define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
>
> aligned_u64 qw3;
> +#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
> } __aligned(4 * sizeof(u64));
> static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
> @@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> #define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
> #define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
> #define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
> +#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
> +#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
>
> static inline void
> idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
> @@ -149,7 +153,10 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
> desc->qw1 = ((const typeof(desc))rxd)->qw1;
> #else
> desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
> - rxd->status_err0_qw1;
> + ((u64)rxd->ts_low << 24) |
> + ((u64)rxd->fflags1 << 16) |
> + ((u64)rxd->status_err1 << 8) |
I'm not sure you need casts to u64 here. Pls rebuild without them and
check the objdiff / compiler warnings.
It's required for buf_id as we shift by 32.
> + rxd->status_err0_qw1;
Why did you replace the proper indentation with two tabs in all 4 lines
above?
Even though...
> #endif
> }
>
> @@ -166,6 +173,19 @@ idpf_xdp_get_qw2(struct idpf_xdp_rx_desc *desc,
> #endif
> }
>
> +static inline void
> +idpf_xdp_get_qw3(struct idpf_xdp_rx_desc *desc,
> + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rxd)
> +{
> +#ifdef __LIBETH_WORD_ACCESS
> + desc->qw3 = ((const typeof(desc))rxd)->qw3;
> +#else
> + desc->qw3 = ((u64)le32_to_cpu(rxd->ts_high) << 32) |
> + ((u64)le16_to_cpu(rxd->fmd6) << 16) |
> + le16_to_cpu(rxd->l2tag1);
...here you use the correct one.
> +#endif
> +}
> +
> void idpf_xdp_set_features(const struct idpf_vport *vport);
>
> int idpf_xdp(struct net_device *dev, struct netdev_bpf *xdp);
>
> base-commit: 7b8e9264f55a9c320f398e337d215e68cca50131
Thanks,
Olek
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
2025-12-22 12:54 ` Alexander Lobakin
@ 2025-12-22 23:00 ` Mina Almasry
2025-12-23 7:03 ` Loktionov, Aleksandr
0 siblings, 1 reply; 6+ messages in thread
From: Mina Almasry @ 2025-12-22 23:00 UTC (permalink / raw)
To: Alexander Lobakin
Cc: netdev, bpf, linux-kernel, YiFei Zhu, Alexei Starovoitov,
Daniel Borkmann, David S. Miller, Jakub Kicinski,
Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
Tony Nguyen, Przemek Kitszel, Andrew Lunn, Eric Dumazet,
Paolo Abeni, Richard Cochran, intel-wired-lan,
Aleksandr Loktionov
On Mon, Dec 22, 2025 at 4:55 AM Alexander Lobakin
<aleksander.lobakin@intel.com> wrote:
>
> From: Mina Almasry <almasrymina@google.com>
> Date: Fri, 19 Dec 2025 20:29:54 +0000
>
> > From: YiFei Zhu <zhuyifei@google.com>
> >
> > The logic is similar to idpf_rx_hwtstamp, but the data is exported
> > as a BPF kfunc instead of appended to an skb.
> >
> > A idpf_queue_has(PTP, rxq) condition is added to check the queue
> > supports PTP similar to idpf_rx_process_skb_fields.
> >
> > Cc: intel-wired-lan@lists.osuosl.org
> >
> > Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> > Signed-off-by: Mina Almasry <almasrymina@google.com>
> > Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
> >
> > ---
> >
> > v3: https://lore.kernel.org/netdev/20251218022948.3288897-1-almasrymina@google.com/
> > - Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
> > - Fix _qw1 not copying over ts_low on on !__LIBETH_WORD_ACCESS systems
> > (AI)
> >
> > v2: https://lore.kernel.org/netdev/20251122140839.3922015-1-almasrymina@google.com/
> > - Fixed alphabetical ordering
> > - Use the xdp desc type instead of virtchnl one (required some added
> > helpers)
> >
> > ---
> > drivers/net/ethernet/intel/idpf/xdp.c | 31 +++++++++++++++++++++++++++
> > drivers/net/ethernet/intel/idpf/xdp.h | 22 ++++++++++++++++++-
> > 2 files changed, 52 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
> > index 958d16f87424..0916d201bf98 100644
> > --- a/drivers/net/ethernet/intel/idpf/xdp.c
> > +++ b/drivers/net/ethernet/intel/idpf/xdp.c
> > @@ -2,6 +2,7 @@
> > /* Copyright (C) 2025 Intel Corporation */
> >
> > #include "idpf.h"
> > +#include "idpf_ptp.h"
> > #include "idpf_virtchnl.h"
> > #include "xdp.h"
> > #include "xsk.h"
> > @@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct xdp_md *ctx, u32 *hash,
> > pt);
> > }
> >
> > +static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64 *timestamp)
> > +{
> > + const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
> > + struct idpf_xdp_rx_desc desc __uninitialized;
> > + const struct idpf_rx_queue *rxq;
> > + u64 cached_time, ts_ns;
> > + u32 ts_high;
> > +
> > + rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
> > +
> > + if (!idpf_queue_has(PTP, rxq))
> > + return -ENODATA;
> > +
> > + idpf_xdp_get_qw1(&desc, xdp->desc);
> > +
> > + if (!(idpf_xdp_rx_ts_low(&desc) & VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> > + return -ENODATA;
> > +
> > + cached_time = READ_ONCE(rxq->cached_phc_time);
> > +
> > + idpf_xdp_get_qw3(&desc, xdp->desc);
> > +
> > + ts_high = idpf_xdp_rx_ts_high(&desc);
> > + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time, ts_high);
> > +
> > + *timestamp = ts_ns;
> > + return 0;
> > +}
> > +
> > static const struct xdp_metadata_ops idpf_xdpmo = {
> > .xmo_rx_hash = idpf_xdpmo_rx_hash,
> > + .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
> > };
> >
> > void idpf_xdp_set_features(const struct idpf_vport *vport)
> > diff --git a/drivers/net/ethernet/intel/idpf/xdp.h b/drivers/net/ethernet/intel/idpf/xdp.h
> > index 479f5ef3c604..9daae445bde4 100644
> > --- a/drivers/net/ethernet/intel/idpf/xdp.h
> > +++ b/drivers/net/ethernet/intel/idpf/xdp.h
> > @@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
> > aligned_u64 qw1;
> > #define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
> > #define IDPF_XDP_RX_EOP BIT_ULL(1)
> > +#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
> >
> > aligned_u64 qw2;
> > #define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
> >
> > aligned_u64 qw3;
> > +#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
> > } __aligned(4 * sizeof(u64));
> > static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> > sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
> > @@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> > #define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF, (desc)->qw1)
> > #define idpf_xdp_rx_eop(desc) !!((desc)->qw1 & IDPF_XDP_RX_EOP)
> > #define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH, (desc)->qw2)
> > +#define idpf_xdp_rx_ts_low(desc) FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
> > +#define idpf_xdp_rx_ts_high(desc) FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
> >
> > static inline void
> > idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc,
> > @@ -149,7 +153,10 @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
> > desc->qw1 = ((const typeof(desc))rxd)->qw1;
> > #else
> > desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
> > - rxd->status_err0_qw1;
> > + ((u64)rxd->ts_low << 24) |
> > + ((u64)rxd->fflags1 << 16) |
> > + ((u64)rxd->status_err1 << 8) |
>
> I'm not sure you need casts to u64 here. Pls rebuild without them and
> check the objdiff / compiler warnings.
> It's required for buf_id as we shift by 32.
>
The compiler does not warn if I drop the u64 casts, but are you sure
you want them dropped? You're already doing u64 casts in all the
entries that you bit-shift in qw0 and qw2. It makes the code clearer
imo. But up to you.
> > + rxd->status_err0_qw1;
>
> Why did you replace the proper indentation with two tabs in all 4 lines
> above?
>
Sure, will fix.
--
Thanks,
Mina
^ permalink raw reply [flat|nested] 6+ messages in thread
* RE: [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP
2025-12-22 23:00 ` Mina Almasry
@ 2025-12-23 7:03 ` Loktionov, Aleksandr
0 siblings, 0 replies; 6+ messages in thread
From: Loktionov, Aleksandr @ 2025-12-23 7:03 UTC (permalink / raw)
To: Mina Almasry, Lobakin, Aleksander
Cc: netdev@vger.kernel.org, bpf@vger.kernel.org,
linux-kernel@vger.kernel.org, YiFei Zhu, Alexei Starovoitov,
Daniel Borkmann, David S. Miller, Jakub Kicinski,
Jesper Dangaard Brouer, John Fastabend, Stanislav Fomichev,
Nguyen, Anthony L, Kitszel, Przemyslaw, Andrew Lunn, Eric Dumazet,
Paolo Abeni, Richard Cochran, intel-wired-lan@lists.osuosl.org
> -----Original Message-----
> From: Mina Almasry <almasrymina@google.com>
> Sent: Tuesday, December 23, 2025 12:00 AM
> To: Lobakin, Aleksander <aleksander.lobakin@intel.com>
> Cc: netdev@vger.kernel.org; bpf@vger.kernel.org; linux-
> kernel@vger.kernel.org; YiFei Zhu <zhuyifei@google.com>; Alexei
> Starovoitov <ast@kernel.org>; Daniel Borkmann <daniel@iogearbox.net>;
> David S. Miller <davem@davemloft.net>; Jakub Kicinski
> <kuba@kernel.org>; Jesper Dangaard Brouer <hawk@kernel.org>; John
> Fastabend <john.fastabend@gmail.com>; Stanislav Fomichev
> <sdf@fomichev.me>; Nguyen, Anthony L <anthony.l.nguyen@intel.com>;
> Kitszel, Przemyslaw <przemyslaw.kitszel@intel.com>; Andrew Lunn
> <andrew+netdev@lunn.ch>; Eric Dumazet <edumazet@google.com>; Paolo
> Abeni <pabeni@redhat.com>; Richard Cochran <richardcochran@gmail.com>;
> intel-wired-lan@lists.osuosl.org; Loktionov, Aleksandr
> <aleksandr.loktionov@intel.com>
> Subject: Re: [PATCH net-next v3] idpf: export RX hardware timestamping
> information to XDP
>
> On Mon, Dec 22, 2025 at 4:55 AM Alexander Lobakin
> <aleksander.lobakin@intel.com> wrote:
> >
> > From: Mina Almasry <almasrymina@google.com>
> > Date: Fri, 19 Dec 2025 20:29:54 +0000
> >
> > > From: YiFei Zhu <zhuyifei@google.com>
> > >
> > > The logic is similar to idpf_rx_hwtstamp, but the data is exported
> > > as a BPF kfunc instead of appended to an skb.
> > >
> > > A idpf_queue_has(PTP, rxq) condition is added to check the queue
> > > supports PTP similar to idpf_rx_process_skb_fields.
> > >
> > > Cc: intel-wired-lan@lists.osuosl.org
> > >
> > > Signed-off-by: YiFei Zhu <zhuyifei@google.com>
> > > Signed-off-by: Mina Almasry <almasrymina@google.com>
> > > Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
> > >
> > > ---
> > >
> > > v3:
> > > https://lore.kernel.org/netdev/20251218022948.3288897-1-
> almasrymina@
> > > google.com/
> > > - Do the idpf_queue_has(PTP) check before we read qw1 (lobakin)
> > > - Fix _qw1 not copying over ts_low on on !__LIBETH_WORD_ACCESS
> systems
> > > (AI)
> > >
> > > v2:
> > > https://lore.kernel.org/netdev/20251122140839.3922015-1-
> almasrymina@
> > > google.com/
> > > - Fixed alphabetical ordering
> > > - Use the xdp desc type instead of virtchnl one (required some
> added
> > > helpers)
> > >
> > > ---
> > > drivers/net/ethernet/intel/idpf/xdp.c | 31
> > > +++++++++++++++++++++++++++ drivers/net/ethernet/intel/idpf/xdp.h
> |
> > > 22 ++++++++++++++++++-
> > > 2 files changed, 52 insertions(+), 1 deletion(-)
> > >
> > > diff --git a/drivers/net/ethernet/intel/idpf/xdp.c
> > > b/drivers/net/ethernet/intel/idpf/xdp.c
> > > index 958d16f87424..0916d201bf98 100644
> > > --- a/drivers/net/ethernet/intel/idpf/xdp.c
> > > +++ b/drivers/net/ethernet/intel/idpf/xdp.c
> > > @@ -2,6 +2,7 @@
> > > /* Copyright (C) 2025 Intel Corporation */
> > >
> > > #include "idpf.h"
> > > +#include "idpf_ptp.h"
> > > #include "idpf_virtchnl.h"
> > > #include "xdp.h"
> > > #include "xsk.h"
> > > @@ -391,8 +392,38 @@ static int idpf_xdpmo_rx_hash(const struct
> xdp_md *ctx, u32 *hash,
> > > pt); }
> > >
> > > +static int idpf_xdpmo_rx_timestamp(const struct xdp_md *ctx, u64
> > > +*timestamp) {
> > > + const struct libeth_xdp_buff *xdp = (typeof(xdp))ctx;
> > > + struct idpf_xdp_rx_desc desc __uninitialized;
> > > + const struct idpf_rx_queue *rxq;
> > > + u64 cached_time, ts_ns;
> > > + u32 ts_high;
> > > +
> > > + rxq = libeth_xdp_buff_to_rq(xdp, typeof(*rxq), xdp_rxq);
> > > +
> > > + if (!idpf_queue_has(PTP, rxq))
> > > + return -ENODATA;
> > > +
> > > + idpf_xdp_get_qw1(&desc, xdp->desc);
> > > +
> > > + if (!(idpf_xdp_rx_ts_low(&desc) &
> VIRTCHNL2_RX_FLEX_TSTAMP_VALID))
> > > + return -ENODATA;
> > > +
> > > + cached_time = READ_ONCE(rxq->cached_phc_time);
> > > +
> > > + idpf_xdp_get_qw3(&desc, xdp->desc);
> > > +
> > > + ts_high = idpf_xdp_rx_ts_high(&desc);
> > > + ts_ns = idpf_ptp_tstamp_extend_32b_to_64b(cached_time,
> > > + ts_high);
> > > +
> > > + *timestamp = ts_ns;
> > > + return 0;
> > > +}
> > > +
> > > static const struct xdp_metadata_ops idpf_xdpmo = {
> > > .xmo_rx_hash = idpf_xdpmo_rx_hash,
> > > + .xmo_rx_timestamp = idpf_xdpmo_rx_timestamp,
> > > };
> > >
> > > void idpf_xdp_set_features(const struct idpf_vport *vport) diff
> > > --git a/drivers/net/ethernet/intel/idpf/xdp.h
> > > b/drivers/net/ethernet/intel/idpf/xdp.h
> > > index 479f5ef3c604..9daae445bde4 100644
> > > --- a/drivers/net/ethernet/intel/idpf/xdp.h
> > > +++ b/drivers/net/ethernet/intel/idpf/xdp.h
> > > @@ -112,11 +112,13 @@ struct idpf_xdp_rx_desc {
> > > aligned_u64 qw1;
> > > #define IDPF_XDP_RX_BUF GENMASK_ULL(47, 32)
> > > #define IDPF_XDP_RX_EOP BIT_ULL(1)
> > > +#define IDPF_XDP_RX_TS_LOW GENMASK_ULL(31, 24)
> > >
> > > aligned_u64 qw2;
> > > #define IDPF_XDP_RX_HASH GENMASK_ULL(31, 0)
> > >
> > > aligned_u64 qw3;
> > > +#define IDPF_XDP_RX_TS_HIGH GENMASK_ULL(63, 32)
> > > } __aligned(4 * sizeof(u64));
> > > static_assert(sizeof(struct idpf_xdp_rx_desc) ==
> > > sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3));
> > > @@ -128,6 +130,8 @@ static_assert(sizeof(struct idpf_xdp_rx_desc)
> ==
> > > #define idpf_xdp_rx_buf(desc) FIELD_GET(IDPF_XDP_RX_BUF,
> (desc)->qw1)
> > > #define idpf_xdp_rx_eop(desc) !!((desc)->qw1 &
> IDPF_XDP_RX_EOP)
> > > #define idpf_xdp_rx_hash(desc) FIELD_GET(IDPF_XDP_RX_HASH,
> (desc)->qw2)
> > > +#define idpf_xdp_rx_ts_low(desc)
> FIELD_GET(IDPF_XDP_RX_TS_LOW, (desc)->qw1)
> > > +#define idpf_xdp_rx_ts_high(desc)
> FIELD_GET(IDPF_XDP_RX_TS_HIGH, (desc)->qw3)
> > >
> > > static inline void
> > > idpf_xdp_get_qw0(struct idpf_xdp_rx_desc *desc, @@ -149,7 +153,10
> > > @@ idpf_xdp_get_qw1(struct idpf_xdp_rx_desc *desc,
> > > desc->qw1 = ((const typeof(desc))rxd)->qw1; #else
> > > desc->qw1 = ((u64)le16_to_cpu(rxd->buf_id) << 32) |
> > > - rxd->status_err0_qw1;
> > > + ((u64)rxd->ts_low << 24) |
> > > + ((u64)rxd->fflags1 << 16) |
> > > + ((u64)rxd->status_err1 << 8) |
> >
> > I'm not sure you need casts to u64 here. Pls rebuild without them
> and
> > check the objdiff / compiler warnings.
> > It's required for buf_id as we shift by 32.
> >
>
> The compiler does not warn if I drop the u64 casts, but are you sure
> you want them dropped? You're already doing u64 casts in all the
> entries that you bit-shift in qw0 and qw2. It makes the code clearer
> imo. But up to you.
>
> > > + rxd->status_err0_qw1;
IMHO FIELD_PREP() looks better, just in case:
desc->qw1 =
FIELD_PREP(RXD_QW1_BUF_ID, le16_to_cpu(rxd->buf_id)) |
FIELD_PREP(RXD_QW1_TS_LOW, rxd->ts_low) |
FIELD_PREP(RXD_QW1_FFLAGS1, rxd->fflags1) |
FIELD_PREP(RXD_QW1_STATUS_ERR1, rxd->status_err1) |
FIELD_PREP(RXD_QW1_STATUS_ERR0, rxd->status_err0);
> >
> > Why did you replace the proper indentation with two tabs in all 4
> > lines above?
> >
>
> Sure, will fix.
>
> --
> Thanks,
> Mina
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2025-12-23 7:03 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2025-12-19 20:29 [PATCH net-next v3] idpf: export RX hardware timestamping information to XDP Mina Almasry
2025-12-20 5:59 ` [Intel-wired-lan] " Paul Menzel
2025-12-22 11:48 ` Paolo Abeni
2025-12-22 12:54 ` Alexander Lobakin
2025-12-22 23:00 ` Mina Almasry
2025-12-23 7:03 ` Loktionov, Aleksandr
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox