From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
To: Praveen Kaligineedi <pkaligineedi@google.com>
Cc: <netdev@vger.kernel.org>, <davem@davemloft.net>,
<kuba@kernel.org>, "Jeroen de Borst" <jeroendb@google.com>
Subject: Re: [PATCH net-next 3/4] gve: Add XDP REDIRECT support for GQI-QPL format
Date: Tue, 14 Feb 2023 02:03:34 +0100 [thread overview]
Message-ID: <Y+rd5ljmJuNPDwv1@boxer> (raw)
In-Reply-To: <20230207210058.2257219-4-pkaligineedi@google.com>
On Tue, Feb 07, 2023 at 01:00:57PM -0800, Praveen Kaligineedi wrote:
> Add support for XDP REDIRECT action.
>
> This patch contains the following changes:
> 1) Support for XDP REDIRECT action on rx
> 2) ndo_xdp_xmit callback support
>
> Signed-off-by: Praveen Kaligineedi <pkaligineedi@google.com>
> Reviewed-by: Jeroen de Borst <jeroendb@google.com>
> ---
> drivers/net/ethernet/google/gve/gve.h | 13 ++++-
> drivers/net/ethernet/google/gve/gve_ethtool.c | 26 ++++++----
> drivers/net/ethernet/google/gve/gve_main.c | 17 +++++++
> drivers/net/ethernet/google/gve/gve_rx.c | 45 +++++++++++++++--
> drivers/net/ethernet/google/gve/gve_tx.c | 48 +++++++++++++++++--
> 5 files changed, 132 insertions(+), 17 deletions(-)
>
> diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
> index 8352f4c0e8d1..f89b1278db70 100644
> --- a/drivers/net/ethernet/google/gve/gve.h
> +++ b/drivers/net/ethernet/google/gve/gve.h
> @@ -236,6 +236,7 @@ struct gve_rx_ring {
> u64 rx_frag_alloc_cnt; /* free-running count of rx page allocations */
> u64 xdp_tx_errors;
> u64 xdp_redirect_errors;
> + u64 xdp_alloc_fails;
> u64 xdp_actions[GVE_XDP_ACTIONS];
> u32 q_num; /* queue index */
> u32 ntfy_id; /* notification block index */
> @@ -247,6 +248,7 @@ struct gve_rx_ring {
>
> /* XDP stuff */
> struct xdp_rxq_info xdp_rxq;
> + struct page_frag_cache page_cache;
A few words (in the commit message or a comment) about why this per-ring page_frag_cache is needed would be helpful.
> };
>
> /* A TX desc ring entry */
> @@ -267,7 +269,10 @@ struct gve_tx_iovec {
> * ring entry but only used for a pkt_desc not a seg_desc
> */
> struct gve_tx_buffer_state {
> - struct sk_buff *skb; /* skb for this pkt */
> + union {
> + struct sk_buff *skb; /* skb for this pkt */
> + struct xdp_frame *xdp_frame; /* xdp_frame */
> + };
> struct {
> u16 size; /* size of xmitted xdp pkt */
> } xdp;
> @@ -464,6 +469,8 @@ struct gve_tx_ring {
> dma_addr_t q_resources_bus; /* dma address of the queue resources */
> dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
> struct u64_stats_sync statss; /* sync stats for 32bit archs */
> + u64 xdp_xmit;
> + u64 xdp_xmit_errors;
> } ____cacheline_aligned;
>
> /* Wraps the info for one irq including the napi struct and the queues
> @@ -889,8 +896,10 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
> enum dma_data_direction);
> /* tx handling */
> netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
> +int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
> + u32 flags);
> int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
> - void *data, int len, u32 flags);
> + void *data, int len, void *frame_p, u32 flags);
> bool gve_tx_poll(struct gve_notify_block *block, int budget);
> bool gve_xdp_poll(struct gve_notify_block *block, int budget);
> int gve_tx_alloc_rings(struct gve_priv *priv);
> diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
> index d2f0b53eacbb..57940f90c6be 100644
> --- a/drivers/net/ethernet/google/gve/gve_ethtool.c
> +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
> @@ -56,13 +56,14 @@ static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
> "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
> "rx_xdp_aborted[%u]", "rx_xdp_drop[%u]", "rx_xdp_pass[%u]",
> "rx_xdp_tx[%u]", "rx_xdp_redirect[%u]",
> - "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]",
> + "rx_xdp_tx_errors[%u]", "rx_xdp_redirect_errors[%u]", "rx_xdp_alloc_fails[%u]",
> };
>
> static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
> "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_consumed_desc[%u]", "tx_bytes[%u]",
> "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
> "tx_dma_mapping_error[%u]",
> + "tx_xdp_xmit[%u]", "tx_xdp_xmit_errors[%u]"
> };
>
> static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
> @@ -312,9 +313,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
> data[i + j] = rx->xdp_actions[j];
> data[i + j++] = rx->xdp_tx_errors;
> data[i + j++] = rx->xdp_redirect_errors;
> + data[i + j++] = rx->xdp_alloc_fails;
> } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
> start));
> - i += GVE_XDP_ACTIONS + 2; /* XDP rx counters */
> + i += GVE_XDP_ACTIONS + 3; /* XDP rx counters */
> }
> } else {
> i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
> @@ -370,13 +372,21 @@ gve_get_ethtool_stats(struct net_device *netdev,
> if (skip_nic_stats) {
> /* skip NIC tx stats */
> i += NIC_TX_STATS_REPORT_NUM;
> - continue;
> - }
> - for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
> - u64 value =
> - be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
> - data[i++] = value;
> + } else {
> + stats_idx = tx_qid_to_stats_idx[ring];
> + for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
> + u64 value =
> + be64_to_cpu(report_stats[stats_idx + j].value);
> + data[i++] = value;
> + }
> }
> + do {
> + start = u64_stats_fetch_begin(&priv->tx[ring].statss);
> + data[i] = tx->xdp_xmit;
> + data[i + 1] = tx->xdp_xmit_errors;
> + } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
> + start));
> + i += 2; /* XDP tx counters */
> }
> } else {
> i += num_tx_queues * NUM_GVE_TX_CNTS;
> diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
> index 5050aa3aa1c3..4398e5887f3b 100644
> --- a/drivers/net/ethernet/google/gve/gve_main.c
> +++ b/drivers/net/ethernet/google/gve/gve_main.c
> @@ -1015,6 +1015,21 @@ static void gve_unreg_xdp_info(struct gve_priv *priv)
> }
> }
>
> +static void gve_drain_page_cache(struct gve_priv *priv)
> +{
> + struct page_frag_cache *nc;
> + int i;
> +
> + for (i = 0; i < priv->rx_cfg.num_queues; i++) {
> + nc = &priv->rx[i].page_cache;
> + if (nc->va) {
> + __page_frag_cache_drain(virt_to_page(nc->va),
> + nc->pagecnt_bias);
> + nc->va = NULL;
> + }
> + }
> +}
> +
> static int gve_open(struct net_device *dev)
> {
> struct gve_priv *priv = netdev_priv(dev);
> @@ -1098,6 +1113,7 @@ static int gve_close(struct net_device *dev)
> netif_carrier_off(dev);
> if (gve_get_device_rings_ok(priv)) {
> gve_turndown(priv);
> + gve_drain_page_cache(priv);
> err = gve_destroy_rings(priv);
> if (err)
> goto err;
> @@ -1409,6 +1425,7 @@ static const struct net_device_ops gve_netdev_ops = {
> .ndo_tx_timeout = gve_tx_timeout,
> .ndo_set_features = gve_set_features,
> .ndo_bpf = gve_xdp,
> + .ndo_xdp_xmit = gve_xdp_xmit,
> };
>
> static void gve_handle_status(struct gve_priv *priv, u32 status)
> diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
> index 3785bc150546..ea833388f895 100644
> --- a/drivers/net/ethernet/google/gve/gve_rx.c
> +++ b/drivers/net/ethernet/google/gve/gve_rx.c
> @@ -593,6 +593,35 @@ static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
> return skb;
> }
>
> +static int gve_xdp_redirect(struct net_device *dev, struct gve_rx_ring *rx,
> + struct xdp_buff *orig, struct bpf_prog *xdp_prog)
> +{
> + int total_len, len = orig->data_end - orig->data;
> + int headroom = XDP_PACKET_HEADROOM;
> + struct xdp_buff new;
> + void *frame;
> + int err;
> +
> + total_len = headroom + SKB_DATA_ALIGN(len) +
> + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
> + frame = page_frag_alloc(&rx->page_cache, total_len, GFP_ATOMIC);
> + if (!frame) {
> + u64_stats_update_begin(&rx->statss);
> + rx->xdp_alloc_fails++;
> + u64_stats_update_end(&rx->statss);
> + return -ENOMEM;
> + }
> + xdp_init_buff(&new, total_len, &rx->xdp_rxq);
> + xdp_prepare_buff(&new, frame, headroom, len, false);
> + memcpy(new.data, orig->data, len);
Can you explain why the packet has to be copied into a newly allocated frag here, instead of redirecting the original buffer?
> +
> + err = xdp_do_redirect(dev, &new, xdp_prog);
> + if (err)
> + page_frag_free(frame);
> +
> + return err;
> +}
> +
(...)
next prev parent reply other threads:[~2023-02-14 1:03 UTC|newest]
Thread overview: 9+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-02-07 21:00 [PATCH net-next 0/4] gve: Add XDP support for GQI-QPL format Praveen Kaligineedi
2023-02-07 21:00 ` [PATCH net-next 1/4] gve: XDP support GQI-QPL: helper function changes Praveen Kaligineedi
2023-02-07 21:00 ` [PATCH net-next 2/4] gve: Add XDP DROP and TX support for GQI-QPL format Praveen Kaligineedi
2023-02-09 5:54 ` Jakub Kicinski
2023-02-14 0:46 ` Maciej Fijalkowski
2023-02-07 21:00 ` [PATCH net-next 3/4] gve: Add XDP REDIRECT " Praveen Kaligineedi
2023-02-14 1:03 ` Maciej Fijalkowski [this message]
2023-02-07 21:00 ` [PATCH net-next 4/4] gve: Add AF_XDP zero-copy " Praveen Kaligineedi
2023-02-14 14:46 ` Maciej Fijalkowski
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=Y+rd5ljmJuNPDwv1@boxer \
--to=maciej.fijalkowski@intel.com \
--cc=davem@davemloft.net \
--cc=jeroendb@google.com \
--cc=kuba@kernel.org \
--cc=netdev@vger.kernel.org \
--cc=pkaligineedi@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).