From: Jason Wang <jasowang@redhat.com>
To: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: netdev@vger.kernel.org, "Michael S. Tsirkin" <mst@redhat.com>,
	"Eugenio Pérez" <eperezma@redhat.com>,
	"David S. Miller" <davem@davemloft.net>,
	"Eric Dumazet" <edumazet@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Paolo Abeni" <pabeni@redhat.com>,
	"Alexei Starovoitov" <ast@kernel.org>,
	"Daniel Borkmann" <daniel@iogearbox.net>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"John Fastabend" <john.fastabend@gmail.com>,
	virtualization@lists.linux.dev, bpf@vger.kernel.org
Subject: Re: [PATCH net-next v5 15/15] virtio_net: xsk: rx: support recv small mode
Date: Mon, 17 Jun 2024 15:10:48 +0800	[thread overview]
Message-ID: <CACGkMEtzWQLN9D9+5jZFcn4MNNfDPQ77TK3D5B78NXPyq5u-Gg@mail.gmail.com> (raw)
In-Reply-To: <20240614063933.108811-16-xuanzhuo@linux.alibaba.com>

On Fri, Jun 14, 2024 at 2:39 PM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> The virtnet_xdp_handler() is re-used. But:
>
> 1. We need to copy the data to create an skb for XDP_PASS.
> 2. We need to call xsk_buff_free() to release the buffer.
> 3. The handling of the xdp_buff is different.
>
> If we pushed this logic into the existing receive handlers (mergeable and
> small), we would have to maintain code scattered across the mergeable and
> small (and big) paths. So I think it is a good choice to put the xsk code
> into an independent function.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> ---
>  drivers/net/virtio_net.c | 142 +++++++++++++++++++++++++++++++++++++--
>  1 file changed, 138 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 4e5645d8bb7d..72c4d2f0c0ea 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -534,8 +534,10 @@ struct virtio_net_common_hdr {
>
>  static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
>  static void virtnet_xsk_completed(struct send_queue *sq, int num);
> -static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
> -                                  struct xsk_buff_pool *pool, gfp_t gfp);
> +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
> +                              struct net_device *dev,
> +                              unsigned int *xdp_xmit,
> +                              struct virtnet_rq_stats *stats);
>
>  enum virtnet_xmit_type {
>         VIRTNET_XMIT_TYPE_SKB,
> @@ -1218,6 +1220,11 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
>
>         rq = &vi->rq[i];
>
> +       if (rq->xsk.pool) {
> +               xsk_buff_free((struct xdp_buff *)buf);
> +               return;
> +       }
> +
>         if (!vi->big_packets || vi->mergeable_rx_bufs)
>                 virtnet_rq_unmap(rq, buf, 0);
>
> @@ -1308,6 +1315,120 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
>         sg->length = len;
>  }
>
> +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
> +                                  struct receive_queue *rq, void *buf, u32 len)
> +{
> +       struct xdp_buff *xdp;
> +       u32 bufsize;
> +
> +       xdp = (struct xdp_buff *)buf;
> +
> +       bufsize = xsk_pool_get_rx_frame_size(rq->xsk.pool) + vi->hdr_len;
> +
> +       if (unlikely(len > bufsize)) {
> +               pr_debug("%s: rx error: len %u exceeds truesize %u\n",
> +                        vi->dev->name, len, bufsize);
> +               DEV_STATS_INC(vi->dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       xsk_buff_set_size(xdp, len);
> +       xsk_buff_dma_sync_for_cpu(xdp);
> +
> +       return xdp;
> +}
> +
> +static struct sk_buff *xdp_construct_skb(struct receive_queue *rq,
> +                                        struct xdp_buff *xdp)
> +{
> +       unsigned int metasize = xdp->data - xdp->data_meta;
> +       struct sk_buff *skb;
> +       unsigned int size;
> +
> +       size = xdp->data_end - xdp->data_hard_start;
> +       skb = napi_alloc_skb(&rq->napi, size);
> +       if (unlikely(!skb)) {
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
> +
> +       size = xdp->data_end - xdp->data_meta;
> +       memcpy(__skb_put(skb, size), xdp->data_meta, size);
> +
> +       if (metasize) {
> +               __skb_pull(skb, metasize);
> +               skb_metadata_set(skb, metasize);
> +       }
> +
> +       xsk_buff_free(xdp);
> +
> +       return skb;
> +}
> +
> +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
> +                                                struct receive_queue *rq, struct xdp_buff *xdp,
> +                                                unsigned int *xdp_xmit,
> +                                                struct virtnet_rq_stats *stats)
> +{
> +       struct bpf_prog *prog;
> +       u32 ret;
> +
> +       ret = XDP_PASS;
> +       rcu_read_lock();
> +       prog = rcu_dereference(rq->xdp_prog);
> +       if (prog)
> +               ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
> +       rcu_read_unlock();
> +
> +       switch (ret) {
> +       case XDP_PASS:
> +               return xdp_construct_skb(rq, xdp);
> +
> +       case XDP_TX:
> +       case XDP_REDIRECT:
> +               return NULL;
> +
> +       default:
> +               /* drop packet */
> +               xsk_buff_free(xdp);
> +               u64_stats_inc(&stats->drops);
> +               return NULL;
> +       }
> +}

Let's use a separate patch for this, to decouple the new functions from the refactoring.

Or even use a separate series for rx zerocopy.

> +
> +static struct sk_buff *virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
> +                                              void *buf, u32 len,
> +                                              unsigned int *xdp_xmit,
> +                                              struct virtnet_rq_stats *stats)
> +{
> +       struct net_device *dev = vi->dev;
> +       struct sk_buff *skb = NULL;
> +       struct xdp_buff *xdp;
> +
> +       len -= vi->hdr_len;
> +
> +       u64_stats_add(&stats->bytes, len);
> +
> +       xdp = buf_to_xdp(vi, rq, buf, len);
> +       if (!xdp)
> +               return NULL;

Don't we need to check if XDP is enabled before those operations?
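
To make the question concrete, here is a rough, untested sketch of the kind
of check I mean (virtnet_rq_xdp_enabled() is a made-up helper name, and what
the no-prog path should then do - skip the xsk handling or still go through
the copy path - is exactly the open point):

/* Hypothetical helper, only to illustrate the question above: do we want
 * a check like this before the xsk_buff_*() handling in buf_to_xdp()?
 * It is a plain NULL check that never dereferences the program, so
 * rcu_access_pointer() is enough and no rcu_read_lock() is needed.
 */
static bool virtnet_rq_xdp_enabled(struct receive_queue *rq)
{
	return !!rcu_access_pointer(rq->xdp_prog);
}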

> +
> +       if (unlikely(len < ETH_HLEN)) {
> +               pr_debug("%s: short packet %i\n", dev->name, len);
> +               DEV_STATS_INC(dev, rx_length_errors);
> +               xsk_buff_free(xdp);
> +               return NULL;
> +       }
> +
> +       if (!vi->mergeable_rx_bufs)
> +               skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
> +
> +       return skb;
> +}
> +
>  static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
>                                    struct xsk_buff_pool *pool, gfp_t gfp)
>  {
> @@ -2713,9 +2834,22 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>         void *buf;
>         int i;
>
> -       if (!vi->big_packets || vi->mergeable_rx_bufs) {
> -               void *ctx;
> +       if (rq->xsk.pool) {
> +               struct sk_buff *skb;
> +
> +               while (packets < budget) {
> +                       buf = virtqueue_get_buf(rq->vq, &len);
> +                       if (!buf)
> +                               break;
>
> +                       skb = virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, &stats);

The function name is confusing; for example, xsk might not even be enabled here.

> +                       if (skb)
> +                               virtnet_receive_done(vi, rq, skb);
> +
> +                       packets++;
> +               }
> +       } else if (!vi->big_packets || vi->mergeable_rx_bufs) {
> +               void *ctx;
>                 while (packets < budget &&
>                        (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
>                         receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
> --
> 2.32.0.3.g01195cf9f
>

Thanks

