virtualization.lists.linux-foundation.org archive mirror
 help / color / mirror / Atom feed
* [PATCH net, stable v1 0/3] add checking sq is full inside xdp xmit
@ 2023-03-08  2:49 Xuan Zhuo
  2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
                   ` (2 more replies)
  0 siblings, 3 replies; 10+ messages in thread
From: Xuan Zhuo @ 2023-03-08  2:49 UTC (permalink / raw)
  To: netdev
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, John Fastabend, Alexei Starovoitov,
	virtualization, Eric Dumazet, Alexander Duyck, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

If the xdp xmit queue is not an independent queue, then when xdp xmit
has used up all the descriptors, transmission from __dev_queue_xmit()
may encounter the following error.

net ens4: Unexpected TXQ (0) queue failure: -28

This patch adds a check of whether the sq is full inside xdp xmit.

Thanks.

v1:
    1. rename to check_sq_full_and_disable
    2. reorder some funcs to avoid declaration

Xuan Zhuo (3):
  virtio_net: reorder some funcs
  virtio_net: separate the logic of checking whether sq is full
  virtio_net: add checking sq is full inside xdp xmit

 drivers/net/virtio_net.c | 155 +++++++++++++++++++++------------------
 1 file changed, 85 insertions(+), 70 deletions(-)

--
2.32.0.3.g01195cf9f

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH net, stable v1 1/3] virtio_net: reorder some funcs
  2023-03-08  2:49 [PATCH net, stable v1 0/3] add checking sq is full inside xdp xmit Xuan Zhuo
@ 2023-03-08  2:49 ` Xuan Zhuo
  2023-03-08  5:20   ` Jason Wang
  2023-03-08  6:53   ` Michael S. Tsirkin
  2023-03-08  2:49 ` [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full Xuan Zhuo
  2023-03-08  2:49 ` [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit Xuan Zhuo
  2 siblings, 2 replies; 10+ messages in thread
From: Xuan Zhuo @ 2023-03-08  2:49 UTC (permalink / raw)
  To: netdev
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, John Fastabend, Alexei Starovoitov,
	virtualization, Eric Dumazet, Alexander Duyck, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

The purpose of this reordering is to facilitate the subsequent addition
of new functions without requiring separate forward declarations.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
---
 drivers/net/virtio_net.c | 92 ++++++++++++++++++++--------------------
 1 file changed, 46 insertions(+), 46 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index fb5e68ed3ec2..8b31a04052f2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -545,6 +545,52 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
+static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+{
+	unsigned int len;
+	unsigned int packets = 0;
+	unsigned int bytes = 0;
+	void *ptr;
+
+	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		if (likely(!is_xdp_frame(ptr))) {
+			struct sk_buff *skb = ptr;
+
+			pr_debug("Sent skb %p\n", skb);
+
+			bytes += skb->len;
+			napi_consume_skb(skb, in_napi);
+		} else {
+			struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+			bytes += xdp_get_frame_len(frame);
+			xdp_return_frame(frame);
+		}
+		packets++;
+	}
+
+	/* Avoid overhead when no packets have been processed
+	 * happens when called speculatively from start_xmit.
+	 */
+	if (!packets)
+		return;
+
+	u64_stats_update_begin(&sq->stats.syncp);
+	sq->stats.bytes += bytes;
+	sq->stats.packets += packets;
+	u64_stats_update_end(&sq->stats.syncp);
+}
+
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				   struct send_queue *sq,
 				   struct xdp_frame *xdpf)
@@ -1714,52 +1760,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
 	return stats.packets;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
-{
-	unsigned int len;
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-	void *ptr;
-
-	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		if (likely(!is_xdp_frame(ptr))) {
-			struct sk_buff *skb = ptr;
-
-			pr_debug("Sent skb %p\n", skb);
-
-			bytes += skb->len;
-			napi_consume_skb(skb, in_napi);
-		} else {
-			struct xdp_frame *frame = ptr_to_xdp(ptr);
-
-			bytes += xdp_get_frame_len(frame);
-			xdp_return_frame(frame);
-		}
-		packets++;
-	}
-
-	/* Avoid overhead when no packets have been processed
-	 * happens when called speculatively from start_xmit.
-	 */
-	if (!packets)
-		return;
-
-	u64_stats_update_begin(&sq->stats.syncp);
-	sq->stats.bytes += bytes;
-	sq->stats.packets += packets;
-	u64_stats_update_end(&sq->stats.syncp);
-}
-
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
-- 
2.32.0.3.g01195cf9f

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full
  2023-03-08  2:49 [PATCH net, stable v1 0/3] add checking sq is full inside xdp xmit Xuan Zhuo
  2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
@ 2023-03-08  2:49 ` Xuan Zhuo
  2023-03-08  5:20   ` Jason Wang
  2023-03-08  2:49 ` [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit Xuan Zhuo
  2 siblings, 1 reply; 10+ messages in thread
From: Xuan Zhuo @ 2023-03-08  2:49 UTC (permalink / raw)
  To: netdev
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, John Fastabend, Alexei Starovoitov,
	virtualization, Eric Dumazet, Alexander Duyck, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

Separate out the logic that checks whether the sq is full. A subsequent
patch will reuse this function.

Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
---
 drivers/net/virtio_net.c | 60 ++++++++++++++++++++++++----------------
 1 file changed, 36 insertions(+), 24 deletions(-)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8b31a04052f2..46bbddaadb0d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -591,6 +591,41 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
 		return false;
 }
 
+static void check_sq_full_and_disable(struct virtnet_info *vi,
+				      struct net_device *dev,
+				      struct send_queue *sq)
+{
+	bool use_napi = sq->napi.weight;
+	int qnum;
+
+	qnum = sq - vi->sq;
+
+	/* If running out of space, stop queue to avoid getting packets that we
+	 * are then unable to transmit.
+	 * An alternative would be to force queuing layer to requeue the skb by
+	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
+	 * returned in a normal path of operation: it means that driver is not
+	 * maintaining the TX queue stop/start state properly, and causes
+	 * the stack to do a non-trivial amount of useless work.
+	 * Since most packets only take 1 or 2 ring slots, stopping the queue
+	 * early means 16 slots are typically wasted.
+	 */
+	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
+		netif_stop_subqueue(dev, qnum);
+		if (use_napi) {
+			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+				virtqueue_napi_schedule(&sq->napi, sq->vq);
+		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+			/* More just got used, free them then recheck. */
+			free_old_xmit_skbs(sq, false);
+			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
+				netif_start_subqueue(dev, qnum);
+				virtqueue_disable_cb(sq->vq);
+			}
+		}
+	}
+}
+
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				   struct send_queue *sq,
 				   struct xdp_frame *xdpf)
@@ -1989,30 +2024,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 		nf_reset_ct(skb);
 	}
 
-	/* If running out of space, stop queue to avoid getting packets that we
-	 * are then unable to transmit.
-	 * An alternative would be to force queuing layer to requeue the skb by
-	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
-	 * returned in a normal path of operation: it means that driver is not
-	 * maintaining the TX queue stop/start state properly, and causes
-	 * the stack to do a non-trivial amount of useless work.
-	 * Since most packets only take 1 or 2 ring slots, stopping the queue
-	 * early means 16 slots are typically wasted.
-	 */
-	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
-		netif_stop_subqueue(dev, qnum);
-		if (use_napi) {
-			if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
-				virtqueue_napi_schedule(&sq->napi, sq->vq);
-		} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
-			/* More just got used, free them then recheck. */
-			free_old_xmit_skbs(sq, false);
-			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
-				netif_start_subqueue(dev, qnum);
-				virtqueue_disable_cb(sq->vq);
-			}
-		}
-	}
+	check_sq_full_and_disable(vi, dev, sq);
 
 	if (kick || netif_xmit_stopped(txq)) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
-- 
2.32.0.3.g01195cf9f

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit
  2023-03-08  2:49 [PATCH net, stable v1 0/3] add checking sq is full inside xdp xmit Xuan Zhuo
  2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
  2023-03-08  2:49 ` [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full Xuan Zhuo
@ 2023-03-08  2:49 ` Xuan Zhuo
  2023-03-08  5:20   ` Jason Wang
       [not found]   ` <7eea924e-5cc3-8584-af95-04587f303f8f@huawei.com>
  2 siblings, 2 replies; 10+ messages in thread
From: Xuan Zhuo @ 2023-03-08  2:49 UTC (permalink / raw)
  To: netdev
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, John Fastabend, Alexei Starovoitov,
	virtualization, Eric Dumazet, Alexander Duyck, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

If the xdp xmit queue is not an independent queue, then when xdp xmit
has used up all the descriptors, transmission from __dev_queue_xmit()
may encounter the following error.

net ens4: Unexpected TXQ (0) queue failure: -28

This patch adds a check whether sq is full in xdp xmit.

Fixes: 56434a01b12e ("virtio_net: add XDP_TX support")
Reported-by: Yichun Zhang <yichun@openresty.com>
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
---
 drivers/net/virtio_net.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 46bbddaadb0d..1a309cfb4976 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -767,6 +767,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	}
 	ret = nxmit;
 
+	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
+		check_sq_full_and_disable(vi, dev, sq);
+
 	if (flags & XDP_XMIT_FLUSH) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
 			kicks = 1;
-- 
2.32.0.3.g01195cf9f

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 1/3] virtio_net: reorder some funcs
  2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
@ 2023-03-08  5:20   ` Jason Wang
  2023-03-08  6:53   ` Michael S. Tsirkin
  1 sibling, 0 replies; 10+ messages in thread
From: Jason Wang @ 2023-03-08  5:20 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, netdev, Alexander Duyck, John Fastabend,
	Alexei Starovoitov, virtualization, Eric Dumazet, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

On Wed, Mar 8, 2023 at 10:49 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> The purpose of this is to facilitate the subsequent addition of new
> functions without introducing a separate declaration.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Not sure such reordering is suitable for -stable.

Thanks

> ---
>  drivers/net/virtio_net.c | 92 ++++++++++++++++++++--------------------
>  1 file changed, 46 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index fb5e68ed3ec2..8b31a04052f2 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -545,6 +545,52 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>         return skb;
>  }
>
> +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> +{
> +       unsigned int len;
> +       unsigned int packets = 0;
> +       unsigned int bytes = 0;
> +       void *ptr;
> +
> +       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +               if (likely(!is_xdp_frame(ptr))) {
> +                       struct sk_buff *skb = ptr;
> +
> +                       pr_debug("Sent skb %p\n", skb);
> +
> +                       bytes += skb->len;
> +                       napi_consume_skb(skb, in_napi);
> +               } else {
> +                       struct xdp_frame *frame = ptr_to_xdp(ptr);
> +
> +                       bytes += xdp_get_frame_len(frame);
> +                       xdp_return_frame(frame);
> +               }
> +               packets++;
> +       }
> +
> +       /* Avoid overhead when no packets have been processed
> +        * happens when called speculatively from start_xmit.
> +        */
> +       if (!packets)
> +               return;
> +
> +       u64_stats_update_begin(&sq->stats.syncp);
> +       sq->stats.bytes += bytes;
> +       sq->stats.packets += packets;
> +       u64_stats_update_end(&sq->stats.syncp);
> +}
> +
> +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> +{
> +       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> +               return false;
> +       else if (q < vi->curr_queue_pairs)
> +               return true;
> +       else
> +               return false;
> +}
> +
>  static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>                                    struct send_queue *sq,
>                                    struct xdp_frame *xdpf)
> @@ -1714,52 +1760,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>         return stats.packets;
>  }
>
> -static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> -{
> -       unsigned int len;
> -       unsigned int packets = 0;
> -       unsigned int bytes = 0;
> -       void *ptr;
> -
> -       while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -               if (likely(!is_xdp_frame(ptr))) {
> -                       struct sk_buff *skb = ptr;
> -
> -                       pr_debug("Sent skb %p\n", skb);
> -
> -                       bytes += skb->len;
> -                       napi_consume_skb(skb, in_napi);
> -               } else {
> -                       struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -                       bytes += xdp_get_frame_len(frame);
> -                       xdp_return_frame(frame);
> -               }
> -               packets++;
> -       }
> -
> -       /* Avoid overhead when no packets have been processed
> -        * happens when called speculatively from start_xmit.
> -        */
> -       if (!packets)
> -               return;
> -
> -       u64_stats_update_begin(&sq->stats.syncp);
> -       sq->stats.bytes += bytes;
> -       sq->stats.packets += packets;
> -       u64_stats_update_end(&sq->stats.syncp);
> -}
> -
> -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> -{
> -       if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> -               return false;
> -       else if (q < vi->curr_queue_pairs)
> -               return true;
> -       else
> -               return false;
> -}
> -
>  static void virtnet_poll_cleantx(struct receive_queue *rq)
>  {
>         struct virtnet_info *vi = rq->vq->vdev->priv;
> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full
  2023-03-08  2:49 ` [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full Xuan Zhuo
@ 2023-03-08  5:20   ` Jason Wang
  0 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2023-03-08  5:20 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, netdev, Alexander Duyck, John Fastabend,
	Alexei Starovoitov, virtualization, Eric Dumazet, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

On Wed, Mar 8, 2023 at 10:49 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> Separate the logic of checking whether sq is full. The subsequent patch
> will reuse this func.
>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
> Acked-by: Michael S. Tsirkin <mst@redhat.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> ---
>  drivers/net/virtio_net.c | 60 ++++++++++++++++++++++++----------------
>  1 file changed, 36 insertions(+), 24 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 8b31a04052f2..46bbddaadb0d 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -591,6 +591,41 @@ static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
>                 return false;
>  }
>
> +static void check_sq_full_and_disable(struct virtnet_info *vi,
> +                                     struct net_device *dev,
> +                                     struct send_queue *sq)
> +{
> +       bool use_napi = sq->napi.weight;
> +       int qnum;
> +
> +       qnum = sq - vi->sq;
> +
> +       /* If running out of space, stop queue to avoid getting packets that we
> +        * are then unable to transmit.
> +        * An alternative would be to force queuing layer to requeue the skb by
> +        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> +        * returned in a normal path of operation: it means that driver is not
> +        * maintaining the TX queue stop/start state properly, and causes
> +        * the stack to do a non-trivial amount of useless work.
> +        * Since most packets only take 1 or 2 ring slots, stopping the queue
> +        * early means 16 slots are typically wasted.
> +        */
> +       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> +               netif_stop_subqueue(dev, qnum);
> +               if (use_napi) {
> +                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> +                               virtqueue_napi_schedule(&sq->napi, sq->vq);
> +               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> +                       /* More just got used, free them then recheck. */
> +                       free_old_xmit_skbs(sq, false);
> +                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> +                               netif_start_subqueue(dev, qnum);
> +                               virtqueue_disable_cb(sq->vq);
> +                       }
> +               }
> +       }
> +}
> +
>  static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>                                    struct send_queue *sq,
>                                    struct xdp_frame *xdpf)
> @@ -1989,30 +2024,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
>                 nf_reset_ct(skb);
>         }
>
> -       /* If running out of space, stop queue to avoid getting packets that we
> -        * are then unable to transmit.
> -        * An alternative would be to force queuing layer to requeue the skb by
> -        * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> -        * returned in a normal path of operation: it means that driver is not
> -        * maintaining the TX queue stop/start state properly, and causes
> -        * the stack to do a non-trivial amount of useless work.
> -        * Since most packets only take 1 or 2 ring slots, stopping the queue
> -        * early means 16 slots are typically wasted.
> -        */
> -       if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
> -               netif_stop_subqueue(dev, qnum);
> -               if (use_napi) {
> -                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
> -                               virtqueue_napi_schedule(&sq->napi, sq->vq);
> -               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
> -                       /* More just got used, free them then recheck. */
> -                       free_old_xmit_skbs(sq, false);
> -                       if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
> -                               netif_start_subqueue(dev, qnum);
> -                               virtqueue_disable_cb(sq->vq);
> -                       }
> -               }
> -       }
> +       check_sq_full_and_disable(vi, dev, sq);
>
>         if (kick || netif_xmit_stopped(txq)) {
>                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit
  2023-03-08  2:49 ` [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit Xuan Zhuo
@ 2023-03-08  5:20   ` Jason Wang
       [not found]   ` <7eea924e-5cc3-8584-af95-04587f303f8f@huawei.com>
  1 sibling, 0 replies; 10+ messages in thread
From: Jason Wang @ 2023-03-08  5:20 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, netdev, Alexander Duyck, John Fastabend,
	Alexei Starovoitov, virtualization, Eric Dumazet, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

On Wed, Mar 8, 2023 at 10:49 AM Xuan Zhuo <xuanzhuo@linux.alibaba.com> wrote:
>
> If the queue of xdp xmit is not an independent queue, then when the xdp
> xmit used all the desc, the xmit from the __dev_queue_xmit() may encounter
> the following error.
>
> net ens4: Unexpected TXQ (0) queue failure: -28
>
> This patch adds a check whether sq is full in xdp xmit.
>
> Fixes: 56434a01b12e ("virtio_net: add XDP_TX support")
> Reported-by: Yichun Zhang <yichun@openresty.com>
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
> Acked-by: Michael S. Tsirkin <mst@redhat.com>

Acked-by: Jason Wang <jasowang@redhat.com>

Thanks

> ---
>  drivers/net/virtio_net.c | 3 +++
>  1 file changed, 3 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index 46bbddaadb0d..1a309cfb4976 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -767,6 +767,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
>         }
>         ret = nxmit;
>
> +       if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
> +               check_sq_full_and_disable(vi, dev, sq);
> +
>         if (flags & XDP_XMIT_FLUSH) {
>                 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
>                         kicks = 1;
> --
> 2.32.0.3.g01195cf9f
>

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 1/3] virtio_net: reorder some funcs
  2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
  2023-03-08  5:20   ` Jason Wang
@ 2023-03-08  6:53   ` Michael S. Tsirkin
  1 sibling, 0 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2023-03-08  6:53 UTC (permalink / raw)
  To: Xuan Zhuo
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann, netdev,
	John Fastabend, Alexei Starovoitov, virtualization, Eric Dumazet,
	Alexander Duyck, Jakub Kicinski, bpf, Paolo Abeni,
	David S. Miller

On Wed, Mar 08, 2023 at 10:49:33AM +0800, Xuan Zhuo wrote:
> The purpose of this is to facilitate the subsequent addition of new
> functions without introducing a separate declaration.
> 
> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>

Acked-by: Michael S. Tsirkin <mst@redhat.com>

this one isn't for stable naturally, stable can use forward declarations
instead.

> ---
>  drivers/net/virtio_net.c | 92 ++++++++++++++++++++--------------------
>  1 file changed, 46 insertions(+), 46 deletions(-)
> 
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index fb5e68ed3ec2..8b31a04052f2 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -545,6 +545,52 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
>  	return skb;
>  }
>  
> +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> +{
> +	unsigned int len;
> +	unsigned int packets = 0;
> +	unsigned int bytes = 0;
> +	void *ptr;
> +
> +	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> +		if (likely(!is_xdp_frame(ptr))) {
> +			struct sk_buff *skb = ptr;
> +
> +			pr_debug("Sent skb %p\n", skb);
> +
> +			bytes += skb->len;
> +			napi_consume_skb(skb, in_napi);
> +		} else {
> +			struct xdp_frame *frame = ptr_to_xdp(ptr);
> +
> +			bytes += xdp_get_frame_len(frame);
> +			xdp_return_frame(frame);
> +		}
> +		packets++;
> +	}
> +
> +	/* Avoid overhead when no packets have been processed
> +	 * happens when called speculatively from start_xmit.
> +	 */
> +	if (!packets)
> +		return;
> +
> +	u64_stats_update_begin(&sq->stats.syncp);
> +	sq->stats.bytes += bytes;
> +	sq->stats.packets += packets;
> +	u64_stats_update_end(&sq->stats.syncp);
> +}
> +
> +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> +{
> +	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> +		return false;
> +	else if (q < vi->curr_queue_pairs)
> +		return true;
> +	else
> +		return false;
> +}
> +
>  static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
>  				   struct send_queue *sq,
>  				   struct xdp_frame *xdpf)
> @@ -1714,52 +1760,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
>  	return stats.packets;
>  }
>  
> -static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
> -{
> -	unsigned int len;
> -	unsigned int packets = 0;
> -	unsigned int bytes = 0;
> -	void *ptr;
> -
> -	while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
> -		if (likely(!is_xdp_frame(ptr))) {
> -			struct sk_buff *skb = ptr;
> -
> -			pr_debug("Sent skb %p\n", skb);
> -
> -			bytes += skb->len;
> -			napi_consume_skb(skb, in_napi);
> -		} else {
> -			struct xdp_frame *frame = ptr_to_xdp(ptr);
> -
> -			bytes += xdp_get_frame_len(frame);
> -			xdp_return_frame(frame);
> -		}
> -		packets++;
> -	}
> -
> -	/* Avoid overhead when no packets have been processed
> -	 * happens when called speculatively from start_xmit.
> -	 */
> -	if (!packets)
> -		return;
> -
> -	u64_stats_update_begin(&sq->stats.syncp);
> -	sq->stats.bytes += bytes;
> -	sq->stats.packets += packets;
> -	u64_stats_update_end(&sq->stats.syncp);
> -}
> -
> -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
> -{
> -	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
> -		return false;
> -	else if (q < vi->curr_queue_pairs)
> -		return true;
> -	else
> -		return false;
> -}
> -
>  static void virtnet_poll_cleantx(struct receive_queue *rq)
>  {
>  	struct virtnet_info *vi = rq->vq->vdev->priv;
> -- 
> 2.32.0.3.g01195cf9f

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit
       [not found]   ` <7eea924e-5cc3-8584-af95-04587f303f8f@huawei.com>
@ 2023-03-08  7:14     ` Xuan Zhuo
       [not found]       ` <5a4564dc-af93-4305-49a4-5ca16d737bc3@huawei.com>
  0 siblings, 1 reply; 10+ messages in thread
From: Xuan Zhuo @ 2023-03-08  7:14 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann,
	Michael S. Tsirkin, netdev, John Fastabend, Alexei Starovoitov,
	virtualization, Eric Dumazet, Alexander Duyck, Jakub Kicinski,
	bpf, Paolo Abeni, David S. Miller

On Wed, 8 Mar 2023 14:59:36 +0800, Yunsheng Lin <linyunsheng@huawei.com> wrote:
> On 2023/3/8 10:49, Xuan Zhuo wrote:
> > If the queue of xdp xmit is not an independent queue, then when the xdp
> > xmit used all the desc, the xmit from the __dev_queue_xmit() may encounter
> > the following error.
> >
> > net ens4: Unexpected TXQ (0) queue failure: -28
> >
> > This patch adds a check whether sq is full in xdp xmit.
> >
> > Fixes: 56434a01b12e ("virtio_net: add XDP_TX support")
> > Reported-by: Yichun Zhang <yichun@openresty.com>
> > Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
> > Acked-by: Michael S. Tsirkin <mst@redhat.com>
> > ---
> >  drivers/net/virtio_net.c | 3 +++
> >  1 file changed, 3 insertions(+)
> >
> > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > index 46bbddaadb0d..1a309cfb4976 100644
> > --- a/drivers/net/virtio_net.c
> > +++ b/drivers/net/virtio_net.c
> > @@ -767,6 +767,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >  	}
> >  	ret = nxmit;
> >
> > +	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
> > +		check_sq_full_and_disable(vi, dev, sq);
> > +
>
> Sorry if I missed something obvious here.
>
> As the comment in start_xmit(), the current skb is added to the sq->vq, so
> NETDEV_TX_BUSY can not be returned.
>
> 	/* If running out of space, stop queue to avoid getting packets that we
> 	 * are then unable to transmit.
> 	 * An alternative would be to force queuing layer to requeue the skb by
> 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> 	 * returned in a normal path of operation: it means that driver is not
> 	 * maintaining the TX queue stop/start state properly, and causes
> 	 * the stack to do a non-trivial amount of useless work.
> 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
> 	 * early means 16 slots are typically wasted.
> 	 */
>
> It there any reason not to check the sq->vq->num_free at the begin of start_xmit(),
> if the space is not enough for the current skb, TX queue is stopped and NETDEV_TX_BUSY
> is return to the stack to requeue the current skb.
>
> It seems it is the pattern that most network driver follow, and it seems we can avoid
> calling check_sq_full_and_disable() in this patch and not wasting 16 slots as mentioned
> in the comment above.
>



 * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
 *                               struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
 *	the queue before that can happen; it's for obsolete devices and weird
 *	corner cases, but the stack really does a non-trivial amount
 *	of useless work if you return NETDEV_TX_BUSY.
 *	Required; cannot be NULL.

It does not affect XDP TX. It is just that there is some wasted work on
the protocol stack path.

For example, TCP will do some unnecessary work based on the return value here.

Thanks.


>
> >  	if (flags & XDP_XMIT_FLUSH) {
> >  		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
> >  			kicks = 1;
> >
_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit
       [not found]       ` <5a4564dc-af93-4305-49a4-5ca16d737bc3@huawei.com>
@ 2023-03-08 12:21         ` Michael S. Tsirkin
  0 siblings, 0 replies; 10+ messages in thread
From: Michael S. Tsirkin @ 2023-03-08 12:21 UTC (permalink / raw)
  To: Yunsheng Lin
  Cc: Yichun Zhang, Jesper Dangaard Brouer, Daniel Borkmann, netdev,
	John Fastabend, Alexei Starovoitov, virtualization, Eric Dumazet,
	Alexander Duyck, Jakub Kicinski, bpf, Paolo Abeni,
	David S. Miller

On Wed, Mar 08, 2023 at 04:13:12PM +0800, Yunsheng Lin wrote:
> On 2023/3/8 15:14, Xuan Zhuo wrote:
> > On Wed, 8 Mar 2023 14:59:36 +0800, Yunsheng Lin <linyunsheng@huawei.com> wrote:
> >> On 2023/3/8 10:49, Xuan Zhuo wrote:
> >>> If the queue of xdp xmit is not an independent queue, then when the xdp
> >>> xmit used all the desc, the xmit from the __dev_queue_xmit() may encounter
> >>> the following error.
> >>>
> >>> net ens4: Unexpected TXQ (0) queue failure: -28
> >>>
> >>> This patch adds a check whether sq is full in xdp xmit.
> >>>
> >>> Fixes: 56434a01b12e ("virtio_net: add XDP_TX support")
> >>> Reported-by: Yichun Zhang <yichun@openresty.com>
> >>> Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> >>> Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
> >>> Acked-by: Michael S. Tsirkin <mst@redhat.com>
> >>> ---
> >>>  drivers/net/virtio_net.c | 3 +++
> >>>  1 file changed, 3 insertions(+)
> >>>
> >>> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> >>> index 46bbddaadb0d..1a309cfb4976 100644
> >>> --- a/drivers/net/virtio_net.c
> >>> +++ b/drivers/net/virtio_net.c
> >>> @@ -767,6 +767,9 @@ static int virtnet_xdp_xmit(struct net_device *dev,
> >>>  	}
> >>>  	ret = nxmit;
> >>>
> >>> +	if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
> >>> +		check_sq_full_and_disable(vi, dev, sq);
> >>> +
> >>
> >> Sorry if I missed something obvious here.
> >>
> >> As the comment in start_xmit(), the current skb is added to the sq->vq, so
> >> NETDEV_TX_BUSY can not be returned.
> >>
> >> 	/* If running out of space, stop queue to avoid getting packets that we
> >> 	 * are then unable to transmit.
> >> 	 * An alternative would be to force queuing layer to requeue the skb by
> >> 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
> >> 	 * returned in a normal path of operation: it means that driver is not
> >> 	 * maintaining the TX queue stop/start state properly, and causes
> >> 	 * the stack to do a non-trivial amount of useless work.
> >> 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
> >> 	 * early means 16 slots are typically wasted.
> >> 	 */
> >>
> >> Is there any reason not to check the sq->vq->num_free at the beginning of start_xmit(),
> >> if the space is not enough for the current skb, TX queue is stopped and NETDEV_TX_BUSY
> >> is returned to the stack to requeue the current skb.
> >>
> >> It seems it is the pattern that most network drivers follow, and it seems we can avoid
> >> calling check_sq_full_and_disable() in this patch and not waste 16 slots as mentioned
> >> in the comment above.
> >>
> > 
> > 
> > 
> >  * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
> >  *                               struct net_device *dev);
> >  *	Called when a packet needs to be transmitted.
> >  *	Returns NETDEV_TX_OK.  Can return NETDEV_TX_BUSY, but you should stop
> >  *	the queue before that can happen; it's for obsolete devices and weird
> >  *	corner cases, but the stack really does a non-trivial amount
> >  *	of useless work if you return NETDEV_TX_BUSY.
> >  *	Required; cannot be NULL.
> 
> Thanks for the pointer. It is interesting; it seems most drivers are not following
> the suggestion.

Yes - I don't know why.

> I found out why the above comment was added, but I am not sure I understand
> what "non-trivial amount of useless work" means yet.
> https://lists.linuxfoundation.org/pipermail/virtualization/2015-April/029718.html

dev_requeue_skb 

-- 
MST

_______________________________________________
Virtualization mailing list
Virtualization@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/virtualization

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2023-03-08 12:21 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2023-03-08  2:49 [PATCH net, stable v1 0/3] add checking sq is full inside xdp xmit Xuan Zhuo
2023-03-08  2:49 ` [PATCH net, stable v1 1/3] virtio_net: reorder some funcs Xuan Zhuo
2023-03-08  5:20   ` Jason Wang
2023-03-08  6:53   ` Michael S. Tsirkin
2023-03-08  2:49 ` [PATCH net, stable v1 2/3] virtio_net: separate the logic of checking whether sq is full Xuan Zhuo
2023-03-08  5:20   ` Jason Wang
2023-03-08  2:49 ` [PATCH net, stable v1 3/3] virtio_net: add checking sq is full inside xdp xmit Xuan Zhuo
2023-03-08  5:20   ` Jason Wang
     [not found]   ` <7eea924e-5cc3-8584-af95-04587f303f8f@huawei.com>
2023-03-08  7:14     ` Xuan Zhuo
     [not found]       ` <5a4564dc-af93-4305-49a4-5ca16d737bc3@huawei.com>
2023-03-08 12:21         ` Michael S. Tsirkin

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).