From: Jason Wang <jasowang@redhat.com>
To: stefanha@redhat.com, qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>,
	alex.williamson@redhat.com,
	"Michael S . Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>
Subject: [PULL 02/26] virtio-net: fix TX timer with tx_burst
Date: Fri, 28 Oct 2022 13:48:11 +0800
Message-ID: <20221028054835.29674-3-jasowang@redhat.com>
In-Reply-To: <20221028054835.29674-1-jasowang@redhat.com>

From: Laurent Vivier <lvivier@redhat.com>

When virtio_net_flush_tx() reaches the tx_burst limit, the queue is
not fully flushed and nothing restarts the timer.

Fix that by doing for the TX timer what we already do for the bottom
half TX path: rearm the timer if we find any packets to send during
the virtio_net_flush_tx() call.
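
For orientation before reading the diff, here is a minimal, compilable
sketch of the control flow that virtio_net_tx_timer() gains with this
patch. flush_tx(), rearm_timer() and the notification helpers are
hypothetical stand-ins for the QEMU internals (virtio_net_flush_tx(),
timer_mod(), virtio_queue_set_notification()); only the branching
mirrors the patch, the real code is below.

    #include <stdio.h>

    /* Stand-ins for the QEMU helpers; hypothetical, for illustration only. */
    static int  flush_tx(void)             { return 0; }  /* packets flushed, or <0 on error */
    static void rearm_timer(void)          { puts("rearm timer"); }
    static void enable_notification(void)  { puts("notification on"); }
    static void disable_notification(void) { puts("notification off"); }

    /* Control flow that virtio_net_tx_timer() gains with this patch. */
    static void tx_timer_tick(int tx_burst)
    {
        int ret = flush_tx();
        if (ret < 0) {                /* -EBUSY / -EINVAL: nothing more to do */
            return;
        }
        if (ret >= tx_burst) {        /* burst limit hit: ring not fully drained */
            rearm_timer();            /* keep flushing on the next tick */
            return;
        }
        enable_notification();        /* under the limit: accept guest kicks again */
        if (flush_tx() > 0) {         /* packets raced in while notification was off */
            disable_notification();
            rearm_timer();            /* guest is still active, poll once more */
        }
    }

    int main(void) { tx_timer_tick(256); return 0; }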

Fixes: e3f30488e5f8 ("virtio-net: Limit number of packets sent per TX flush")
Cc: alex.williamson@redhat.com
Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/virtio-net.c | 50 +++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 41 insertions(+), 9 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 1fbf2f3..b6903ae 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -2536,14 +2536,19 @@ static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
 
     virtio_queue_set_notification(q->tx_vq, 1);
     ret = virtio_net_flush_tx(q);
-    if (q->tx_bh && ret >= n->tx_burst) {
+    if (ret >= n->tx_burst) {
         /*
          * the flush has been stopped by tx_burst
          * we will not receive notification for the
          * remainining part, so re-schedule
          */
         virtio_queue_set_notification(q->tx_vq, 0);
-        qemu_bh_schedule(q->tx_bh);
+        if (q->tx_bh) {
+            qemu_bh_schedule(q->tx_bh);
+        } else {
+            timer_mod(q->tx_timer,
+                      qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        }
         q->tx_waiting = 1;
     }
 }
@@ -2644,6 +2649,8 @@ drop:
     return num_packets;
 }
 
+static void virtio_net_tx_timer(void *opaque);
+
 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIONet *n = VIRTIO_NET(vdev);
@@ -2661,15 +2668,13 @@ static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
     }
 
     if (q->tx_waiting) {
-        virtio_queue_set_notification(vq, 1);
+        /* We already have queued packets, immediately flush */
         timer_del(q->tx_timer);
-        q->tx_waiting = 0;
-        if (virtio_net_flush_tx(q) == -EINVAL) {
-            return;
-        }
+        virtio_net_tx_timer(q);
     } else {
+        /* re-arm timer to flush it (and more) on next tick */
         timer_mod(q->tx_timer,
-                       qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
         q->tx_waiting = 1;
         virtio_queue_set_notification(vq, 0);
     }
@@ -2702,6 +2707,8 @@ static void virtio_net_tx_timer(void *opaque)
     VirtIONetQueue *q = opaque;
     VirtIONet *n = q->n;
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int ret;
+
     /* This happens when device was stopped but BH wasn't. */
     if (!vdev->vm_running) {
         /* Make sure tx waiting is set, so we'll run when restarted. */
@@ -2716,8 +2723,33 @@ static void virtio_net_tx_timer(void *opaque)
         return;
     }
 
+    ret = virtio_net_flush_tx(q);
+    if (ret == -EBUSY || ret == -EINVAL) {
+        return;
+    }
+    /*
+     * If we flush a full burst of packets, assume there are
+     * more coming and immediately rearm
+     */
+    if (ret >= n->tx_burst) {
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+        return;
+    }
+    /*
+     * If less than a full burst, re-enable notification and flush
+     * anything that may have come in while we weren't looking.  If
+     * we find something, assume the guest is still active and rearm
+     */
     virtio_queue_set_notification(q->tx_vq, 1);
-    virtio_net_flush_tx(q);
+    ret = virtio_net_flush_tx(q);
+    if (ret > 0) {
+        virtio_queue_set_notification(q->tx_vq, 0);
+        q->tx_waiting = 1;
+        timer_mod(q->tx_timer,
+                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
+    }
 }
 
 static void virtio_net_tx_bh(void *opaque)
-- 
2.7.4

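This path is only taken when the device is configured with the
timer-based TX algorithm rather than the default bottom half, e.g.
-device virtio-net-pci,tx=timer (the x-txtimer and x-txburst
properties, assuming the current QEMU property names, tune the
interval and the burst size). The bottom-half path received the
equivalent fix in patch 01/26 of this series.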


Thread overview: 29+ messages
2022-10-28  5:48 [PULL 00/26] Net patches Jason Wang
2022-10-28  5:48 ` [PULL 01/26] virtio-net: fix bottom-half packet TX on asynchronous completion Jason Wang
2022-10-28  5:48 ` [PULL 02/26] virtio-net: fix TX timer with tx_burst Jason Wang [this message]
2022-10-28  5:48 ` [PULL 03/26] vdpa: Delete duplicated vdpa_feature_bits entry Jason Wang
2022-10-28  5:48 ` [PULL 04/26] vdpa: Remove shadow CVQ command check Jason Wang
2022-10-28  5:48 ` [PULL 05/26] vhost-vdpa: allow passing opened vhostfd to vhost-vdpa Jason Wang
2022-10-31 13:22   ` Peter Maydell
2022-10-28  5:48 ` [PULL 06/26] net: improve error message for missing netdev backend Jason Wang
2022-10-28  5:48 ` [PULL 07/26] vhost: allocate event_idx fields on vring Jason Wang
2022-10-28  5:48 ` [PULL 08/26] vhost: toggle device callbacks using used event idx Jason Wang
2022-10-28  5:48 ` [PULL 09/26] vhost: use avail event idx on vhost_svq_kick Jason Wang
2022-10-28  5:48 ` [PULL 10/26] vhost: Accept event idx flag Jason Wang
2022-10-28  5:48 ` [PULL 11/26] net: introduce convert_host_port() Jason Wang
2022-10-28  5:48 ` [PULL 12/26] net: remove the @errp argument of net_client_inits() Jason Wang
2022-10-28  5:48 ` [PULL 13/26] net: simplify net_client_parse() error management Jason Wang
2022-10-28  5:48 ` [PULL 14/26] qapi: net: introduce a way to bypass qemu_opts_parse_noisily() Jason Wang
2022-10-28  5:48 ` [PULL 15/26] net: introduce qemu_set_info_str() function Jason Wang
2022-10-28  5:48 ` [PULL 16/26] qapi: net: add stream and dgram netdevs Jason Wang
2022-10-28  5:48 ` [PULL 17/26] net: socket: Don't ignore EINVAL on netdev socket connection Jason Wang
2022-10-28  5:48 ` [PULL 18/26] net: stream: " Jason Wang
2022-10-28  5:48 ` [PULL 19/26] net: stream: add unix socket Jason Wang
2022-10-28  5:48 ` [PULL 20/26] net: dgram: make dgram_dst generic Jason Wang
2022-10-28  5:48 ` [PULL 21/26] net: dgram: move mcast specific code from net_socket_fd_init_dgram() Jason Wang
2022-10-28  5:48 ` [PULL 22/26] net: dgram: add unix socket Jason Wang
2022-10-28  5:48 ` [PULL 23/26] qemu-sockets: move and rename SocketAddress_to_str() Jason Wang
2022-10-28  5:48 ` [PULL 24/26] qemu-sockets: update socket_uri() and socket_parse() to be consistent Jason Wang
2022-10-28  5:48 ` [PULL 25/26] net: stream: move to QIO to enable additional parameters Jason Wang
2022-10-28  5:48 ` [PULL 26/26] net: stream: add QAPI events to report connection state Jason Wang
2022-10-31 10:13 ` [PULL 00/26] Net patches Stefan Hajnoczi
