From: Patrik Hermansson <phermansson@gmail.com>
To: qemu-devel@nongnu.org
Cc: Patrik Hermansson <phermansson@gmail.com>, mst@redhat.com
Subject: [Qemu-devel] [PATCH] virtio-net: Make virtio queue sizes configurable
Date: Fri, 27 Nov 2015 16:02:39 +0100
Message-ID: <1448636559-2140-1-git-send-email-phermansson@gmail.com>

This patch adds an option to specify the virtio queue sizes. Currently the
queue size is hard-coded to 256, which might not be suitable for all types
of applications. This patch makes it possible to specify a queue size between
256 and 1024.

The minimum value is chosen based on the current size of the virtio
queues. The maximum queue size is based on VIRTQUEUE_MAX_SIZE.
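
As an example, with this patch applied the queue sizes could be set per
device on the command line (a hypothetical invocation; only the
rx_virtqueue_sz and tx_virtqueue_sz properties come from this patch, the
surrounding -netdev and device options are placeholders):

  qemu-system-x86_64 -netdev tap,id=net0 \
      -device virtio-net-pci,netdev=net0,rx_virtqueue_sz=512,tx_virtqueue_sz=512
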
---
 hw/net/virtio-net.c            | 31 ++++++++++++++++++++++++++++---
 include/hw/virtio/virtio-net.h |  8 +++++++-
 2 files changed, 35 insertions(+), 4 deletions(-)

diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index a877614..c4fcb39 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1326,20 +1326,40 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_validate_queue_limit(uint16_t *queue_size)
+{
+    if (*queue_size > VIRTIO_QUEUE_SIZE_MAX) {
+        error_report("queue-size: %d, exceeds maximum allowed queue-size(%d),"
+                     "queue-size set to %d", *queue_size, VIRTIO_QUEUE_SIZE_MAX,
+                      VIRTIO_QUEUE_SIZE_MAX);
+        *queue_size = VIRTIO_QUEUE_SIZE_MAX;
+    } else if (*queue_size < VIRTIO_QUEUE_SIZE_MIN) {
+        error_report("queue-size: %d, below minimum allowed queue-size(%d),"
+                     "queue-size set to %d", *queue_size, VIRTIO_QUEUE_SIZE_MIN,
+                      VIRTIO_QUEUE_SIZE_MIN);
+        *queue_size = VIRTIO_QUEUE_SIZE_MIN;
+    }
+}
+
 static void virtio_net_add_queue(VirtIONet *n, int index)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(n);
 
-    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    virtio_net_validate_queue_limit(&n->net_conf.rx_virtqueue_sz);
+    virtio_net_validate_queue_limit(&n->net_conf.tx_virtqueue_sz);
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_virtqueue_sz,
+                                           virtio_net_handle_rx);
     if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+            virtio_add_queue(vdev, n->net_conf.tx_virtqueue_sz,
+                             virtio_net_handle_tx_timer);
         n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                               virtio_net_tx_timer,
                                               &n->vqs[index]);
     } else {
         n->vqs[index].tx_vq =
-            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+            virtio_add_queue(vdev, n->net_conf.tx_virtqueue_sz,
+                             virtio_net_handle_tx_bh);
         n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
     }
 
@@ -1826,6 +1846,11 @@ static Property virtio_net_properties[] = {
     DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                        TX_TIMER_INTERVAL),
     DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
+    DEFINE_PROP_UINT16("rx_virtqueue_sz", VirtIONet, net_conf.rx_virtqueue_sz,
+                       RX_VIRTQUEUE_SIZE),
+    DEFINE_PROP_UINT16("tx_virtqueue_sz", VirtIONet, net_conf.tx_virtqueue_sz,
+                       TX_VIRTQUEUE_SIZE),
+
     DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
     DEFINE_PROP_END_OF_LIST(),
 };
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index f3cc25f..dbcabbf 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -28,13 +28,19 @@
  * ensures fairness in the io path.  256 conveniently matches the
  * length of the TX queue and shows a good balance of performance
  * and latency. */
-#define TX_BURST 256
+#define VIRTIO_QUEUE_SIZE_MIN 256
+#define VIRTIO_QUEUE_SIZE_MAX VIRTQUEUE_MAX_SIZE
+#define RX_VIRTQUEUE_SIZE VIRTIO_QUEUE_SIZE_MIN
+#define TX_VIRTQUEUE_SIZE VIRTIO_QUEUE_SIZE_MIN
+#define TX_BURST TX_VIRTQUEUE_SIZE
 
 typedef struct virtio_net_conf
 {
     uint32_t txtimer;
     int32_t txburst;
     char *tx;
+    uint16_t rx_virtqueue_sz;
+    uint16_t tx_virtqueue_sz;
 } virtio_net_conf;
 
 /* Maximum packet size we can receive from tap device: header + 64k */
-- 
1.9.1

Thread overview: 8+ messages

2015-11-27 15:02 Patrik Hermansson [this message]
2016-03-14 11:27 ` [Qemu-devel] [PATCH] virtio-net: Make virtio queue sizes configurable Greg Kurz
2016-03-15  7:58   ` Greg Kurz
2016-03-15  9:21   ` Patrik Hermansson
2016-03-15  4:50 ` Michael S. Tsirkin
2016-03-15  8:00   ` Greg Kurz
2016-03-15  9:44     ` Michael S. Tsirkin
2016-03-15 19:29   ` Patrik Hermansson
