From: Jason Wang <jasowang@redhat.com>
To: qemu-devel@nongnu.org
Cc: Laurent Vivier <lvivier@redhat.com>, Jason Wang <jasowang@redhat.com>
Subject: [PULL 10/13] net: Allow network backends to advertise max TX queue size
Date: Mon, 14 Jul 2025 13:34:20 +0800
Message-ID: <20250714053423.10415-11-jasowang@redhat.com>
In-Reply-To: <20250714053423.10415-1-jasowang@redhat.com>

From: Laurent Vivier <lvivier@redhat.com>

This commit refactors how the maximum transmit queue size for
virtio-net devices is determined, making the mechanism more generic
and extensible.

Previously, virtio_net_max_tx_queue_size() hardcoded checks for
specific network backend types (vhost-user and vhost-vdpa) to
determine the maximum queue size they support. This tied virtio-net
directly to those backends and meant the function had to be modified
for every new backend that supports a non-default queue size.

To improve flexibility, a new max_tx_queue_size field is added
to the vhost_net structure. This allows each network backend
to advertise its supported maximum transmit queue size directly.
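
As a sketch only (not part of this patch; the backend and its
512-descriptor limit are hypothetical), a new backend would advertise
its limit through the new field when initializing its vhost_net:

    VhostNetOptions options = { 0 };

    options.backend_type = VHOST_BACKEND_TYPE_KERNEL;
    options.net_backend = nc;   /* the backend's NetClientState */
    options.nvqs = 2;
    /* Hypothetical limit; 0 keeps the virtio-net default. */
    options.max_tx_queue_size = 512;

    net = vhost_net_init(&options);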

virtio_net_max_tx_queue_size() now retrieves the maximum TX queue
size from the peer's vhost_net struct, if one exists and the field
is set; otherwise it falls back to VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE.
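
Concretely, with the hunks below, tap passes 0 and so keeps the
default (256 descriptors in current QEMU), while vhost-user and
vhost-vdpa pass VIRTQUEUE_MAX_SIZE (1024), preserving their previous
behavior.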

Signed-off-by: Laurent Vivier <lvivier@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
 hw/net/vhost_net.c        |  1 +
 hw/net/virtio-net.c       | 24 ++++++++++++------------
 include/hw/virtio/vhost.h |  1 +
 include/net/vhost_net.h   |  1 +
 net/tap.c                 |  1 +
 net/vhost-user.c          |  1 +
 net/vhost-vdpa.c          |  1 +
 7 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 976d2b315a..74d2e3ed90 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -245,6 +245,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
     net->dev.nvqs = options->nvqs;
     net->feature_bits = options->feature_bits;
     net->save_acked_features = options->save_acked_features;
+    net->max_tx_queue_size = options->max_tx_queue_size;
 
     net->dev.max_queues = 1;
     net->dev.vqs = net->vqs;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index e3400f18c8..39fc280839 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -670,22 +670,22 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
 static int virtio_net_max_tx_queue_size(VirtIONet *n)
 {
     NetClientState *peer = n->nic_conf.peers.ncs[0];
+    struct vhost_net *net;
 
-    /*
-     * Backends other than vhost-user or vhost-vdpa don't support max queue
-     * size.
-     */
     if (!peer) {
-        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+        goto default_value;
     }
 
-    switch(peer->info->type) {
-    case NET_CLIENT_DRIVER_VHOST_USER:
-    case NET_CLIENT_DRIVER_VHOST_VDPA:
-        return VIRTQUEUE_MAX_SIZE;
-    default:
-        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
-    };
+    net = get_vhost_net(peer);
+
+    if (!net || !net->max_tx_queue_size) {
+        goto default_value;
+    }
+
+    return net->max_tx_queue_size;
+
+default_value:
+    return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
 }
 
 static int peer_attach(VirtIONet *n, int index)
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index b0830bac79..a62992c819 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -145,6 +145,7 @@ struct vhost_net {
     struct vhost_virtqueue vqs[2];
     int backend;
     const int *feature_bits;
+    int max_tx_queue_size;
     SaveAcketFeatures *save_acked_features;
     NetClientState *nc;
 };
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index eb26ed9bdc..8f4fddfb69 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -16,6 +16,7 @@ typedef struct VhostNetOptions {
     uint32_t busyloop_timeout;
     unsigned int nvqs;
     const int *feature_bits;
+    int max_tx_queue_size;
     GetAckedFeatures *get_acked_features;
     SaveAcketFeatures *save_acked_features;
     void *opaque;
diff --git a/net/tap.c b/net/tap.c
index 79fa02a65c..2f0cb55c9a 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -746,6 +746,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer,
         options.feature_bits = kernel_feature_bits;
         options.get_acked_features = NULL;
         options.save_acked_features = NULL;
+        options.max_tx_queue_size = 0;
 
         s->vhost_net = vhost_net_init(&options);
         if (!s->vhost_net) {
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 8a3df27b02..bf892915de 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -138,6 +138,7 @@ static int vhost_user_start(int queues, NetClientState *ncs[],
         options.busyloop_timeout = 0;
         options.nvqs = 2;
         options.feature_bits = user_feature_bits;
+        options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
         options.get_acked_features = vhost_user_get_acked_features;
         options.save_acked_features = vhost_user_save_acked_features;
 
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index c63225d3d2..353392b3d7 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -204,6 +204,7 @@ static int vhost_vdpa_add(NetClientState *ncs, void *be,
     options.feature_bits = vdpa_feature_bits;
     options.get_acked_features = NULL;
     options.save_acked_features = NULL;
+    options.max_tx_queue_size = VIRTQUEUE_MAX_SIZE;
 
     net = vhost_net_init(&options);
     if (!net) {
-- 
2.42.0


