From: "Michael S. Tsirkin" <mst@redhat.com>
To: liuhaiwei <liuhaiwei9699@126.com>
Cc: qemu-devel@nongnu.org, jasowang@redhat.com
Subject: Re: [PATCH 2/3] virtio-net: update the default and max of rx/tx_queue_size
Date: Mon, 19 Sep 2022 06:22:54 -0400
Message-ID: <20220919062211-mutt-send-email-mst@kernel.org>
In-Reply-To: <20220919093915.33003-2-liuhaiwei9699@126.com>

On Mon, Sep 19, 2022 at 05:39:14AM -0400, liuhaiwei wrote:
> Set the max of tx_queue_size to 4096 even if the backend
> is not vhost-user.
> 
> Set the default rx/tx_queue_size to 2048 if the backend
> is vhost-user, otherwise to 4096.
> 
> Signed-off-by: liuhaiwei <liuhaiwei9699@126.com>


Please include the motivation for the change.
A change like this will also need compat knobs to avoid breaking
old machine types.
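
For reference, a rough sketch of what such a compat knob could look
like (hypothetical: it assumes this lands in the 7.2 cycle, so the
hw_compat_7_1 array in hw/core/machine.c would be the place; the
values pin old machine types to the previous fixed default of 256):

    /* hw/core/machine.c (sketch): keep pre-7.2 machine types at the
     * historical 256 default instead of the new backend-dependent one.
     */
    GlobalProperty hw_compat_7_1[] = {
        /* ... existing entries ... */
        { "virtio-net-device", "rx_queue_size", "256" },
        { "virtio-net-device", "tx_queue_size", "256" },
    };

Since the patch turns the property default into a 0 sentinel that is
only resolved at realize time, globals like these would let old
machine types keep the old default while new machine types pick up
the new behavior.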

> ---
>  hw/net/virtio-net.c | 40 ++++++++++++++++++++++++++++++----------
>  1 file changed, 30 insertions(+), 10 deletions(-)
> 
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index dd0d056fde..d63ef24e6a 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -52,12 +52,11 @@
>  #define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
>  
>  /* previously fixed value */
> -#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> -#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_VHOST_USER_DEFAULT_SIZE 2048
>  
>  /* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
> -#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> -#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_RX_QUEUE_MIN_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE 256
>  
>  #define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */
>  
> @@ -593,6 +592,28 @@ static int peer_has_ufo(VirtIONet *n)
>  
>      return n->has_ufo;
>  }
> +static void virtio_net_set_default_queue_size(VirtIONet *n)
> +{
> +    NetClientState *peer = n->nic_conf.peers.ncs[0];
> +
> +    /* Default value is 0 if not set */
> +    if (n->net_conf.rx_queue_size == 0) {
> +        if (peer && peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
> +            n->net_conf.rx_queue_size = VIRTIO_NET_VHOST_USER_DEFAULT_SIZE;
> +        } else {
> +            n->net_conf.rx_queue_size = VIRTIO_NET_VQ_MAX_SIZE;
> +        }
> +    }
> +
> +    if (n->net_conf.tx_queue_size == 0) {
> +        if (peer && peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
> +            n->net_conf.tx_queue_size = VIRTIO_NET_VHOST_USER_DEFAULT_SIZE;
> +        } else {
> +            n->net_conf.tx_queue_size = VIRTIO_NET_VQ_MAX_SIZE;
> +        }
> +    }
> +}
> +
>  
>  static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
>                                         int version_1, int hash_report)
> @@ -633,7 +654,7 @@ static int virtio_net_max_tx_queue_size(VirtIONet *n)
>       * size.
>       */
>      if (!peer) {
> -        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> +        return VIRTIO_NET_VQ_MAX_SIZE;
>      }
>  
>      switch(peer->info->type) {
> @@ -641,7 +662,7 @@ static int virtio_net_max_tx_queue_size(VirtIONet *n)
>      case NET_CLIENT_DRIVER_VHOST_VDPA:
>          return VIRTQUEUE_MAX_SIZE;
>      default:
> -        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> +        return VIRTIO_NET_VQ_MAX_SIZE;
>      };
>  }
>  
> @@ -3450,6 +3471,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>  
>      virtio_net_set_config_size(n, n->host_features);
>      virtio_init(vdev, VIRTIO_ID_NET, n->config_size);
> +    virtio_net_set_default_queue_size(n);
>  
>      /*
>       * We set a lower limit on RX queue size to what it always was.
> @@ -3750,10 +3772,8 @@ static Property virtio_net_properties[] = {
>                         TX_TIMER_INTERVAL),
>      DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
>      DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
> -    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
> -                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> -    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> -                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
> +    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, 0),
> +    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, 0),
>      DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
>      DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
>                       true),
> -- 
> 2.27.0
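
A note for anyone testing this: because the property default becomes
a 0 sentinel that realize() resolves per backend, explicit sizes can
still be passed on the command line as before, and the existing
realize-time checks (range and power-of-two) still apply. A minimal
hypothetical invocation:

    qemu-system-x86_64 -M q35 \
        -netdev user,id=net0 \
        -device virtio-net-pci,netdev=net0,rx_queue_size=1024,tx_queue_size=1024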



Thread overview: 6+ messages
2022-09-19  9:39 [PATCH 1/3] virtio-net: fix max vring buf size when set ring num liuhaiwei
2022-09-19  9:39 ` [PATCH 2/3] virtio-net: update the default and max of rx/tx_queue_size liuhaiwei
2022-09-19 10:22   ` Michael S. Tsirkin [this message]
2022-09-19  9:39 ` [PATCH 3/3] virtio-net: set the max of queue size to 4096 liuhaiwei
2022-09-19 10:23   ` Michael S. Tsirkin
2022-09-19 10:21 ` [PATCH 1/3] virtio-net: fix max vring buf size when set ring num Michael S. Tsirkin
