* [Qemu-devel] [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wei Wang @ 2017-06-23 2:32 UTC (permalink / raw)
To: mst, jasowang, eblake, virtio-dev, qemu-devel
Cc: armbru, pbonzini, jan.scheurich, marcandre.lureau, stefanha,
Wei Wang
This patch enables the virtio-net tx queue size to be configurable
between 256 (the default queue size) and 1024 by the user when the
vhost-user backend is used.
Currently, the maximum tx queue size for other backends is 512 due
to the following limitations:
- QEMU backend: the QEMU backend implementation in some cases may
send 1024+1 iovs to writev.
- Vhost_net backend: there are possibilities that the guest sends
a vring_desc of memory which corsses a MemoryRegion thereby
generating more than 1024 iovs after translattion from guest-physical
address in the backend.
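For illustration only (not part of the patch): with this change applied, the
larger ring would be requested on the QEMU command line roughly as below. The
chardev path and netdev/device ids are made up, and the other vhost-user
prerequisites (e.g. a shared-memory backend) are omitted; the tx_queue_size
property name comes from the hunk further down.
    -chardev socket,id=chr0,path=/tmp/vhost-user.sock \
    -netdev vhost-user,id=net0,chardev=chr0 \
    -device virtio-net-pci,netdev=net0,tx_queue_size=1024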
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
hw/net/virtio-net.c | 45 +++++++++++++++++++++++++++++++++---------
include/hw/virtio/virtio-net.h | 1 +
2 files changed, 37 insertions(+), 9 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 91eddaf..d13ca60 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -34,8 +34,11 @@
/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
+#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
+
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
+#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
/*
* Calculate the number of bytes up to and including the given 'field' of
@@ -1508,15 +1511,18 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
virtio_net_handle_rx);
+
if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
n->vqs[index].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+ virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+ virtio_net_handle_tx_timer);
n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
virtio_net_tx_timer,
&n->vqs[index]);
} else {
n->vqs[index].tx_vq =
- virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+ virtio_add_queue(vdev, n->net_conf.tx_queue_size,
+ virtio_net_handle_tx_bh);
n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
}
@@ -1927,6 +1933,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
return;
}
+ if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
+ n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
+ !is_power_of_2(n->net_conf.tx_queue_size)) {
+ error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
+ "must be a power of 2 between %d and %d",
+ n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
+ VIRTQUEUE_MAX_SIZE);
+ virtio_cleanup(vdev);
+ return;
+ }
+
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
@@ -1947,17 +1964,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
error_report("Defaulting to \"bh\"");
}
- for (i = 0; i < n->max_queues; i++) {
- virtio_net_add_queue(n, i);
- }
-
- n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
n->status = VIRTIO_NET_S_LINK_UP;
n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
virtio_net_announce_timer, n);
-
if (n->netclient_type) {
/*
* Happen when virtio_net_set_netclient_name has been called.
@@ -1968,6 +1979,21 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
object_get_typename(OBJECT(dev)), dev->id, n);
}
+ nc = qemu_get_queue(n->nic);
+
+ /*
+ * Currently, backends other than vhost-user don't support 1024 queue
+ * size.
+ */
+ if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
+ nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
+ n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
+ }
+
+ for (i = 0; i < n->max_queues; i++) {
+ virtio_net_add_queue(n, i);
+ }
+ n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
peer_test_vnet_hdr(n);
if (peer_has_vnet_hdr(n)) {
@@ -1990,7 +2016,6 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->vlans = g_malloc0(MAX_VLAN >> 3);
- nc = qemu_get_queue(n->nic);
nc->rxfilter_notify_enabled = 1;
n->qdev = dev;
@@ -2106,6 +2131,8 @@ static Property virtio_net_properties[] = {
DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
+ DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
+ VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
true),
diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
index 602b486..b81b6a4 100644
--- a/include/hw/virtio/virtio-net.h
+++ b/include/hw/virtio/virtio-net.h
@@ -36,6 +36,7 @@ typedef struct virtio_net_conf
int32_t txburst;
char *tx;
uint16_t rx_queue_size;
+ uint16_t tx_queue_size;
uint16_t mtu;
} virtio_net_conf;
--
2.7.4
* [Qemu-devel] [PATCH v3 2/2] virtio-net: code cleanup
From: Wei Wang @ 2017-06-23 2:32 UTC (permalink / raw)
To: mst, jasowang, eblake, virtio-dev, qemu-devel
Cc: armbru, pbonzini, jan.scheurich, marcandre.lureau, stefanha,
Wei Wang
Use is_power_of_2(), and remove trailing "." from error_setg().
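Side note (not part of the patch): for the sizes that pass the range check,
the helper and the open-coded bit trick it replaces agree. A minimal sketch,
with the helper body paraphrased from include/qemu/host-utils.h:
    #include <stdbool.h>
    #include <stdint.h>
    /* Paraphrase of QEMU's is_power_of_2(): reject 0, then apply the same
     * "v & (v - 1)" trick as the open-coded check being removed. */
    static bool is_power_of_2_sketch(uint64_t value)
    {
        return value && !(value & (value - 1));
    }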
Signed-off-by: Wei Wang <wei.w.wang@intel.com>
---
hw/net/virtio-net.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index d13ca60..b42423c 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -1924,9 +1924,9 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
*/
if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
- (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
+ !is_power_of_2(n->net_conf.rx_queue_size)) {
error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
- "must be a power of 2 between %d and %d.",
+ "must be a power of 2 between %d and %d",
n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
VIRTQUEUE_MAX_SIZE);
virtio_cleanup(vdev);
@@ -1947,7 +1947,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
n->max_queues = MAX(n->nic_conf.peers.queues, 1);
if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
- "must be a positive integer less than %d.",
+ "must be a positive integer less than %d",
n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
virtio_cleanup(vdev);
return;
--
2.7.4
* Re: [Qemu-devel] [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Jason Wang @ 2017-06-26 3:18 UTC (permalink / raw)
To: Wei Wang, mst, eblake, virtio-dev, qemu-devel
Cc: stefanha, armbru, jan.scheurich, marcandre.lureau, pbonzini
On 2017年06月23日 10:32, Wei Wang wrote:
> This patch enables the virtio-net tx queue size to be configurable
> between 256 (the default queue size) and 1024 by the user when the
> vhost-user backend is used.
>
> Currently, the maximum tx queue size for other backends is 512 due
> to the following limitations:
> - QEMU backend: the QEMU backend implementation in some cases may
> send 1024+1 iovs to writev.
> - Vhost_net backend: there are possibilities that the guest sends
> a vring_desc of memory which corsses a MemoryRegion thereby
> generating more than 1024 iovs after translattion from guest-physical
> address in the backend.
>
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
> hw/net/virtio-net.c | 45 +++++++++++++++++++++++++++++++++---------
> include/hw/virtio/virtio-net.h | 1 +
> 2 files changed, 37 insertions(+), 9 deletions(-)
>
> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> index 91eddaf..d13ca60 100644
> --- a/hw/net/virtio-net.c
> +++ b/hw/net/virtio-net.c
> @@ -34,8 +34,11 @@
>
> /* previously fixed value */
> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> +#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
> +
> /* for now, only allow larger queues; with virtio-1, guest can downsize */
> #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
> +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
>
> /*
> * Calculate the number of bytes up to and including the given 'field' of
> @@ -1508,15 +1511,18 @@ static void virtio_net_add_queue(VirtIONet *n, int index)
>
> n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
> virtio_net_handle_rx);
> +
Unnecessary whitespace change.
> if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
> n->vqs[index].tx_vq =
> - virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
> + virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> + virtio_net_handle_tx_timer);
> n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
> virtio_net_tx_timer,
> &n->vqs[index]);
> } else {
> n->vqs[index].tx_vq =
> - virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
> + virtio_add_queue(vdev, n->net_conf.tx_queue_size,
> + virtio_net_handle_tx_bh);
> n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
> }
>
> @@ -1927,6 +1933,17 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> return;
> }
>
> + if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
> + n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
> + !is_power_of_2(n->net_conf.tx_queue_size)) {
> + error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
> + "must be a power of 2 between %d and %d",
> + n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
> + VIRTQUEUE_MAX_SIZE);
> + virtio_cleanup(vdev);
> + return;
> + }
> +
> n->max_queues = MAX(n->nic_conf.peers.queues, 1);
> if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
> error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
> @@ -1947,17 +1964,11 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> error_report("Defaulting to \"bh\"");
> }
>
> - for (i = 0; i < n->max_queues; i++) {
> - virtio_net_add_queue(n, i);
> - }
> -
> - n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
> qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
> memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
> n->status = VIRTIO_NET_S_LINK_UP;
> n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
> virtio_net_announce_timer, n);
> -
Unnecessary whitespace change.
> if (n->netclient_type) {
> /*
> * Happen when virtio_net_set_netclient_name has been called.
> @@ -1968,6 +1979,21 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
> n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
> object_get_typename(OBJECT(dev)), dev->id, n);
> }
> + nc = qemu_get_queue(n->nic);
> +
> + /*
> + * Currently, backends other than vhost-user don't support 1024 queue
> + * size.
> + */
> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
Do we really want to assume all vhost-user backends support 1024 queue size?
> + }
> +
> + for (i = 0; i < n->max_queues; i++) {
> + virtio_net_add_queue(n, i);
> + }
Any reason to move virtio_net_add_queue() here?
> + n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
>
> peer_test_vnet_hdr(n);
> if (peer_has_vnet_hdr(n)) {
> @@ -1990,7 +2016,6 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
>
> n->vlans = g_malloc0(MAX_VLAN >> 3);
>
> - nc = qemu_get_queue(n->nic);
> nc->rxfilter_notify_enabled = 1;
>
> n->qdev = dev;
> @@ -2106,6 +2131,8 @@ static Property virtio_net_properties[] = {
> DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
> DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
> VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
> + DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
> + VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
> DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
> DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
> true),
> diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h
> index 602b486..b81b6a4 100644
> --- a/include/hw/virtio/virtio-net.h
> +++ b/include/hw/virtio/virtio-net.h
> @@ -36,6 +36,7 @@ typedef struct virtio_net_conf
> int32_t txburst;
> char *tx;
> uint16_t rx_queue_size;
> + uint16_t tx_queue_size;
> uint16_t mtu;
> } virtio_net_conf;
>
* Re: [Qemu-devel] [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wei Wang @ 2017-06-26 4:55 UTC (permalink / raw)
To: Jason Wang, mst, eblake, virtio-dev, qemu-devel
Cc: stefanha, armbru, jan.scheurich, marcandre.lureau, pbonzini
On 06/26/2017 11:18 AM, Jason Wang wrote:
>
> On 2017年06月23日 10:32, Wei Wang wrote:
>> This patch enables the virtio-net tx queue size to be configurable
>> between 256 (the default queue size) and 1024 by the user when the
>> vhost-user backend is used.
>>
>> Currently, the maximum tx queue size for other backends is 512 due
>> to the following limitations:
>> - QEMU backend: the QEMU backend implementation in some cases may
>> send 1024+1 iovs to writev.
>> - Vhost_net backend: there are possibilities that the guest sends
>> a vring_desc of memory which corsses a MemoryRegion thereby
>> generating more than 1024 iovs after translattion from guest-physical
>> address in the backend.
>>
>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>> ---
>> hw/net/virtio-net.c | 45
>> +++++++++++++++++++++++++++++++++---------
>> include/hw/virtio/virtio-net.h | 1 +
>> 2 files changed, 37 insertions(+), 9 deletions(-)
>>
>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>> index 91eddaf..d13ca60 100644
>> --- a/hw/net/virtio-net.c
>> +++ b/hw/net/virtio-net.c
>> @@ -34,8 +34,11 @@
>> /* previously fixed value */
>> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>> +
>> + /*
>> + * Currently, backends other than vhost-user don't support 1024
>> queue
>> + * size.
>> + */
>> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>
> Do we really want to assume all vhost-user backends support 1024 queue size?
>
Do you know any vhost-user backend implementation that doesn't support
1024 tx queue size?
>> + }
>> +
>> + for (i = 0; i < n->max_queues; i++) {
>> + virtio_net_add_queue(n, i);
>> + }
>
> Any reason to move virtio_net_add_queue() here?
>
Please check the whole init steps. It was moved here (after qemu_new_nic())
to make sure nc->peer is not NULL.
Best,
Wei
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Jason Wang @ 2017-06-26 8:05 UTC (permalink / raw)
To: Wei Wang, mst, eblake, virtio-dev, qemu-devel
Cc: stefanha, armbru, jan.scheurich, marcandre.lureau, pbonzini
On 2017年06月26日 12:55, Wei Wang wrote:
> On 06/26/2017 11:18 AM, Jason Wang wrote:
>>
>> On 2017年06月23日 10:32, Wei Wang wrote:
>>> This patch enables the virtio-net tx queue size to be configurable
>>> between 256 (the default queue size) and 1024 by the user when the
>>> vhost-user backend is used.
>>>
>>> Currently, the maximum tx queue size for other backends is 512 due
>>> to the following limitations:
>>> - QEMU backend: the QEMU backend implementation in some cases may
>>> send 1024+1 iovs to writev.
>>> - Vhost_net backend: there are possibilities that the guest sends
>>> a vring_desc of memory which corsses a MemoryRegion thereby
>>> generating more than 1024 iovs after translattion from guest-physical
>>> address in the backend.
>>>
>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>> ---
>>> hw/net/virtio-net.c | 45
>>> +++++++++++++++++++++++++++++++++---------
>>> include/hw/virtio/virtio-net.h | 1 +
>>> 2 files changed, 37 insertions(+), 9 deletions(-)
>>>
>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>> index 91eddaf..d13ca60 100644
>>> --- a/hw/net/virtio-net.c
>>> +++ b/hw/net/virtio-net.c
>>> @@ -34,8 +34,11 @@
>>> /* previously fixed value */
>>> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>>> +
>>> + /*
>>> + * Currently, backends other than vhost-user don't support 1024
>>> queue
>>> + * size.
>>> + */
>>> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>>> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>>> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>>
>> Do we really want to assume all vhost-user backends support 1024 queue size?
>>
>
> Do you know any vhost-user backend implementation that doesn't support
> 1024 tx queue size?
>
>
I don't, but the issue is that vhost-user is an open protocol; there could
even be closed source implementations of it. So we can't audit all
kinds of backends.
>
>>> + }
>>> +
>>> + for (i = 0; i < n->max_queues; i++) {
>>> + virtio_net_add_queue(n, i);
>>> + }
>>
>> Any reason to move virtio_net_add_queue() here?
>>
>
> Please check the whole init steps. It was moved here (after
> qemu_new_nic())
> to make sure nc->peer is not NULL.
I don't get this, can't you get the peer just from nic_conf.peers?
Thanks
>
> Best,
> Wei
>
> ---------------------------------------------------------------------
> To unsubscribe, e-mail: virtio-dev-unsubscribe@lists.oasis-open.org
> For additional commands, e-mail: virtio-dev-help@lists.oasis-open.org
>
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wei Wang @ 2017-06-26 10:34 UTC (permalink / raw)
To: Jason Wang, mst, eblake, virtio-dev, qemu-devel
Cc: stefanha, armbru, jan.scheurich, marcandre.lureau, pbonzini
On 06/26/2017 04:05 PM, Jason Wang wrote:
>
>
> On 2017年06月26日 12:55, Wei Wang wrote:
>> On 06/26/2017 11:18 AM, Jason Wang wrote:
>>>
>>> On 2017年06月23日 10:32, Wei Wang wrote:
>>>> This patch enables the virtio-net tx queue size to be configurable
>>>> between 256 (the default queue size) and 1024 by the user when the
>>>> vhost-user backend is used.
>>>>
>>>> Currently, the maximum tx queue size for other backends is 512 due
>>>> to the following limitations:
>>>> - QEMU backend: the QEMU backend implementation in some cases may
>>>> send 1024+1 iovs to writev.
>>>> - Vhost_net backend: there are possibilities that the guest sends
>>>> a vring_desc of memory which corsses a MemoryRegion thereby
>>>> generating more than 1024 iovs after translattion from guest-physical
>>>> address in the backend.
>>>>
>>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>>> ---
>>>> hw/net/virtio-net.c | 45
>>>> +++++++++++++++++++++++++++++++++---------
>>>> include/hw/virtio/virtio-net.h | 1 +
>>>> 2 files changed, 37 insertions(+), 9 deletions(-)
>>>>
>>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>>> index 91eddaf..d13ca60 100644
>>>> --- a/hw/net/virtio-net.c
>>>> +++ b/hw/net/virtio-net.c
>>>> @@ -34,8 +34,11 @@
>>>> /* previously fixed value */
>>>> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>>>> +
>>>> + /*
>>>> + * Currently, backends other than vhost-user don't support
>>>> 1024 queue
>>>> + * size.
>>>> + */
>>>> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>>>> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>>>> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>>>
>>> Do we really want to assume all vhost-user backends support 1024 queue
>>> size?
>>>
>>
>> Do you know any vhost-user backend implementation that doesn't support
>> 1024 tx queue size?
>>
>>
>
> I don't, but the issue is that vhost-user is an open protocol; there
> could even be closed source implementations of it. So we can't audit
> all kinds of backends.
>
This is the discussion left before. Here is my thought:
Until now, nobody is officially using 1024 tx queue size, including the
unknown
vhost-user backend implementation. So, applying this patch won't affect any
already deployed products.
If someday, people whose products are based on the unknown vhost-user
implementation change to try with 1024 tx queue size and apply this patch,
and find 1024 doesn't work for them, they can simply set the value to
512 (which
is better than the 256 size they were using) or apply the 2nd patch entitled
"VIRTIO_F_MAX_CHAIN_SIZE" to use 1024 queue size.
I didn't see any big issue here. Would you see any issues?
@Michael, what is your opinion?
>>
>>>> + }
>>>> +
>>>> + for (i = 0; i < n->max_queues; i++) {
>>>> + virtio_net_add_queue(n, i);
>>>> + }
>>>
>>> Any reason to move virtio_net_add_queue() here?
>>>
>>
>> Please check the whole init steps. It was moved here (after
>> qemu_new_nic())
>> to make sure nc->peer is not NULL.
>
> I don't get this, can't you get the peer just from nic_conf.peers?
>
Correct, nic_conf.peers also works. Thanks.
Best,
Wei
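[Editorial sketch, not from the thread: the alternative Jason suggests would
look roughly like the fragment below inside virtio_net_device_realize(),
assuming queue 0's configured peer is reachable as n->nic_conf.peers.ncs[0]
(per the NICPeers layout in net/net.h). That would let the backend-type check
run before qemu_new_nic(), so virtio_net_add_queue() would not need to move.]
    /* Hypothetical reordering: cap tx_queue_size using the configured peer
     * of queue 0, which is available before the NIC is created. */
    NetClientState *peer = n->nic_conf.peers.ncs[0];
    if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
        (!peer || peer->info->type != NET_CLIENT_DRIVER_VHOST_USER)) {
        n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }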
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Michael S. Tsirkin @ 2017-06-26 21:21 UTC (permalink / raw)
To: Wei Wang
Cc: Jason Wang, eblake, virtio-dev, qemu-devel, stefanha, armbru,
jan.scheurich, marcandre.lureau, pbonzini
On Mon, Jun 26, 2017 at 06:34:25PM +0800, Wei Wang wrote:
> On 06/26/2017 04:05 PM, Jason Wang wrote:
> >
> >
> > On 2017年06月26日 12:55, Wei Wang wrote:
> > > On 06/26/2017 11:18 AM, Jason Wang wrote:
> > > >
> > > > On 2017年06月23日 10:32, Wei Wang wrote:
> > > > > This patch enables the virtio-net tx queue size to be configurable
> > > > > between 256 (the default queue size) and 1024 by the user when the
> > > > > vhost-user backend is used.
> > > > >
> > > > > Currently, the maximum tx queue size for other backends is 512 due
> > > > > to the following limitations:
> > > > > - QEMU backend: the QEMU backend implementation in some cases may
> > > > > send 1024+1 iovs to writev.
> > > > > - Vhost_net backend: there are possibilities that the guest sends
> > > > > a vring_desc of memory which corsses a MemoryRegion thereby
> > > > > generating more than 1024 iovs after translattion from guest-physical
> > > > > address in the backend.
> > > > >
> > > > > Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> > > > > ---
> > > > > hw/net/virtio-net.c | 45
> > > > > +++++++++++++++++++++++++++++++++---------
> > > > > include/hw/virtio/virtio-net.h | 1 +
> > > > > 2 files changed, 37 insertions(+), 9 deletions(-)
> > > > >
> > > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > > index 91eddaf..d13ca60 100644
> > > > > --- a/hw/net/virtio-net.c
> > > > > +++ b/hw/net/virtio-net.c
> > > > > @@ -34,8 +34,11 @@
> > > > > /* previously fixed value */
> > > > > #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> > > > > +
> > > > > + /*
> > > > > + * Currently, backends other than vhost-user don't
> > > > > support 1024 queue
> > > > > + * size.
> > > > > + */
> > > > > + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> > > > > + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> > > > > + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> > > >
> > > > Do we really want to assume all vhost-user backends support 1024
> > > > queue size?
> > > >
> > >
> > > Do you know any vhost-user backend implementation that doesn't support
> > > 1024 tx queue size?
> > >
> > >
> >
> > I don't, but the issue is that vhost-user is an open protocol; there could
> > even be closed source implementations of it. So we can't audit all
> > kinds of backends.
> >
>
> This is the discussion left before. Here is my thought:
>
> Until now, nobody is officially using 1024 tx queue size, including the
> unknown
> vhost-user backend implementation. So, applying this patch won't affect any
> already deployed products.
>
> If someday, people whose products are based on the unknown vhost-user
> implementation change to try with 1024 tx queue size and apply this patch,
> and find 1024 doesn't work for them, they can simply set the value to 512
> (which
> is better than the 256 size they were using) or apply the 2nd patch entitled
> "VIRTIO_F_MAX_CHAIN_SIZE" to use 1024 queue size.
>
> I didn't see any big issue here. Would you see any issues?
>
> @Michael, what is your opinion?
Since the default does not change, I think it's fine.
You need to both have a non-default value and be using
a backend that does not support the 1024 size.
Seems unlikely to me.
>
> > >
> > > > > + }
> > > > > +
> > > > > + for (i = 0; i < n->max_queues; i++) {
> > > > > + virtio_net_add_queue(n, i);
> > > > > + }
> > > >
> > > > Any reason to move virtio_net_add_queue() here?
> > > >
> > >
> > > Please check the whole init steps. It was moved here (after
> > > qemu_new_nic())
> > > to make sure nc->peer is not NULL.
> >
> > I don't get this, can't you get the peer just from nic_conf.peers?
> >
>
> Correct, nic_conf.peers also works. Thanks.
>
> Best,
> Wei
>
>
>
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wei Wang @ 2017-06-27 1:06 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: Jason Wang, eblake, virtio-dev, qemu-devel, stefanha, armbru,
jan.scheurich, marcandre.lureau, pbonzini
On 06/27/2017 05:21 AM, Michael S. Tsirkin wrote:
> On Mon, Jun 26, 2017 at 06:34:25PM +0800, Wei Wang wrote:
>> On 06/26/2017 04:05 PM, Jason Wang wrote:
>>>
>>> On 2017年06月26日 12:55, Wei Wang wrote:
>>>> On 06/26/2017 11:18 AM, Jason Wang wrote:
>>>>> On 2017年06月23日 10:32, Wei Wang wrote:
>>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>>> between 256 (the default queue size) and 1024 by the user when the
>>>>>> vhost-user backend is used.
>>>>>>
>>>>>> Currently, the maximum tx queue size for other backends is 512 due
>>>>>> to the following limitations:
>>>>>> - QEMU backend: the QEMU backend implementation in some cases may
>>>>>> send 1024+1 iovs to writev.
>>>>>> - Vhost_net backend: there are possibilities that the guest sends
>>>>>> a vring_desc of memory which corsses a MemoryRegion thereby
>>>>>> generating more than 1024 iovs after translattion from guest-physical
>>>>>> address in the backend.
>>>>>>
>>>>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>>>>> ---
>>>>>> hw/net/virtio-net.c | 45
>>>>>> +++++++++++++++++++++++++++++++++---------
>>>>>> include/hw/virtio/virtio-net.h | 1 +
>>>>>> 2 files changed, 37 insertions(+), 9 deletions(-)
>>>>>>
>>>>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>>>>> index 91eddaf..d13ca60 100644
>>>>>> --- a/hw/net/virtio-net.c
>>>>>> +++ b/hw/net/virtio-net.c
>>>>>> @@ -34,8 +34,11 @@
>>>>>> /* previously fixed value */
>>>>>> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>>>>>> +
>>>>>> + /*
>>>>>> + * Currently, backends other than vhost-user don't
>>>>>> support 1024 queue
>>>>>> + * size.
>>>>>> + */
>>>>>> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>>>>>> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>>>>>> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>>>>> Do we really want to assume all vhost-user backends support 1024
>>>>> queue size?
>>>>>
>>>> Do you know any vhost-user backend implementation that doesn't support
>>>> 1024 tx queue size?
>>>>
>>>>
>>> I don't, but the issue is that vhost-user is an open protocol; there could
>>> even be closed source implementations of it. So we can't audit all
>>> kinds of backends.
>>>
>> This is the discussion left before. Here is my thought:
>>
>> Until now, nobody is officially using 1024 tx queue size, including the
>> unknown
>> vhost-user backend implementation. So, applying this patch won't affect any
>> already deployed products.
>>
>> If someday, people whose products are based on the unknown vhost-user
>> implementation change to try with 1024 tx queue size and apply this patch,
>> and find 1024 doesn't work for them, they can simply set the value to 512
>> (which
>> is better than the 256 size they were using) or apply the 2nd patch entitled
>> "VIRTIO_F_MAX_CHAIN_SIZE" to use 1024 queue size.
>>
>> I didn't see any big issue here. Would you see any issues?
>>
>> @Michael, what is your opinion?
> Since the default does not change, I think it's fine.
> You need to both have a non-default value and be using
> a backend that does not support the 1024 size.
> Seems unlikely to me.
>
Were you referring to the use of 512 queue size? I think that's already
supported
for all the backends.
The above code only makes a non-vhost-user backend, which doesn't
support 1024,
use the default value when the user specified 1024 queue size. It won't
make any
change if 512 was given.
Now, we treat all vhost-user backends as the ones that support 1024. If
a user got an
unknown issue due to their special vhost-user backend
implementation(maybe bugs
generated by their own) with 1024 queue size, they'll need to re-start
with 512, and
the code will work with 512.
Best,
Wei
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Michael S. Tsirkin @ 2017-06-27 1:51 UTC (permalink / raw)
To: Wei Wang
Cc: Jason Wang, eblake, virtio-dev, qemu-devel, stefanha, armbru,
jan.scheurich, marcandre.lureau, pbonzini
On Tue, Jun 27, 2017 at 09:06:07AM +0800, Wei Wang wrote:
> On 06/27/2017 05:21 AM, Michael S. Tsirkin wrote:
> > On Mon, Jun 26, 2017 at 06:34:25PM +0800, Wei Wang wrote:
> > > On 06/26/2017 04:05 PM, Jason Wang wrote:
> > > >
> > > > On 2017年06月26日 12:55, Wei Wang wrote:
> > > > > On 06/26/2017 11:18 AM, Jason Wang wrote:
> > > > > > On 2017年06月23日 10:32, Wei Wang wrote:
> > > > > > > This patch enables the virtio-net tx queue size to be configurable
> > > > > > > between 256 (the default queue size) and 1024 by the user when the
> > > > > > > vhost-user backend is used.
> > > > > > >
> > > > > > > Currently, the maximum tx queue size for other backends is 512 due
> > > > > > > to the following limitations:
> > > > > > > - QEMU backend: the QEMU backend implementation in some cases may
> > > > > > > send 1024+1 iovs to writev.
> > > > > > > - Vhost_net backend: there are possibilities that the guest sends
> > > > > > > a vring_desc of memory which corsses a MemoryRegion thereby
> > > > > > > generating more than 1024 iovs after translattion from guest-physical
> > > > > > > address in the backend.
> > > > > > >
> > > > > > > Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> > > > > > > ---
> > > > > > > hw/net/virtio-net.c | 45
> > > > > > > +++++++++++++++++++++++++++++++++---------
> > > > > > > include/hw/virtio/virtio-net.h | 1 +
> > > > > > > 2 files changed, 37 insertions(+), 9 deletions(-)
> > > > > > >
> > > > > > > diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
> > > > > > > index 91eddaf..d13ca60 100644
> > > > > > > --- a/hw/net/virtio-net.c
> > > > > > > +++ b/hw/net/virtio-net.c
> > > > > > > @@ -34,8 +34,11 @@
> > > > > > > /* previously fixed value */
> > > > > > > #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
> > > > > > > +
> > > > > > > + /*
> > > > > > > + * Currently, backends other than vhost-user don't
> > > > > > > support 1024 queue
> > > > > > > + * size.
> > > > > > > + */
> > > > > > > + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
> > > > > > > + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
> > > > > > > + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
> > > > > > Do we really want to assume all vhost-user backends support 1024
> > > > > > queue size?
> > > > > >
> > > > > Do you know any vhost-user backend implementation that doesn't support
> > > > > 1024 tx queue size?
> > > > >
> > > > >
> > > > I don't, but the issue is that vhost-user is an open protocol; there could
> > > > even be closed source implementations of it. So we can't audit all
> > > > kinds of backends.
> > > >
> > > This is the discussion left before. Here is my thought:
> > >
> > > Until now, nobody is officially using 1024 tx queue size, including the
> > > unknown
> > > vhost-user backend implementation. So, applying this patch won't affect any
> > > already deployed products.
> > >
> > > If someday, people whose products are based on the unknown vhost-user
> > > implementation change to try with 1024 tx queue size and apply this patch,
> > > and find 1024 doesn't work for them, they can simply set the value to 512
> > > (which
> > > is better than the 256 size they were using) or apply the 2nd patch entitled
> > > "VIRTIO_F_MAX_CHAIN_SIZE" to use 1024 queue size.
> > >
> > > I didn't see any big issue here. Would you see any issues?
> > >
> > > @Michael, what is your opinion?
> > Since the default does not change, I think it's fine.
> > You need to both have a non-default value and be using
> > a backend that does not support the 1024 size.
> > Seems unlikely to me.
> >
>
> Were you referring to the use of 512 queue size? I think that's already
> supported
> for all the backends.
>
> The above code only makes a non-vhost-user backend, which doesn't support
> 1024,
> use the default value when the user specified 1024 queue size. It won't make
> any
> change if 512 was given.
>
> Now, we treat all vhost-user backends as the ones that support 1024. If a
> user got an
> unknown issue due to their special vhost-user backend implementation (maybe
> bugs of their own making) with 1024 queue size, they'll need to re-start with
> 512, and
> the code will work with 512.
>
> Best,
> Wei
I am just saying since default queue size is unchanged, your patches
are safe.
--
MST
* Re: [Qemu-devel] [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Eric Blake @ 2017-06-27 1:51 UTC (permalink / raw)
To: Wei Wang, mst, jasowang, virtio-dev, qemu-devel
Cc: armbru, pbonzini, jan.scheurich, marcandre.lureau, stefanha
On 06/22/2017 09:32 PM, Wei Wang wrote:
> This patch enables the virtio-net tx queue size to be configurable
> between 256 (the default queue size) and 1024 by the user when the
> vhost-user backend is used.
When sending a multi-patch series, don't forget the 0/2 cover letter.
>
> Currently, the maximum tx queue size for other backends is 512 due
> to the following limitations:
> - QEMU backend: the QEMU backend implementation in some cases may
> send 1024+1 iovs to writev.
> - Vhost_net backend: there are possibilities that the guest sends
> a vring_desc of memory which corsses a MemoryRegion thereby
s/corsses/crosses/
> generating more than 1024 iovs after translattion from guest-physical
s/translattion/translation/
> address in the backend.
>
> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
> ---
> hw/net/virtio-net.c | 45 +++++++++++++++++++++++++++++++++---------
> include/hw/virtio/virtio-net.h | 1 +
> 2 files changed, 37 insertions(+), 9 deletions(-)
>
--
Eric Blake, Principal Software Engineer
Red Hat, Inc. +1-919-301-3266
Virtualization: qemu.org | libvirt.org
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Jason Wang @ 2017-06-27 2:19 UTC (permalink / raw)
To: Michael S. Tsirkin, Wei Wang
Cc: eblake, virtio-dev, qemu-devel, stefanha, armbru, jan.scheurich,
marcandre.lureau, pbonzini
On 2017年06月27日 05:21, Michael S. Tsirkin wrote:
> On Mon, Jun 26, 2017 at 06:34:25PM +0800, Wei Wang wrote:
>> On 06/26/2017 04:05 PM, Jason Wang wrote:
>>>
>>> On 2017年06月26日 12:55, Wei Wang wrote:
>>>> On 06/26/2017 11:18 AM, Jason Wang wrote:
>>>>> On 2017年06月23日 10:32, Wei Wang wrote:
>>>>>> This patch enables the virtio-net tx queue size to be configurable
>>>>>> between 256 (the default queue size) and 1024 by the user when the
>>>>>> vhost-user backend is used.
>>>>>>
>>>>>> Currently, the maximum tx queue size for other backends is 512 due
>>>>>> to the following limitations:
>>>>>> - QEMU backend: the QEMU backend implementation in some cases may
>>>>>> send 1024+1 iovs to writev.
>>>>>> - Vhost_net backend: there are possibilities that the guest sends
>>>>>> a vring_desc of memory which corsses a MemoryRegion thereby
>>>>>> generating more than 1024 iovs after translattion from guest-physical
>>>>>> address in the backend.
>>>>>>
>>>>>> Signed-off-by: Wei Wang <wei.w.wang@intel.com>
>>>>>> ---
>>>>>> hw/net/virtio-net.c | 45
>>>>>> +++++++++++++++++++++++++++++++++---------
>>>>>> include/hw/virtio/virtio-net.h | 1 +
>>>>>> 2 files changed, 37 insertions(+), 9 deletions(-)
>>>>>>
>>>>>> diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
>>>>>> index 91eddaf..d13ca60 100644
>>>>>> --- a/hw/net/virtio-net.c
>>>>>> +++ b/hw/net/virtio-net.c
>>>>>> @@ -34,8 +34,11 @@
>>>>>> /* previously fixed value */
>>>>>> #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
>>>>>> +
>>>>>> + /*
>>>>>> + * Currently, backends other than vhost-user don't
>>>>>> support 1024 queue
>>>>>> + * size.
>>>>>> + */
>>>>>> + if (n->net_conf.tx_queue_size == VIRTQUEUE_MAX_SIZE &&
>>>>>> + nc->peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
>>>>>> + n->net_conf.tx_queue_size = VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
>>>>> Do we really want to assume all vhost-user backends support 1024
>>>>> queue size?
>>>>>
>>>> Do you know any vhost-user backend implementation that doesn't support
>>>> 1024 tx queue size?
>>>>
>>>>
>>> I don't, but the issue is that vhost-user is an open protocol; there could
>>> even be closed source implementations of it. So we can't audit all
>>> kinds of backends.
>>>
>> This is the discussion left before. Here is my thought:
>>
>> Until now, nobody is officially using 1024 tx queue size, including the
>> unknown
>> vhost-user backend implementation. So, applying this patch won't affect any
>> already deployed products.
>>
>> If someday, people whose products are based on the unknown vhost-user
>> implementation change to try with 1024 tx queue size and apply this patch,
>> and find 1024 doesn't work for them, they can simply set the value to 512
>> (which
>> is better than the 256 size they were using) or apply the 2nd patch entitled
>> "VIRTIO_F_MAX_CHAIN_SIZE" to use 1024 queue size.
>>
>> I didn't see any big issue here. Would you see any issues?
>>
>> @Michael, what is your opinion?
> Since the default does not change, I think it's fine.
> You need to both have a non-default value and be using
> a backend that does not support the 1024 size.
> Seems unlikely to me.
But this patch in fact allows 1024 to be used even for vhost-kernel
after migration from vhost-user?
Thanks
>
>
>>>>>> + }
>>>>>> +
>>>>>> + for (i = 0; i < n->max_queues; i++) {
>>>>>> + virtio_net_add_queue(n, i);
>>>>>> + }
>>>>> Any reason to move virtio_net_add_queue() here?
>>>>>
>>>> Please check the whole init steps. It was moved here (after
>>>> qemu_new_nic())
>>>> to make sure nc->peer is not NULL.
>>> I don't get this, can't you get the peer just from nic_conf.peers?
>>>
>> Correct, nic_conf.peers also works. Thanks.
>>
>> Best,
>> Wei
>>
>>
>>
> ---------------------------------------------------------------------
> To unsubscribe, e-mail: virtio-dev-unsubscribe@lists.oasis-open.org
> For additional commands, e-mail: virtio-dev-help@lists.oasis-open.org
>
* Re: [Qemu-devel] [virtio-dev] Re: [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wang, Wei W @ 2017-06-27 5:22 UTC (permalink / raw)
To: Jason Wang, Michael S. Tsirkin
Cc: eblake@redhat.com, virtio-dev@lists.oasis-open.org,
qemu-devel@nongnu.org, stefanha@gmail.com, armbru@redhat.com,
jan.scheurich@ericsson.com, marcandre.lureau@gmail.com,
pbonzini@redhat.com
On Tuesday, June 27, 2017 10:20 AM, Jason Wang wrote:
> On 2017年06月27日 05:21, Michael S. Tsirkin wrote:
> > On Mon, Jun 26, 2017 at 06:34:25PM +0800, Wei Wang wrote:
> >> On 06/26/2017 04:05 PM, Jason Wang wrote:
> >>>
> But this patch in fact allows 1024 to be used even for vhost-kernel after
> migration from vhost-user?
As discussed before, we only consider migration with the same configuration at this stage.
Best,
Wei
* Re: [Qemu-devel] [PATCH v3 1/2] virtio-net: enable configurable tx queue size
From: Wang, Wei W @ 2017-06-27 5:24 UTC (permalink / raw)
To: Eric Blake, mst@redhat.com, jasowang@redhat.com,
virtio-dev@lists.oasis-open.org, qemu-devel@nongnu.org
Cc: armbru@redhat.com, pbonzini@redhat.com,
jan.scheurich@ericsson.com, marcandre.lureau@gmail.com,
stefanha@gmail.com
On Tuesday, June 27, 2017 9:51 AM, Eric Blake wrote:
> On 06/22/2017 09:32 PM, Wei Wang wrote:
> > This patch enables the virtio-net tx queue size to be configurable
> > between 256 (the default queue size) and 1024 by the user when the
> > vhost-user backend is used.
>
> When sending a multi-patch series, don't forget the 0/2 cover letter.
>
Thanks for the reminder, and I'll do it in the next version.
Best,
Wei