* [Qemu-devel] [PATCH] vhost-user: multiqueue support
@ 2014-12-06 16:52 Nikolay Nikolaev
2014-12-08 9:04 ` Olivier MATZ
` (2 more replies)
0 siblings, 3 replies; 6+ messages in thread
From: Nikolay Nikolaev @ 2014-12-06 16:52 UTC (permalink / raw)
To: thomas.long, snabb-devel, eblake, qemu-devel, mst; +Cc: tech, n.nikolaev
Vhost-user will implement the multiqueue support in a similar way to what
vhost already has - a separate thread for each queue.
To enable multiqueue functionality - a new command line parameter
"queues" is introduced for the vhost-user netdev.
Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
---
docs/specs/vhost-user.txt | 7 +++++++
hw/virtio/vhost-user.c | 6 +++++-
net/vhost-user.c | 35 +++++++++++++++++++++++------------
qapi-schema.json | 5 ++++-
qemu-options.hx | 5 +++--
5 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
index 650bb18..d3857f5 100644
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,6 +127,13 @@ in the ancillary data:
If Master is unable to send the full message or receives a wrong reply it will
close the connection. An optional reconnection mechanism can be implemented.
+Multi queue suport
+---------------------
+The protocol supports multiple queues by setting all index fields in the sent
+messages to a value calculated by the following formula:
+<queue idx> + <vring idx>
+The <queue idx> is increased by 2.
+
Message types
-------------
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index aefe0bb..83ebcaa 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
case VHOST_SET_VRING_NUM:
case VHOST_SET_VRING_BASE:
memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+ msg.state.index += dev->vq_index;
msg.size = sizeof(m.state);
break;
case VHOST_GET_VRING_BASE:
memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
+ msg.state.index += dev->vq_index;
msg.size = sizeof(m.state);
need_reply = 1;
break;
case VHOST_SET_VRING_ADDR:
memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
+ msg.addr.index += dev->vq_index;
msg.size = sizeof(m.addr);
break;
@@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
case VHOST_SET_VRING_CALL:
case VHOST_SET_VRING_ERR:
file = arg;
- msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
+ msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
msg.size = sizeof(m.u64);
if (ioeventfd_enabled() && file->fd > 0) {
fds[fd_num++] = file->fd;
@@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
error_report("Received bad msg size.\n");
return -1;
}
+ msg.state.index -= dev->vq_index;
memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
break;
default:
diff --git a/net/vhost-user.c b/net/vhost-user.c
index 24e050c..1ea2f98 100644
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -134,25 +134,27 @@ static void net_vhost_user_event(void *opaque, int event)
static int net_vhost_user_init(NetClientState *peer, const char *device,
const char *name, CharDriverState *chr,
- bool vhostforce)
+ bool vhostforce, uint32_t queues)
{
NetClientState *nc;
VhostUserState *s;
+ int i;
- nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+ for (i = 0; i < queues; i++) {
+ nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
- snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
- chr->label);
+ snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
+ i, chr->label);
- s = DO_UPCAST(VhostUserState, nc, nc);
+ s = DO_UPCAST(VhostUserState, nc, nc);
- /* We don't provide a receive callback */
- s->nc.receive_disabled = 1;
- s->chr = chr;
- s->vhostforce = vhostforce;
-
- qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+ /* We don't provide a receive callback */
+ s->nc.receive_disabled = 1;
+ s->chr = chr;
+ s->vhostforce = vhostforce;
+ qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+ }
return 0;
}
@@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
int net_init_vhost_user(const NetClientOptions *opts, const char *name,
NetClientState *peer)
{
+ uint32_t queues;
const NetdevVhostUserOptions *vhost_user_opts;
CharDriverState *chr;
bool vhostforce;
@@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
vhostforce = false;
}
- return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
+ /* number of queues for multiqueue */
+ if (vhost_user_opts->has_queues) {
+ queues = vhost_user_opts->queues;
+ } else {
+ queues = 1;
+ }
+
+ return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
+ queues);
}
diff --git a/qapi-schema.json b/qapi-schema.json
index 9ffdcf8..aa3bb6f 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2208,12 +2208,15 @@
#
# @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
#
+# @queues: #optional number of queues to be created for multiqueue vhost-user
+#
# Since 2.1
##
{ 'type': 'NetdevVhostUserOptions',
'data': {
'chardev': 'str',
- '*vhostforce': 'bool' } }
+ '*vhostforce': 'bool',
+ '*queues': 'uint32' } }
##
# @NetClientOptions
diff --git a/qemu-options.hx b/qemu-options.hx
index 64af16d..23f010f 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1893,13 +1893,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
required hub automatically.
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
be a unix domain socket backed one. The vhost-user uses a specifically defined
protocol to pass vhost ioctl replacement messages to an application on the other
end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}.
+@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
+be created for multiqueue vhost-user.
Example:
@example
^ permalink raw reply related [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH] vhost-user: multiqueue support
2014-12-06 16:52 [Qemu-devel] [PATCH] vhost-user: multiqueue support Nikolay Nikolaev
@ 2014-12-08 9:04 ` Olivier MATZ
2014-12-08 9:41 ` Michael S. Tsirkin
2014-12-08 15:58 ` Eric Blake
2015-01-21 14:25 ` Michael S. Tsirkin
2 siblings, 1 reply; 6+ messages in thread
From: Olivier MATZ @ 2014-12-08 9:04 UTC (permalink / raw)
To: Nikolay Nikolaev, thomas.long, snabb-devel, eblake, qemu-devel,
mst; +Cc: tech
Hi Nikolay,
On 12/06/2014 05:52 PM, Nikolay Nikolaev wrote:
> Vhost-user will implement the multiqueueu support in a similar way to what
> vhost already has - a separate thread for each queue.
>
> To enable multiquue funcionality - a new command line parameter
> "queues" is introduced for the vhost-user netdev.
>
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> ---
> docs/specs/vhost-user.txt | 7 +++++++
> hw/virtio/vhost-user.c | 6 +++++-
> net/vhost-user.c | 35 +++++++++++++++++++++++------------
> qapi-schema.json | 5 ++++-
> qemu-options.hx | 5 +++--
> 5 files changed, 42 insertions(+), 16 deletions(-)
> [...]
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -134,25 +134,27 @@ static void net_vhost_user_event(void *opaque, int event)
>
> static int net_vhost_user_init(NetClientState *peer, const char *device,
> const char *name, CharDriverState *chr,
> - bool vhostforce)
> + bool vhostforce, uint32_t queues)
> {
> NetClientState *nc;
> VhostUserState *s;
> + int i;
>
> - nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> + for (i = 0; i < queues; i++) {
> + nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
>
> - snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> - chr->label);
> + snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> + i, chr->label);
>
Now that there several vhost-user are pointing to the same unix socket,
it could make sense to display "nc->info_str" instead of "s->chr->label"
in net_vhost_user_event(). Something like that:
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -122,36 +122,39 @@ static void net_vhost_user_event(void *opaque, int
event)
case CHR_EVENT_OPENED:
vhost_user_start(s);
net_vhost_link_down(s, false);
- error_report("chardev \"%s\" went up\n", s->chr->label);
+ error_report("chardev \"%s\" went up\n", s->nc.info_str);
break;
case CHR_EVENT_CLOSED:
net_vhost_link_down(s, true);
vhost_user_stop(s);
- error_report("chardev \"%s\" went down\n", s->chr->label);
+ error_report("chardev \"%s\" went down\n", s->nc.info_str);
break;
}
}
Also, another comment: if I understand well, the messages like
VHOST_USER_SET_OWNER, VHOST_USER_SET_FEATURES, VHOST_SET_MEM_TABLE,
(...) will be send once per queue pair and not once per device.
I don't think it's a problem, but maybe it deserves a small comment
in the protocol documentation.
Apart from these 2 small comments, the approach looks correct, so
Acked-by: Olivier Matz <olivier.matz@6wind.com>
Regards,
Olivier
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH] vhost-user: multiqueue support
2014-12-08 9:04 ` Olivier MATZ
@ 2014-12-08 9:41 ` Michael S. Tsirkin
0 siblings, 0 replies; 6+ messages in thread
From: Michael S. Tsirkin @ 2014-12-08 9:41 UTC (permalink / raw)
To: Olivier MATZ; +Cc: snabb-devel, qemu-devel, Nikolay Nikolaev, thomas.long, tech
On Mon, Dec 08, 2014 at 10:04:37AM +0100, Olivier MATZ wrote:
> Hi Nikolay,
>
> On 12/06/2014 05:52 PM, Nikolay Nikolaev wrote:
> > Vhost-user will implement the multiqueueu support in a similar way to what
> > vhost already has - a separate thread for each queue.
> >
> > To enable multiquue funcionality - a new command line parameter
> > "queues" is introduced for the vhost-user netdev.
> >
> > Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> > ---
> > docs/specs/vhost-user.txt | 7 +++++++
> > hw/virtio/vhost-user.c | 6 +++++-
> > net/vhost-user.c | 35 +++++++++++++++++++++++------------
> > qapi-schema.json | 5 ++++-
> > qemu-options.hx | 5 +++--
> > 5 files changed, 42 insertions(+), 16 deletions(-)
> > [...]
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -134,25 +134,27 @@ static void net_vhost_user_event(void *opaque, int event)
> >
> > static int net_vhost_user_init(NetClientState *peer, const char *device,
> > const char *name, CharDriverState *chr,
> > - bool vhostforce)
> > + bool vhostforce, uint32_t queues)
> > {
> > NetClientState *nc;
> > VhostUserState *s;
> > + int i;
> >
> > - nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> > + for (i = 0; i < queues; i++) {
> > + nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> >
> > - snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> > - chr->label);
> > + snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> > + i, chr->label);
> >
>
> Now that there several vhost-user are pointing to the same unix socket,
> it could make sense to display "nc->info_str" instead of "s->chr->label"
> in net_vhost_user_event(). Something like that:
>
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -122,36 +122,39 @@ static void net_vhost_user_event(void *opaque, int
> event)
> case CHR_EVENT_OPENED:
> vhost_user_start(s);
> net_vhost_link_down(s, false);
> - error_report("chardev \"%s\" went up\n", s->chr->label);
> + error_report("chardev \"%s\" went up\n", s->nc.info_str);
> break;
> case CHR_EVENT_CLOSED:
> net_vhost_link_down(s, true);
> vhost_user_stop(s);
> - error_report("chardev \"%s\" went down\n", s->chr->label);
> + error_report("chardev \"%s\" went down\n", s->nc.info_str);
> break;
> }
> }
>
>
> Also, another comment: if I understand well, the messages like
> VHOST_USER_SET_OWNER, VHOST_USER_SET_FEATURES, VHOST_SET_MEM_TABLE,
> (...) will be send once per queue pair and not once per device.
One wonders why that's necessary.
> I don't think it's a problem, but maybe it deserves a small comment
> in the protocol documentation.
>
>
> Apart from these 2 small comments, the approach looks correct, so
> Acked-by: Olivier Matz <olivier.matz@6wind.com>
>
>
> Regards,
> Olivier
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH] vhost-user: multiqueue support
2014-12-06 16:52 [Qemu-devel] [PATCH] vhost-user: multiqueue support Nikolay Nikolaev
2014-12-08 9:04 ` Olivier MATZ
@ 2014-12-08 15:58 ` Eric Blake
2015-01-21 14:25 ` Michael S. Tsirkin
2 siblings, 0 replies; 6+ messages in thread
From: Eric Blake @ 2014-12-08 15:58 UTC (permalink / raw)
To: Nikolay Nikolaev, thomas.long, snabb-devel, qemu-devel, mst; +Cc: tech
[-- Attachment #1: Type: text/plain, Size: 789 bytes --]
On 12/06/2014 09:52 AM, Nikolay Nikolaev wrote:
> Vhost-user will implement the multiqueueu support in a similar way to what
> vhost already has - a separate thread for each queue.
>
> To enable multiquue funcionality - a new command line parameter
> "queues" is introduced for the vhost-user netdev.
>
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
> ---
> @@ -2208,12 +2208,15 @@
> #
> # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
> #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user
Missing a '(since 2.3)' designation. What is the default when it is
omitted?
--
Eric Blake eblake redhat com +1-919-301-3266
Libvirt virtualization library http://libvirt.org
[-- Attachment #2: OpenPGP digital signature --]
[-- Type: application/pgp-signature, Size: 539 bytes --]
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [PATCH] vhost-user: multiqueue support
2014-12-06 16:52 [Qemu-devel] [PATCH] vhost-user: multiqueue support Nikolay Nikolaev
2014-12-08 9:04 ` Olivier MATZ
2014-12-08 15:58 ` Eric Blake
@ 2015-01-21 14:25 ` Michael S. Tsirkin
2015-01-22 8:14 ` [Qemu-devel] [snabb-devel] " Nikolay Nikolaev
2 siblings, 1 reply; 6+ messages in thread
From: Michael S. Tsirkin @ 2015-01-21 14:25 UTC (permalink / raw)
To: Nikolay Nikolaev; +Cc: snabb-devel, thomas.long, qemu-devel, tech
On Sat, Dec 06, 2014 at 06:52:56PM +0200, Nikolay Nikolaev wrote:
> Vhost-user will implement the multiqueueu support in a similar way to what
> vhost already has - a separate thread for each queue.
>
> To enable multiquue funcionality - a new command line parameter
> "queues" is introduced for the vhost-user netdev.
>
> Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
Nikolay - plan to repost addressing comments?
> ---
> docs/specs/vhost-user.txt | 7 +++++++
> hw/virtio/vhost-user.c | 6 +++++-
> net/vhost-user.c | 35 +++++++++++++++++++++++------------
> qapi-schema.json | 5 ++++-
> qemu-options.hx | 5 +++--
> 5 files changed, 42 insertions(+), 16 deletions(-)
>
> diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> index 650bb18..d3857f5 100644
> --- a/docs/specs/vhost-user.txt
> +++ b/docs/specs/vhost-user.txt
> @@ -127,6 +127,13 @@ in the ancillary data:
> If Master is unable to send the full message or receives a wrong reply it will
> close the connection. An optional reconnection mechanism can be implemented.
>
> +Multi queue suport
> +---------------------
> +The protocol supports multiple queues by setting all index fields in the sent
> +messages to a value calculated by the following formula:
> +<queue idx> + <vring idx>
> +The <queue idx> is increased by 2.
> +
> Message types
> -------------
>
How is the support negotiated though?
What if I set queues=N with a legacy backend that
does not support multiqueue?
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index aefe0bb..83ebcaa 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> case VHOST_SET_VRING_NUM:
> case VHOST_SET_VRING_BASE:
> memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> + msg.state.index += dev->vq_index;
> msg.size = sizeof(m.state);
> break;
>
> case VHOST_GET_VRING_BASE:
> memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> + msg.state.index += dev->vq_index;
> msg.size = sizeof(m.state);
> need_reply = 1;
> break;
>
> case VHOST_SET_VRING_ADDR:
> memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
> + msg.addr.index += dev->vq_index;
> msg.size = sizeof(m.addr);
> break;
>
> @@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> case VHOST_SET_VRING_CALL:
> case VHOST_SET_VRING_ERR:
> file = arg;
> - msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
> + msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
> msg.size = sizeof(m.u64);
> if (ioeventfd_enabled() && file->fd > 0) {
> fds[fd_num++] = file->fd;
> @@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> error_report("Received bad msg size.\n");
> return -1;
> }
> + msg.state.index -= dev->vq_index;
> memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
> break;
> default:
> diff --git a/net/vhost-user.c b/net/vhost-user.c
> index 24e050c..1ea2f98 100644
> --- a/net/vhost-user.c
> +++ b/net/vhost-user.c
> @@ -134,25 +134,27 @@ static void net_vhost_user_event(void *opaque, int event)
>
> static int net_vhost_user_init(NetClientState *peer, const char *device,
> const char *name, CharDriverState *chr,
> - bool vhostforce)
> + bool vhostforce, uint32_t queues)
> {
> NetClientState *nc;
> VhostUserState *s;
> + int i;
>
> - nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> + for (i = 0; i < queues; i++) {
> + nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
>
> - snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> - chr->label);
> + snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> + i, chr->label);
>
> - s = DO_UPCAST(VhostUserState, nc, nc);
> + s = DO_UPCAST(VhostUserState, nc, nc);
>
> - /* We don't provide a receive callback */
> - s->nc.receive_disabled = 1;
> - s->chr = chr;
> - s->vhostforce = vhostforce;
> -
> - qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> + /* We don't provide a receive callback */
> + s->nc.receive_disabled = 1;
> + s->chr = chr;
> + s->vhostforce = vhostforce;
>
> + qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> + }
> return 0;
> }
>
> @@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
> int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> NetClientState *peer)
> {
> + uint32_t queues;
> const NetdevVhostUserOptions *vhost_user_opts;
> CharDriverState *chr;
> bool vhostforce;
> @@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> vhostforce = false;
> }
>
> - return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
> + /* number of queues for multiqueue */
> + if (vhost_user_opts->has_queues) {
> + queues = vhost_user_opts->queues;
> + } else {
> + queues = 1;
> + }
> +
> + return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
> + queues);
> }
> diff --git a/qapi-schema.json b/qapi-schema.json
> index 9ffdcf8..aa3bb6f 100644
> --- a/qapi-schema.json
> +++ b/qapi-schema.json
> @@ -2208,12 +2208,15 @@
> #
> # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
> #
> +# @queues: #optional number of queues to be created for multiqueue vhost-user
> +#
> # Since 2.1
> ##
> { 'type': 'NetdevVhostUserOptions',
> 'data': {
> 'chardev': 'str',
> - '*vhostforce': 'bool' } }
> + '*vhostforce': 'bool',
> + '*queues': 'uint32' } }
>
> ##
> # @NetClientOptions
> diff --git a/qemu-options.hx b/qemu-options.hx
> index 64af16d..23f010f 100644
> --- a/qemu-options.hx
> +++ b/qemu-options.hx
> @@ -1893,13 +1893,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
> netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
> required hub automatically.
>
> -@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
> +@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
>
> Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
> be a unix domain socket backed one. The vhost-user uses a specifically defined
> protocol to pass vhost ioctl replacement messages to an application on the other
> end of the socket. On non-MSIX guests, the feature can be forced with
> -@var{vhostforce}.
> +@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
> +be created for multiqueue vhost-user.
>
> Example:
> @example
>
^ permalink raw reply [flat|nested] 6+ messages in thread
* Re: [Qemu-devel] [snabb-devel] Re: [PATCH] vhost-user: multiqueue support
2015-01-21 14:25 ` Michael S. Tsirkin
@ 2015-01-22 8:14 ` Nikolay Nikolaev
0 siblings, 0 replies; 6+ messages in thread
From: Nikolay Nikolaev @ 2015-01-22 8:14 UTC (permalink / raw)
To: snabb-devel@googlegroups.com
Cc: VirtualOpenSystems Technical Team, Long, Thomas, qemu-devel
On Wed, Jan 21, 2015 at 4:25 PM, Michael S. Tsirkin <mst@redhat.com> wrote:
>
> On Sat, Dec 06, 2014 at 06:52:56PM +0200, Nikolay Nikolaev wrote:
> > Vhost-user will implement the multiqueueu support in a similar way to what
> > vhost already has - a separate thread for each queue.
> >
> > To enable multiquue funcionality - a new command line parameter
> > "queues" is introduced for the vhost-user netdev.
> >
> > Signed-off-by: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
>
> Nikolay - plan to repost addressing comments?
I can send v2 with the small polishing fixes.
However we currently don't have the resources to fix sending
VHOST_USER_SET_OWNER, VHOST_USER_SET_FEATURES, VHOST_SET_MEM_TABLE
messages per virtq. It works like this as it follows the tap backend
behavior, which AFAIK assigns a different tap device per virtq and
initializes them separately. At least I don't see an obvious and quick
patch around this.
>
>
> > ---
> > docs/specs/vhost-user.txt | 7 +++++++
> > hw/virtio/vhost-user.c | 6 +++++-
> > net/vhost-user.c | 35 +++++++++++++++++++++++------------
> > qapi-schema.json | 5 ++++-
> > qemu-options.hx | 5 +++--
> > 5 files changed, 42 insertions(+), 16 deletions(-)
> >
> > diff --git a/docs/specs/vhost-user.txt b/docs/specs/vhost-user.txt
> > index 650bb18..d3857f5 100644
> > --- a/docs/specs/vhost-user.txt
> > +++ b/docs/specs/vhost-user.txt
> > @@ -127,6 +127,13 @@ in the ancillary data:
> > If Master is unable to send the full message or receives a wrong reply it will
> > close the connection. An optional reconnection mechanism can be implemented.
> >
> > +Multi queue suport
> > +---------------------
> > +The protocol supports multiple queues by setting all index fields in the sent
> > +messages to a value calculated by the following formula:
> > +<queue idx> + <vring idx>
> > +The <queue idx> is increased by 2.
> > +
> > Message types
> > -------------
> >
>
> How is the support negotiated though?
Not negotiated. Will a version bump be enough ?
> What if I set queues=N with a legacy backend that
> does not support multiqueue?
It is supposed that the device that does not support MQ won't
advertise VIRTIO_NET_F_MQ, thus not utilising more than 1 queue. Still
the initialisation of the multiple virtqs will happen.
regards,
Nikolay Nikolaev
>
> > diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> > index aefe0bb..83ebcaa 100644
> > --- a/hw/virtio/vhost-user.c
> > +++ b/hw/virtio/vhost-user.c
> > @@ -253,17 +253,20 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> > case VHOST_SET_VRING_NUM:
> > case VHOST_SET_VRING_BASE:
> > memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > + msg.state.index += dev->vq_index;
> > msg.size = sizeof(m.state);
> > break;
> >
> > case VHOST_GET_VRING_BASE:
> > memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
> > + msg.state.index += dev->vq_index;
> > msg.size = sizeof(m.state);
> > need_reply = 1;
> > break;
> >
> > case VHOST_SET_VRING_ADDR:
> > memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
> > + msg.addr.index += dev->vq_index;
> > msg.size = sizeof(m.addr);
> > break;
> >
> > @@ -271,7 +274,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> > case VHOST_SET_VRING_CALL:
> > case VHOST_SET_VRING_ERR:
> > file = arg;
> > - msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
> > + msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
> > msg.size = sizeof(m.u64);
> > if (ioeventfd_enabled() && file->fd > 0) {
> > fds[fd_num++] = file->fd;
> > @@ -313,6 +316,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
> > error_report("Received bad msg size.\n");
> > return -1;
> > }
> > + msg.state.index -= dev->vq_index;
> > memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
> > break;
> > default:
> > diff --git a/net/vhost-user.c b/net/vhost-user.c
> > index 24e050c..1ea2f98 100644
> > --- a/net/vhost-user.c
> > +++ b/net/vhost-user.c
> > @@ -134,25 +134,27 @@ static void net_vhost_user_event(void *opaque, int event)
> >
> > static int net_vhost_user_init(NetClientState *peer, const char *device,
> > const char *name, CharDriverState *chr,
> > - bool vhostforce)
> > + bool vhostforce, uint32_t queues)
> > {
> > NetClientState *nc;
> > VhostUserState *s;
> > + int i;
> >
> > - nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> > + for (i = 0; i < queues; i++) {
> > + nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
> >
> > - snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
> > - chr->label);
> > + snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
> > + i, chr->label);
> >
> > - s = DO_UPCAST(VhostUserState, nc, nc);
> > + s = DO_UPCAST(VhostUserState, nc, nc);
> >
> > - /* We don't provide a receive callback */
> > - s->nc.receive_disabled = 1;
> > - s->chr = chr;
> > - s->vhostforce = vhostforce;
> > -
> > - qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> > + /* We don't provide a receive callback */
> > + s->nc.receive_disabled = 1;
> > + s->chr = chr;
> > + s->vhostforce = vhostforce;
> >
> > + qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
> > + }
> > return 0;
> > }
> >
> > @@ -228,6 +230,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque)
> > int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> > NetClientState *peer)
> > {
> > + uint32_t queues;
> > const NetdevVhostUserOptions *vhost_user_opts;
> > CharDriverState *chr;
> > bool vhostforce;
> > @@ -254,5 +257,13 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
> > vhostforce = false;
> > }
> >
> > - return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce);
> > + /* number of queues for multiqueue */
> > + if (vhost_user_opts->has_queues) {
> > + queues = vhost_user_opts->queues;
> > + } else {
> > + queues = 1;
> > + }
> > +
> > + return net_vhost_user_init(peer, "vhost_user", name, chr, vhostforce,
> > + queues);
> > }
> > diff --git a/qapi-schema.json b/qapi-schema.json
> > index 9ffdcf8..aa3bb6f 100644
> > --- a/qapi-schema.json
> > +++ b/qapi-schema.json
> > @@ -2208,12 +2208,15 @@
> > #
> > # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
> > #
> > +# @queues: #optional number of queues to be created for multiqueue vhost-user
> > +#
> > # Since 2.1
> > ##
> > { 'type': 'NetdevVhostUserOptions',
> > 'data': {
> > 'chardev': 'str',
> > - '*vhostforce': 'bool' } }
> > + '*vhostforce': 'bool',
> > + '*queues': 'uint32' } }
> >
> > ##
> > # @NetClientOptions
> > diff --git a/qemu-options.hx b/qemu-options.hx
> > index 64af16d..23f010f 100644
> > --- a/qemu-options.hx
> > +++ b/qemu-options.hx
> > @@ -1893,13 +1893,14 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
> > netdev. @code{-net} and @code{-device} with parameter @option{vlan} create the
> > required hub automatically.
> >
> > -@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
> > +@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
> >
> > Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
> > be a unix domain socket backed one. The vhost-user uses a specifically defined
> > protocol to pass vhost ioctl replacement messages to an application on the other
> > end of the socket. On non-MSIX guests, the feature can be forced with
> > -@var{vhostforce}.
> > +@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
> > +be created for multiqueue vhost-user.
> >
> > Example:
> > @example
> >
>
> --
> You received this message because you are subscribed to the Google Groups "Snabb Switch development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to snabb-devel+unsubscribe@googlegroups.com.
> To post to this group, send an email to snabb-devel@googlegroups.com.
> Visit this group at http://groups.google.com/group/snabb-devel.
^ permalink raw reply [flat|nested] 6+ messages in thread
end of thread, other threads:[~2015-01-22 8:15 UTC | newest]
Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2014-12-06 16:52 [Qemu-devel] [PATCH] vhost-user: multiqueue support Nikolay Nikolaev
2014-12-08 9:04 ` Olivier MATZ
2014-12-08 9:41 ` Michael S. Tsirkin
2014-12-08 15:58 ` Eric Blake
2015-01-21 14:25 ` Michael S. Tsirkin
2015-01-22 8:14 ` [Qemu-devel] [snabb-devel] " Nikolay Nikolaev
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).