* [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes
@ 2016-02-17 9:35 Victor Kaplansky
2016-02-17 10:04 ` Michael S. Tsirkin
0 siblings, 1 reply; 2+ messages in thread
From: Victor Kaplansky @ 2016-02-17 9:35 UTC (permalink / raw)
To: qemu-devel
Cc: thibaut.collet, Jason Wang, jmg, Didier Pallard,
Michael S. Tsirkin
From: Didier Pallard <didier.pallard@6wind.com>
Since guest_mask_notifier cannot be used in vhost-user mode, due
to the buffering implied by the unix control socket, clear
use_mask_notifier on virtio devices of vhost-user interfaces, and
send the correct callfd to the guest at vhost start.
Using the guest_notifier_mask function in the vhost-user case may
break the interrupt mask paradigm, because mask/unmask is not
really done by the time guest_notifier_mask returns; instead, a
message is posted to a unix socket and processed later.
Add an optional boolean flag 'use_mask_notifier' to disable the use
of guest_notifier_mask in virtio pci.
v3 changes:
In response to Michael S. Tsirkin's comments:
- vhost_net.c: removed dependency on virtio-pci.h
- vhost.c: simplified the check for vhost-user backend,
replaced by checking use_mask_notifier; added comment
explaining why vring for vhost-user initialized in
unmasked state;
- cosmetic fixes.
v2 changes:
- a new boolean field is added to all virtio devices instead
of defining a property in some virtio-pci devices.
Signed-off-by: Victor Kaplansky <victork@redhat.com>
---
include/hw/virtio/virtio.h | 1 +
hw/net/vhost_net.c | 16 ++++++++++++++--
hw/virtio/vhost.c | 13 +++++++++++++
hw/virtio/virtio-pci.c | 14 ++++++++------
hw/virtio/virtio.c | 1 +
5 files changed, 37 insertions(+), 8 deletions(-)
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 108cdb0f..3acbf999 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -90,6 +90,7 @@ struct VirtIODevice
VMChangeStateEntry *vmstate;
char *bus_name;
uint8_t device_endian;
+ bool use_mask_notifier;
QLIST_HEAD(, VirtQueue) *vector_queues;
};
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 3940a04b..8c1ccadb 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -306,13 +306,25 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
}
for (j = 0; j < total_queues; j++) {
+ struct vhost_net *net;
+
r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
if (r < 0) {
goto err_endian;
}
- vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
- }
+ net = get_vhost_net(ncs[j].peer);
+ vhost_net_set_vq_index(net, j * 2);
+
+ /* Force use_mask_notifier reset in vhost user case
+ * Must be done before set_guest_notifier call
+ */
+ if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
+ /* Force virtual device not use mask notifier */
+ dev->use_mask_notifier = false;
+ }
+ }
+
r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
if (r < 0) {
error_report("Error binding guest notifier: %d", -r);
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 7dff7554..f8031fae 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
/* Clear and discard previous events if any. */
event_notifier_test_and_clear(&vq->masked_notifier);
+ /* For vhost user we set vring in unmasked state, since by the
+ * default it masked, and guest_notifier_mask is not used anymore.
+ */
+ if (vdev->use_mask_notifier == false) {
+ file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
+ r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
+ if (r) {
+ r = -errno;
+ goto fail_call;
+ }
+ }
+
return 0;
+fail_call:
fail_kick:
fail_alloc:
cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 5494ff4a..64d263ae 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, set up irqfd now.
* Otherwise, delay until unmasked in the frontend.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_mask_notifier && k->guest_notifier_mask) {
ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
if (ret < 0) {
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -822,7 +822,7 @@ undo:
if (vector >= msix_nr_vectors_allocated(dev)) {
continue;
}
- if (k->guest_notifier_mask) {
+ if (vdev->use_mask_notifier && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
/* If guest supports masking, clean up irqfd now.
* Otherwise, it was cleaned when masked in the frontend.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_mask_notifier && k->guest_notifier_mask) {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
}
kvm_virtio_pci_vq_vector_release(proxy, vector);
@@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
/* If guest supports masking, irqfd is already setup, unmask it.
* Otherwise, set it up now.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_mask_notifier && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, false);
/* Test after unmasking to avoid losing events. */
if (k->guest_notifier_pending &&
@@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
/* If guest supports masking, keep irqfd but mask it.
* Otherwise, clean it up now.
*/
- if (k->guest_notifier_mask) {
+ if (vdev->use_mask_notifier && k->guest_notifier_mask) {
k->guest_notifier_mask(vdev, queue_no, true);
} else {
kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
@@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
event_notifier_cleanup(notifier);
}
- if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
+ if (!msix_enabled(&proxy->pci_dev) &&
+ vdev->use_mask_notifier &&
+ vdc->guest_notifier_mask) {
vdc->guest_notifier_mask(vdev, n, !assign);
}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 90f25451..c0238b39 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
vdev->queue_sel = 0;
vdev->status = 0;
vdev->isr = 0;
+ vdev->use_mask_notifier = true;
vdev->config_vector = VIRTIO_NO_VECTOR;
virtio_notify_vector(vdev, vdev->config_vector);
--
Victor
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes
2016-02-17 9:35 [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes Victor Kaplansky
@ 2016-02-17 10:04 ` Michael S. Tsirkin
0 siblings, 0 replies; 2+ messages in thread
From: Michael S. Tsirkin @ 2016-02-17 10:04 UTC (permalink / raw)
To: Victor Kaplansky
Cc: thibaut.collet, Jason Wang, jmg, qemu-devel, Didier Pallard
Looks good overall, some comments:
On Wed, Feb 17, 2016 at 11:35:48AM +0200, Victor Kaplansky wrote:
> From: Didier Pallard <didier.pallard@6wind.com>
>
> Since guest_mask_notifier can not be used in vhost-user mode due
> to buffering implied by unix control socket, force
> use_mask_notifier on virtio devices of vhost-user interfaces, and
> send correct callfd to the guest at vhost start.
>
> Using guest_notifier_mask function in vhost-user case may
> break interrupt mask paradigm, because mask/unmask is not
> really done when returning from guest_notifier_mask call, instead
> message is posted in a unix socket, and processed later.
>
> Add an option boolean flag 'use_mask_notifier' to disable the use
> of guest_notifier_mask in virtio pci.
The changelog (below) should go after --- so that git am
ignores it. We don't want a permanent record of the changelog.
> v3 changes:
> In respond to Michael S. Tsirkin comments:
> - vhost_net.c: removed dependency on virtio-pci.h
> - vhost.c: simplified the check for vhost-user backend,
> replaced by checking use_mask_notifier; added comment
> explaining why vring for vhost-user initialized in
> unmasked state;
> - cosmetic fixes.
>
> v2 changes:
> - a new boolean field is added to all virtio devices instead
> of defining a property in some virtio-pci devices.
>
>
Avoid double empty lines pls.
> Signed-off-by: Victor Kaplansky <victork@redhat.com>
> ---
> include/hw/virtio/virtio.h | 1 +
> hw/net/vhost_net.c | 16 ++++++++++++++--
> hw/virtio/vhost.c | 13 +++++++++++++
> hw/virtio/virtio-pci.c | 14 ++++++++------
> hw/virtio/virtio.c | 1 +
> 5 files changed, 37 insertions(+), 8 deletions(-)
>
> diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
> index 108cdb0f..3acbf999 100644
> --- a/include/hw/virtio/virtio.h
> +++ b/include/hw/virtio/virtio.h
> @@ -90,6 +90,7 @@ struct VirtIODevice
> VMChangeStateEntry *vmstate;
> char *bus_name;
> uint8_t device_endian;
> + bool use_mask_notifier;
Hmm it isn't a mask notifier we use.
This flag tells us whether device can, and we should, mask guest notifiers.
So enable_guest_notifier_mask ?
vhost_virtqueue_mask should also probably assert that this is set
if mask == true.
> QLIST_HEAD(, VirtQueue) *vector_queues;
> };
>
> diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
> index 3940a04b..8c1ccadb 100644
> --- a/hw/net/vhost_net.c
> +++ b/hw/net/vhost_net.c
> @@ -306,13 +306,25 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs,
> }
>
> for (j = 0; j < total_queues; j++) {
> + struct vhost_net *net;
> +
> r = vhost_net_set_vnet_endian(dev, ncs[j].peer, true);
> if (r < 0) {
> goto err_endian;
> }
> - vhost_net_set_vq_index(get_vhost_net(ncs[j].peer), j * 2);
> - }
>
> + net = get_vhost_net(ncs[j].peer);
> + vhost_net_set_vq_index(net, j * 2);
> +
> + /* Force use_mask_notifier reset in vhost user case
> + * Must be done before set_guest_notifier call
> + */
> + if (net->nc->info->type == NET_CLIENT_OPTIONS_KIND_VHOST_USER) {
> + /* Force virtual device not use mask notifier */
A better comment would explain
- why do we suppress mask notifier for vhost user only
- what happens otherwise (where is it set to true)
> + dev->use_mask_notifier = false;
> + }
Can't the logic be moved to vhost-user.c? That would be cleaner.
> + }
> +
> r = k->set_guest_notifiers(qbus->parent, total_queues * 2, true);
> if (r < 0) {
> error_report("Error binding guest notifier: %d", -r);
> diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
> index 7dff7554..f8031fae 100644
> --- a/hw/virtio/vhost.c
> +++ b/hw/virtio/vhost.c
> @@ -855,8 +855,21 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
> /* Clear and discard previous events if any. */
> event_notifier_test_and_clear(&vq->masked_notifier);
>
> + /* For vhost user we set vring in unmasked state, since by the
> + * default it masked, and guest_notifier_mask is not used anymore.
> + */
> + if (vdev->use_mask_notifier == false) {
!vdev->use_mask_notifier please.
> + file.fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vvq));
> + r = dev->vhost_ops->vhost_set_vring_call(dev, &file);
> + if (r) {
> + r = -errno;
> + goto fail_call;
> + }
You can just call vhost_virtqueue_mask with mask == false,
seems a bit cleaner.
> + }
> +
> return 0;
>
> +fail_call:
> fail_kick:
> fail_alloc:
> cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
> diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
> index 5494ff4a..64d263ae 100644
> --- a/hw/virtio/virtio-pci.c
> +++ b/hw/virtio/virtio-pci.c
> @@ -806,7 +806,7 @@ static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
> /* If guest supports masking, set up irqfd now.
> * Otherwise, delay until unmasked in the frontend.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_mask_notifier && k->guest_notifier_mask) {
> ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
> if (ret < 0) {
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -822,7 +822,7 @@ undo:
> if (vector >= msix_nr_vectors_allocated(dev)) {
> continue;
> }
> - if (k->guest_notifier_mask) {
> + if (vdev->use_mask_notifier && k->guest_notifier_mask) {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -849,7 +849,7 @@ static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
> /* If guest supports masking, clean up irqfd now.
> * Otherwise, it was cleaned when masked in the frontend.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_mask_notifier && k->guest_notifier_mask) {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> }
> kvm_virtio_pci_vq_vector_release(proxy, vector);
> @@ -882,7 +882,7 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
> /* If guest supports masking, irqfd is already setup, unmask it.
> * Otherwise, set it up now.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_mask_notifier && k->guest_notifier_mask) {
> k->guest_notifier_mask(vdev, queue_no, false);
> /* Test after unmasking to avoid losing events. */
> if (k->guest_notifier_pending &&
> @@ -905,7 +905,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
> /* If guest supports masking, keep irqfd but mask it.
> * Otherwise, clean it up now.
> */
> - if (k->guest_notifier_mask) {
> + if (vdev->use_mask_notifier && k->guest_notifier_mask) {
> k->guest_notifier_mask(vdev, queue_no, true);
> } else {
> kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
> @@ -1022,7 +1022,9 @@ static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
> event_notifier_cleanup(notifier);
> }
>
> - if (!msix_enabled(&proxy->pci_dev) && vdc->guest_notifier_mask) {
> + if (!msix_enabled(&proxy->pci_dev) &&
> + vdev->use_mask_notifier &&
> + vdc->guest_notifier_mask) {
> vdc->guest_notifier_mask(vdev, n, !assign);
> }
>
> diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
> index 90f25451..c0238b39 100644
> --- a/hw/virtio/virtio.c
> +++ b/hw/virtio/virtio.c
> @@ -792,6 +792,7 @@ void virtio_reset(void *opaque)
> vdev->queue_sel = 0;
> vdev->status = 0;
> vdev->isr = 0;
> + vdev->use_mask_notifier = true;
> vdev->config_vector = VIRTIO_NO_VECTOR;
> virtio_notify_vector(vdev, vdev->config_vector);
Why isn't this in virtio_init?
Do we need to do this on each reset?
> --
> Victor
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2016-02-17 10:04 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2016-02-17 9:35 [Qemu-devel] [PATCH v3 1/1] vhost-user interrupt management fixes Victor Kaplansky
2016-02-17 10:04 ` Michael S. Tsirkin
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).