From: Cindy Lu <lulu@redhat.com>
To: lulu@redhat.com, mst@redhat.com, jasowang@redhat.com,
qemu-devel@nongnu.org
Subject: [PATCH v5 3/6] virtio: add support for configure interrupt
Date: Thu, 8 Apr 2021 17:38:21 +0800
Message-ID: <20210408093824.14985-4-lulu@redhat.com>
In-Reply-To: <20210408093824.14985-1-lulu@redhat.com>
Add configure interrupt (config notifier) support to virtio and the
related drivers. When the peer is vhost-vdpa, set up the configure
interrupt in vhost_net_start() and release the resources in
vhost_net_stop().
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
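A minimal sketch (not part of the commit) of how a transport that lacks
irqfd support could consume the config-notifier API added below. The
helper name example_set_config_guest_notifier() is a placeholder for
illustration only; the real transport wiring lands in the virtio-mmio
and virtio-pci patches of this series:

#include "qemu/osdep.h"
#include "hw/virtio/virtio.h"

/* Placeholder helper: wire (or unwire) the device's config notifier. */
static int example_set_config_guest_notifier(VirtIODevice *vdev, bool assign,
                                             bool with_irqfd)
{
    EventNotifier *notifier = virtio_get_config_notifier(vdev);
    int r;

    if (assign) {
        r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        /* Without irqfd, read the event in QEMU and raise the config IRQ. */
        virtio_set_config_notifier_fd_handler(vdev, true, with_irqfd);
        vdev->use_config_notifier = true;
    } else {
        virtio_set_config_notifier_fd_handler(vdev, false, with_irqfd);
        event_notifier_cleanup(notifier);
        vdev->use_config_notifier = false;
    }
    return 0;
}

At run time, a config-change event from the vhost backend fires
vdev->config_notifier; without irqfd the fd handler calls
virtio_notify_config(). When the guest masks the config vector,
vhost_config_mask() points the backend's config call fd at
masked_config_notifier instead, and vhost_config_pending() reports
whether an event arrived while the vector was masked.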
hw/net/vhost_net.c | 9 +++++++++
hw/net/virtio-net.c | 6 ++++++
hw/virtio/vhost.c | 38 +++++++++++++++++++++++++++++++++++++-
hw/virtio/virtio.c | 25 +++++++++++++++++++++++++
include/hw/virtio/vhost.h | 3 +++
include/hw/virtio/virtio.h | 5 +++++
include/net/vhost_net.h | 3 +++
7 files changed, 88 insertions(+), 1 deletion(-)
diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c
index 24d555e764..12e30dc25e 100644
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -426,6 +426,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
vhost_virtqueue_mask(&net->dev, dev, idx, mask);
}
+bool vhost_net_config_pending(VHostNetState *net, int idx)
+{
+ return vhost_config_pending(&net->dev, idx);
+}
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+ bool mask)
+{
+ vhost_config_mask(&net->dev, dev, mask);
+}
VHostNetState *get_vhost_net(NetClientState *nc)
{
VHostNetState *vhost_net = 0;
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 00d95e6615..e30a7d9835 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3064,6 +3064,9 @@ static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx,
if (type == VIRTIO_VQ_VECTOR) {
return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
+ if (type == VIRTIO_CONFIG_VECTOR) {
+ return vhost_net_config_pending(get_vhost_net(nc->peer), idx);
+ }
return false;
}
@@ -3076,6 +3079,9 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
if (type == VIRTIO_VQ_VECTOR) {
vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
}
+ if (type == VIRTIO_CONFIG_VECTOR) {
+ vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
+ }
}
static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 614ccc2bcb..b5e915d5cf 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1313,6 +1313,10 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
goto fail;
}
}
+ r = event_notifier_init(&hdev->masked_config_notifier, 0);
+ if (r < 0) {
+ return r;
+ }
if (busyloop_timeout) {
for (i = 0; i < hdev->nvqs; ++i) {
@@ -1405,6 +1409,7 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
+ event_notifier_cleanup(&hdev->masked_config_notifier);
if (hdev->mem) {
/* those are only safe after successful init */
memory_listener_unregister(&hdev->memory_listener);
@@ -1498,6 +1503,10 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n)
return event_notifier_test_and_clear(&vq->masked_notifier);
}
+bool vhost_config_pending(struct vhost_dev *hdev, int n)
+{
+ return event_notifier_test_and_clear(&hdev->masked_config_notifier);
+}
/* Mask/unmask events from this vq. */
void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
bool mask)
@@ -1522,6 +1531,28 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
VHOST_OPS_DEBUG("vhost_set_vring_call failed");
}
}
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev,
+ bool mask)
+{
+ int fd;
+ int r;
+ EventNotifier *masked_config_notifier = &hdev->masked_config_notifier;
+ EventNotifier *config_notifier = &vdev->config_notifier;
+ if (vdev->use_config_notifier != true) {
+ return;
+ }
+ assert(hdev->vhost_ops);
+ if (mask) {
+ assert(vdev->use_guest_notifier_mask);
+ fd = event_notifier_get_fd(masked_config_notifier);
+ } else {
+ fd = event_notifier_get_fd(config_notifier);
+ }
+ r = hdev->vhost_ops->vhost_set_config_call(hdev, &fd);
+ if (r < 0) {
+ error_report("vhost_set_config_call failed");
+ }
+}
uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
uint64_t features)
@@ -1732,7 +1763,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
goto fail_vq;
}
}
-
+ if (vdev->use_config_notifier == true) {
+ event_notifier_test_and_clear(&hdev->masked_config_notifier);
+ if (!vdev->use_guest_notifier_mask) {
+ vhost_config_mask(hdev, vdev, false);
+ }
+ }
if (hdev->log_enabled) {
uint64_t log_base;
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index ceb58fda6c..774ac3893b 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -3278,6 +3278,7 @@ void virtio_init(VirtIODevice *vdev, const char *name,
virtio_vmstate_change, vdev);
vdev->device_endian = virtio_default_endian();
vdev->use_guest_notifier_mask = true;
+ vdev->use_config_notifier = false;
}
/*
@@ -3502,6 +3503,16 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n)
}
}
+static void virtio_config_read(EventNotifier *n)
+{
+ VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier);
+ if (vdev->use_config_notifier == false) {
+ return;
+ }
+ if (event_notifier_test_and_clear(n)) {
+ virtio_notify_config(vdev);
+ }
+}
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd)
{
@@ -3517,6 +3528,16 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
virtio_queue_guest_notifier_read(&vq->guest_notifier);
}
}
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+ bool with_irqfd)
+{
+ if (assign && !with_irqfd) {
+ event_notifier_set_handler(&vdev->config_notifier,
+ virtio_config_read);
+ } else {
+ event_notifier_set_handler(&vdev->config_notifier, NULL);
+ }
+}
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
@@ -3591,6 +3612,10 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
return &vq->host_notifier;
}
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev)
+{
+ return &vdev->config_notifier;
+}
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
{
vq->host_notifier_enabled = enabled;
diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h
index 4a8bc75415..22efa7008e 100644
--- a/include/hw/virtio/vhost.h
+++ b/include/hw/virtio/vhost.h
@@ -91,6 +91,7 @@ struct vhost_dev {
QLIST_HEAD(, vhost_iommu) iommu_list;
IOMMUNotifier n;
const VhostDevConfigOps *config_ops;
+ EventNotifier masked_config_notifier;
};
struct vhost_net {
@@ -108,6 +109,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev);
int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev);
+bool vhost_config_pending(struct vhost_dev *hdev, int n);
+void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask);
/* Test and clear masked event pending status.
* Should be called after unmask to avoid losing events.
diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 5a1940fe70..fd17dbb097 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -113,6 +113,8 @@ struct VirtIODevice
bool use_guest_notifier_mask;
AddressSpace *dma_as;
QLIST_HEAD(, VirtQueue) *vector_queues;
+ EventNotifier config_notifier;
+ bool use_config_notifier;
};
struct VirtioDeviceClass {
@@ -315,11 +317,14 @@ uint16_t virtio_get_queue_index(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
bool with_irqfd);
+void virtio_set_config_notifier_fd_handler(VirtIODevice *vdev, bool assign,
+ bool with_irqfd);
int virtio_device_start_ioeventfd(VirtIODevice *vdev);
int virtio_device_grab_ioeventfd(VirtIODevice *vdev);
void virtio_device_release_ioeventfd(VirtIODevice *vdev);
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+EventNotifier *virtio_get_config_notifier(VirtIODevice *vdev);
void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled);
void virtio_queue_host_notifier_read(EventNotifier *n);
void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h
index 172b0051d8..0d38c97c94 100644
--- a/include/net/vhost_net.h
+++ b/include/net/vhost_net.h
@@ -36,6 +36,9 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data,
bool vhost_net_virtqueue_pending(VHostNetState *net, int n);
void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev,
int idx, bool mask);
+bool vhost_net_config_pending(VHostNetState *net, int n);
+void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev,
+ bool mask);
int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr);
VHostNetState *get_vhost_net(NetClientState *nc);
--
2.21.3
Thread overview: 18+ messages
2021-04-08 9:38 [PATCH v5 0/6] vhost-vdpa: add support for configure interrupt Cindy Lu
2021-04-08 9:38 ` [PATCH v5 1/6] virtio: introduce new type in interrupt process Cindy Lu
2021-04-09 6:57 ` Jason Wang
2021-04-12 8:09 ` Cindy Lu
2021-04-08 9:38 ` [PATCH v5 2/6] vhost: add new call back function for config interrupt Cindy Lu
2021-04-09 7:12 ` Jason Wang
2021-04-12 8:06 ` Cindy Lu
2021-04-08 9:38 ` Cindy Lu [this message]
2021-04-09  7:21 ` [PATCH v5 3/6] virtio: add support for configure interrupt Jason Wang
2021-04-12 8:05 ` Cindy Lu
2021-04-08 9:38 ` [PATCH v5 4/6] vhost-vdpa: add support for " Cindy Lu
2021-04-09 7:24 ` Jason Wang
2021-04-12 8:18 ` Cindy Lu
2021-04-08 9:38 ` [PATCH v5 5/6] virtio-mmio: " Cindy Lu
2021-04-09 7:27 ` Jason Wang
2021-04-08 9:38 ` [PATCH v5 6/6] virtio-pci: " Cindy Lu
2021-04-09 7:39 ` Jason Wang
2021-04-12 8:27 ` Cindy Lu