From: "Michael S. Tsirkin" <mst@redhat.com>
To: qemu-devel@nongnu.org
Cc: Peter Maydell <peter.maydell@linaro.org>,
Paolo Bonzini <pbonzini@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>
Subject: [Qemu-devel] [PULL 09/23] virtio: add MemoryListener to cache ring translations
Date: Fri, 17 Feb 2017 21:54:23 +0200
Message-ID: <1487361200-29966-10-git-send-email-mst@redhat.com>
In-Reply-To: <1487361200-29966-1-git-send-email-mst@redhat.com>

From: Paolo Bonzini <pbonzini@redhat.com>

The cached translations are RCU-protected to allow efficient use
when processing virtqueues.
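
The lifecycle is the usual RCU publish/retire pattern:
virtio_init_region_cache() builds a fresh set of caches, publishes it
with atomic_rcu_set(), and retires the previous set with call_rcu()
once current readers have drained; readers only dereference
vring.caches between rcu_read_lock() and rcu_read_unlock().  A
compressed sketch of both sides (reader code of this shape only
arrives in later patches of the series; it is not added here):

    /* writer: swap in the new caches, defer freeing the old set */
    atomic_rcu_set(&vq->vring.caches, new);
    if (old) {
        call_rcu(old, virtio_free_region_cache, rcu);
    }

    /* reader: pin the current set for the duration of the access */
    rcu_read_lock();
    caches = atomic_rcu_read(&vq->vring.caches);
    /* ... address_space_read_cached(&caches->avail, ...) ... */
    rcu_read_unlock();
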
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
---
 include/hw/virtio/virtio.h |   1 +
 hw/virtio/virtio.c         | 105 +++++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 103 insertions(+), 3 deletions(-)

diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h
index 0863a25..15efcf2 100644
--- a/include/hw/virtio/virtio.h
+++ b/include/hw/virtio/virtio.h
@@ -85,6 +85,7 @@ struct VirtIODevice
uint32_t generation;
int nvectors;
VirtQueue *vq;
+ MemoryListener listener;
uint16_t device_id;
bool vm_running;
bool broken; /* device in invalid state, needs reset */
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 71e41f6..b75cb52 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -60,6 +60,13 @@ typedef struct VRingUsed
VRingUsedElem ring[0];
} VRingUsed;
+typedef struct VRingMemoryRegionCaches {
+ struct rcu_head rcu;
+ MemoryRegionCache desc;
+ MemoryRegionCache avail;
+ MemoryRegionCache used;
+} VRingMemoryRegionCaches;
+
typedef struct VRing
{
unsigned int num;
@@ -68,6 +75,7 @@ typedef struct VRing
hwaddr desc;
hwaddr avail;
hwaddr used;
+ VRingMemoryRegionCaches *caches;
} VRing;
struct VirtQueue
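
Each queue thus carries a single RCU-managed pointer; the three
MemoryRegionCache structures reachable from it are treated as
immutable once published, which is what keeps the read side
lock-free.  Stated as an invariant (a sketch, not code from this
patch):

    /* invariant: after atomic_rcu_set(&vring.caches, new), the
     * new->desc/avail/used caches are never modified again; an update
     * always allocates a whole new VRingMemoryRegionCaches and
     * republishes the pointer.
     */
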
@@ -104,6 +112,51 @@ struct VirtQueue
QLIST_ENTRY(VirtQueue) node;
};
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+ if (!caches) {
+ return;
+ }
+
+ address_space_cache_destroy(&caches->desc);
+ address_space_cache_destroy(&caches->avail);
+ address_space_cache_destroy(&caches->used);
+ g_free(caches);
+}
+
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+ VirtQueue *vq = &vdev->vq[n];
+ VRingMemoryRegionCaches *old = vq->vring.caches;
+ VRingMemoryRegionCaches *new;
+ hwaddr addr, size;
+ int event_size;
+
+ event_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+ addr = vq->vring.desc;
+ if (!addr) {
+ return;
+ }
+ new = g_new0(VRingMemoryRegionCaches, 1);
+ size = virtio_queue_get_desc_size(vdev, n);
+ address_space_cache_init(&new->desc, vdev->dma_as,
+ addr, size, false);
+
+ size = virtio_queue_get_used_size(vdev, n) + event_size;
+ address_space_cache_init(&new->used, vdev->dma_as,
+ vq->vring.used, size, true);
+
+ size = virtio_queue_get_avail_size(vdev, n) + event_size;
+ address_space_cache_init(&new->avail, vdev->dma_as,
+ vq->vring.avail, size, false);
+
+ atomic_rcu_set(&vq->vring.caches, new);
+ if (old) {
+ call_rcu(old, virtio_free_region_cache, rcu);
+ }
+}
+
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
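
The two extra bytes (event_size) cover the event-index fields that
VIRTIO_RING_F_EVENT_IDX appends after each ring, so the caches must
span them as well.  Schematically, per the virtio specification:

    /* with VIRTIO_RING_F_EVENT_IDX negotiated:
     *
     *   avail: le16 flags | le16 idx | le16 ring[num] | le16 used_event
     *   used:  le16 flags | le16 idx | elem ring[num] | le16 avail_event
     *
     * the trailing le16 is the extra event_size = 2 bytes cached above
     */
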
@@ -117,6 +170,7 @@ void virtio_queue_update_rings(VirtIODevice *vdev, int n)
vring->used = vring_align(vring->avail +
offsetof(VRingAvail, ring[vring->num]),
vring->align);
+ virtio_init_region_cache(vdev, n);
}
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
@@ -1264,6 +1318,7 @@ void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
+ virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
@@ -1984,9 +2039,6 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
void virtio_cleanup(VirtIODevice *vdev)
{
qemu_del_vm_change_state_handler(vdev->vmstate);
- g_free(vdev->config);
- g_free(vdev->vq);
- g_free(vdev->vector_queues);
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
@@ -2248,6 +2300,19 @@ void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
}
}
+static void virtio_memory_listener_commit(MemoryListener *listener)
+{
+ VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ virtio_init_region_cache(vdev, i);
+ }
+}
+
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
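
The .commit hook runs once a memory-map transaction has fully taken
effect, which is the first point at which re-running
address_space_cache_init() is guaranteed to observe the updated
memory map; the handler above therefore rebuilds the caches for every
queue that is in use.  The general shape of the pattern, stripped of
virtio specifics (MyDevice and rebuild_caches are illustrative names
only):

    typedef struct MyDevice {
        MemoryListener listener;
        /* ... cached translations ... */
    } MyDevice;

    static void my_device_commit(MemoryListener *listener)
    {
        MyDevice *d = container_of(listener, MyDevice, listener);
        rebuild_caches(d);  /* hypothetical: redo address_space_cache_init() */
    }

    /* at realize time: */
    d->listener.commit = my_device_commit;
    memory_listener_register(&d->listener, as);
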
@@ -2270,6 +2335,9 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
error_propagate(errp, err);
return;
}
+
+ vdev->listener.commit = virtio_memory_listener_commit;
+ memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
@@ -2292,6 +2360,36 @@ static void virtio_device_unrealize(DeviceState *dev, Error **errp)
vdev->bus_name = NULL;
}
+static void virtio_device_free_virtqueues(VirtIODevice *vdev)
+{
+ int i;
+ if (!vdev->vq) {
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ VRingMemoryRegionCaches *caches;
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ caches = atomic_read(&vdev->vq[i].vring.caches);
+ atomic_set(&vdev->vq[i].vring.caches, NULL);
+ virtio_free_region_cache(caches);
+ }
+ g_free(vdev->vq);
+}
+
+static void virtio_device_instance_finalize(Object *obj)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(obj);
+
+ memory_listener_unregister(&vdev->listener);
+ virtio_device_free_virtqueues(vdev);
+
+ g_free(vdev->config);
+ g_free(vdev->vector_queues);
+}
+
static Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_END_OF_LIST(),
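
Note the asymmetry with virtio_init_region_cache(): finalize frees the
caches directly rather than through call_rcu().  That is safe only
because the listener has already been unregistered and the device can
no longer be processing virtqueues, so no RCU reader can still hold a
reference.  The ordering that makes this work, as a comment sketch:

    /* teardown order (sketch):
     * 1. memory_listener_unregister()    - no further cache rebuilds
     * 2. atomic_set(&vring.caches, NULL) - unpublish the pointer
     * 3. virtio_free_region_cache()      - direct free, safe only once
     *                                      no readers can remain
     */
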
@@ -2418,6 +2516,7 @@ static const TypeInfo virtio_device_info = {
.parent = TYPE_DEVICE,
.instance_size = sizeof(VirtIODevice),
.class_init = virtio_device_class_init,
+ .instance_finalize = virtio_device_instance_finalize,
.abstract = true,
.class_size = sizeof(VirtioDeviceClass),
};
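
Later patches in this series convert the hot-path accessors to go
through these caches; for illustration, a cached descriptor read could
look roughly like this (read_cached_desc is an invented name, and the
endianness helpers are QEMU's existing virtio_tswap*s):

    static void read_cached_desc(VirtQueue *vq, int i, VRingDesc *desc)
    {
        VRingMemoryRegionCaches *caches;

        rcu_read_lock();
        caches = atomic_rcu_read(&vq->vring.caches);
        address_space_read_cached(&caches->desc, i * sizeof(VRingDesc),
                                  desc, sizeof(*desc));
        virtio_tswap64s(vq->vdev, &desc->addr);  /* guest to host endian */
        virtio_tswap32s(vq->vdev, &desc->len);
        virtio_tswap16s(vq->vdev, &desc->flags);
        virtio_tswap16s(vq->vdev, &desc->next);
        rcu_read_unlock();
    }
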
--
MST