From: David Hildenbrand <david@redhat.com>
To: qemu-devel@nongnu.org
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>,
David Hildenbrand <david@redhat.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
Peter Xu <peterx@redhat.com>,
Luiz Capitulino <lcapitulino@redhat.com>,
Auger Eric <eric.auger@redhat.com>,
Alex Williamson <alex.williamson@redhat.com>,
Wei Yang <richardw.yang@linux.intel.com>,
Igor Mammedov <imammedo@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>
Subject: [PATCH PROTOTYPE 2/6] virtio-mem: Implement SparseRAMHandler interface
Date: Thu, 24 Sep 2020 18:04:19 +0200
Message-ID: <20200924160423.106747-3-david@redhat.com>
In-Reply-To: <20200924160423.106747-1-david@redhat.com>
Let's properly notify the registered notifiers when (un)plugging blocks.
Handle errors from notifiers gracefully when mapping: roll back the
change and tell the guest that the VM is busy.
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
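Note (illustration only, not part of this patch): a minimal, hypothetical
sketch of the consumer side. vfio-like code would implement the
SparseRAMNotifier callbacks and register them against the sparse memory
region. Only the notify_map/notify_unmap signatures and the
register_listener/replay_mapped hooks match this series; the foo_* names,
memory_region_get_sparse_ram_handler() and SPARSE_RAM_HANDLER_GET_CLASS()
are assumptions for the sake of the example.

/* Hypothetical consumer; the foo_* names are not part of this series. */
static int foo_notify_map(SparseRAMNotifier *n, const MemoryRegion *mr,
                          uint64_t offset, uint64_t size)
{
    /* e.g., create a DMA mapping for [offset, offset + size) of mr */
    return 0; /* non-zero makes virtio-mem roll back the plug request */
}

static void foo_notify_unmap(SparseRAMNotifier *n, const MemoryRegion *mr,
                             uint64_t offset, uint64_t size)
{
    /* e.g., tear down the DMA mapping again; this must not fail */
}

static SparseRAMNotifier foo_notifier = {
    .notify_map = foo_notify_map,
    .notify_unmap = foo_notify_unmap,
};

static void foo_register(MemoryRegion *mr)
{
    /* Assumed lookup helpers; patch 1/6 defines the actual interface. */
    SparseRAMHandler *srh = memory_region_get_sparse_ram_handler(mr);
    SparseRAMHandlerClass *srhc = SPARSE_RAM_HANDLER_GET_CLASS(srh);

    srhc->register_listener(srh, mr, &foo_notifier);
    /* Catch up on blocks that are already plugged (mapped). */
    srhc->replay_mapped(srh, mr, &foo_notifier);
}

If notify_map() fails, virtio_mem_set_block_state() below discards the
memory again and returns -EBUSY, so the guest sees the request nacked as
busy, matching the behavior described above.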
hw/virtio/virtio-mem.c | 158 ++++++++++++++++++++++++++++++++-
include/hw/virtio/virtio-mem.h | 3 +
2 files changed, 160 insertions(+), 1 deletion(-)
diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
index 8fbec77ccc..e23969eaed 100644
--- a/hw/virtio/virtio-mem.c
+++ b/hw/virtio/virtio-mem.c
@@ -72,6 +72,64 @@ static bool virtio_mem_is_busy(void)
     return migration_in_incoming_postcopy() || !migration_is_idle();
 }
 
+static void virtio_mem_srh_notify_unmap(VirtIOMEM *vmem, uint64_t offset,
+                                        uint64_t size)
+{
+    SparseRAMNotifier *notifier;
+
+    QLIST_FOREACH(notifier, &vmem->sram_notify, next) {
+        notifier->notify_unmap(notifier, &vmem->memdev->mr, offset, size);
+    }
+}
+
+static int virtio_mem_srh_notify_map(VirtIOMEM *vmem, uint64_t offset,
+                                     uint64_t size)
+{
+    SparseRAMNotifier *notifier, *notifier2;
+    int ret = 0;
+
+    QLIST_FOREACH(notifier, &vmem->sram_notify, next) {
+        ret = notifier->notify_map(notifier, &vmem->memdev->mr, offset, size);
+        if (ret) {
+            break;
+        }
+    }
+
+    /* In case any notifier failed, undo the whole operation. */
+    if (ret) {
+        QLIST_FOREACH(notifier2, &vmem->sram_notify, next) {
+            if (notifier2 == notifier) {
+                break;
+            }
+            notifier2->notify_unmap(notifier2, &vmem->memdev->mr, offset, size);
+        }
+    }
+    return ret;
+}
+
+/*
+ * TODO: Maybe we could notify directly that everything is unmapped/discarded;
+ * at least vfio should be able to deal with that.
+ */
+static void virtio_mem_srh_notify_unplug_all(VirtIOMEM *vmem)
+{
+    unsigned long first_zero_bit, last_zero_bit;
+    uint64_t offset, length;
+
+    /* Find consecutive unplugged blocks and notify */
+    first_zero_bit = find_first_zero_bit(vmem->bitmap, vmem->bitmap_size);
+    while (first_zero_bit < vmem->bitmap_size) {
+        offset = first_zero_bit * vmem->block_size;
+        last_zero_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
+                                      first_zero_bit + 1) - 1;
+        length = (last_zero_bit - first_zero_bit + 1) * vmem->block_size;
+
+        virtio_mem_srh_notify_unmap(vmem, offset, length);
+        first_zero_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
+                                            last_zero_bit + 2);
+    }
+}
+
 static bool virtio_mem_test_bitmap(VirtIOMEM *vmem, uint64_t start_gpa,
                                    uint64_t size, bool plugged)
 {
@@ -146,7 +204,7 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
                                       uint64_t size, bool plug)
 {
     const uint64_t offset = start_gpa - vmem->addr;
-    int ret;
+    int ret, ret2;
 
     if (virtio_mem_is_busy()) {
         return -EBUSY;
@@ -159,6 +217,23 @@ static int virtio_mem_set_block_state(VirtIOMEM *vmem, uint64_t start_gpa,
                          strerror(-ret));
             return -EBUSY;
         }
+        /*
+         * We'll notify *after* discarding has succeeded, because we might
+         * not be able to map again ...
+         */
+        virtio_mem_srh_notify_unmap(vmem, offset, size);
+    } else if (virtio_mem_srh_notify_map(vmem, offset, size)) {
+        /*
+         * Could be that a previous mapping attempt already resulted in
+         * memory getting populated.
+         */
+        ret2 = ram_block_discard_range(vmem->memdev->mr.ram_block, offset,
+                                       size);
+        if (ret2) {
+            error_report("Unexpected error discarding RAM: %s",
+                         strerror(-ret2));
+        }
+        return -EBUSY;
     }
     virtio_mem_set_bitmap(vmem, start_gpa, size, plug);
     return 0;
@@ -253,6 +328,8 @@ static int virtio_mem_unplug_all(VirtIOMEM *vmem)
error_report("Unexpected error discarding RAM: %s", strerror(-ret));
return -EBUSY;
}
+ virtio_mem_srh_notify_unplug_all(vmem);
+
bitmap_clear(vmem->bitmap, 0, vmem->bitmap_size);
if (vmem->size) {
vmem->size = 0;
@@ -480,6 +557,13 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp)
     vmstate_register_ram(&vmem->memdev->mr, DEVICE(vmem));
     qemu_register_reset(virtio_mem_system_reset, vmem);
     precopy_add_notifier(&vmem->precopy_notifier);
+
+    /*
+     * Mark the region as sparse, so everybody is aware of it before the plug
+     * handler exposes the region to the system.
+     */
+    memory_region_set_sparse_ram_handler(&vmem->memdev->mr,
+                                         SPARSE_RAM_HANDLER(vmem));
 }
 
 static void virtio_mem_device_unrealize(DeviceState *dev)
@@ -487,6 +571,7 @@ static void virtio_mem_device_unrealize(DeviceState *dev)
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIOMEM *vmem = VIRTIO_MEM(dev);
 
+    memory_region_set_sparse_ram_handler(&vmem->memdev->mr, NULL);
     precopy_remove_notifier(&vmem->precopy_notifier);
     qemu_unregister_reset(virtio_mem_system_reset, vmem);
     vmstate_unregister_ram(&vmem->memdev->mr, DEVICE(vmem));
@@ -813,6 +898,7 @@ static void virtio_mem_instance_init(Object *obj)
     vmem->block_size = VIRTIO_MEM_MIN_BLOCK_SIZE;
     notifier_list_init(&vmem->size_change_notifiers);
     vmem->precopy_notifier.notify = virtio_mem_precopy_notify;
+    QLIST_INIT(&vmem->sram_notify);
 
     object_property_add(obj, VIRTIO_MEM_SIZE_PROP, "size", virtio_mem_get_size,
                         NULL, NULL, NULL);
@@ -832,11 +918,72 @@ static Property virtio_mem_properties[] = {
     DEFINE_PROP_END_OF_LIST(),
 };
 
+static uint64_t virtio_mem_srh_get_granularity(const SparseRAMHandler *srh,
+                                               const MemoryRegion *mr)
+{
+    const VirtIOMEM *vmem = VIRTIO_MEM(srh);
+
+    g_assert(mr == &vmem->memdev->mr);
+    return vmem->block_size;
+}
+
+static void virtio_mem_srh_register_listener(SparseRAMHandler *srh,
+                                             const MemoryRegion *mr,
+                                             SparseRAMNotifier *notifier)
+{
+    VirtIOMEM *vmem = VIRTIO_MEM(srh);
+
+    g_assert(mr == &vmem->memdev->mr);
+    QLIST_INSERT_HEAD(&vmem->sram_notify, notifier, next);
+}
+
+static void virtio_mem_srh_unregister_listener(SparseRAMHandler *srh,
+                                               const MemoryRegion *mr,
+                                               SparseRAMNotifier *notifier)
+{
+    VirtIOMEM *vmem = VIRTIO_MEM(srh);
+
+    g_assert(mr == &vmem->memdev->mr);
+    QLIST_REMOVE(notifier, next);
+}
+
+static int virtio_mem_srh_replay_mapped(SparseRAMHandler *srh,
+                                        const MemoryRegion *mr,
+                                        SparseRAMNotifier *notifier)
+{
+    VirtIOMEM *vmem = VIRTIO_MEM(srh);
+    unsigned long first_bit, last_bit;
+    uint64_t offset, length;
+    int ret = 0;
+
+    g_assert(mr == &vmem->memdev->mr);
+
+    /* Find consecutive plugged blocks and notify */
+    first_bit = find_first_bit(vmem->bitmap, vmem->bitmap_size);
+    while (first_bit < vmem->bitmap_size) {
+        offset = first_bit * vmem->block_size;
+        last_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size,
+                                      first_bit + 1) - 1;
+        length = (last_bit - first_bit + 1) * vmem->block_size;
+
+        ret = notifier->notify_map(notifier, mr, offset, length);
+        if (ret) {
+            break;
+        }
+        first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size,
+                                  last_bit + 2);
+    }
+
+    /* TODO: cleanup on error if necessary. */
+    return ret;
+}
+
 static void virtio_mem_class_init(ObjectClass *klass, void *data)
 {
     DeviceClass *dc = DEVICE_CLASS(klass);
     VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
     VirtIOMEMClass *vmc = VIRTIO_MEM_CLASS(klass);
+    SparseRAMHandlerClass *srhc = SPARSE_RAM_HANDLER_CLASS(klass);
 
     device_class_set_props(dc, virtio_mem_properties);
     dc->vmsd = &vmstate_virtio_mem;
@@ -852,6 +999,11 @@ static void virtio_mem_class_init(ObjectClass *klass, void *data)
     vmc->get_memory_region = virtio_mem_get_memory_region;
     vmc->add_size_change_notifier = virtio_mem_add_size_change_notifier;
     vmc->remove_size_change_notifier = virtio_mem_remove_size_change_notifier;
+
+    srhc->get_granularity = virtio_mem_srh_get_granularity;
+    srhc->register_listener = virtio_mem_srh_register_listener;
+    srhc->unregister_listener = virtio_mem_srh_unregister_listener;
+    srhc->replay_mapped = virtio_mem_srh_replay_mapped;
 }
 
 static const TypeInfo virtio_mem_info = {
@@ -861,6 +1013,10 @@ static const TypeInfo virtio_mem_info = {
     .instance_init = virtio_mem_instance_init,
     .class_init = virtio_mem_class_init,
     .class_size = sizeof(VirtIOMEMClass),
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_SPARSE_RAM_HANDLER },
+        { }
+    },
 };
 
 static void virtio_register_types(void)
diff --git a/include/hw/virtio/virtio-mem.h b/include/hw/virtio/virtio-mem.h
index 4eeb82d5dd..91d9b48ba0 100644
--- a/include/hw/virtio/virtio-mem.h
+++ b/include/hw/virtio/virtio-mem.h
@@ -67,6 +67,9 @@ struct VirtIOMEM {
 
     /* don't migrate unplugged memory */
     NotifierWithReturn precopy_notifier;
+
+    /* SparseRAMNotifier list to be notified on plug/unplug events. */
+    QLIST_HEAD(, SparseRAMNotifier) sram_notify;
 };
 
 struct VirtIOMEMClass {
--
2.26.2