From: Avi Kivity <avi@redhat.com>
To: qemu-devel@nongnu.org, Anthony Liguori <anthony@codemonkey.ws>,
liu ping fan <qemulist@gmail.com>,
"Michael S. Tsirkin" <mst@redhat.com>,
Paolo Bonzini <pbonzini@redhat.com>,
Blue Swirl <blauwirbel@gmail.com>
Subject: [Qemu-devel] [RFC v1 14/22] memory: manage coalesced mmio via a MemoryListener
Date: Wed, 3 Oct 2012 18:03:57 +0200 [thread overview]
Message-ID: <1349280245-16341-15-git-send-email-avi@redhat.com> (raw)
In-Reply-To: <1349280245-16341-1-git-send-email-avi@redhat.com>
Instead of calling a global function on coalesced mmio changes, which
routes the call to kvm if enabled, add coalesced mmio hooks to
MemoryListener and make kvm use that instead.
The motivation is support for multiple address spaces (which means we
need to filter the call on the right address space) but the result
is cleaner as well.
Signed-off-by: Avi Kivity <avi@redhat.com>
---
exec.c | 13 -------------
kvm-all.c | 20 ++++++++++----------
kvm-stub.c | 10 ----------
kvm.h | 2 --
memory.c | 24 ++++++++++++++++++++----
memory.h | 12 +++++++++++-
6 files changed, 41 insertions(+), 40 deletions(-)
diff --git a/exec.c b/exec.c
index 1fd6a10..51a32e7 100644
--- a/exec.c
+++ b/exec.c
@@ -2313,19 +2313,6 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section,
}
}
-
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
- if (kvm_enabled())
- kvm_coalesce_mmio_region(addr, size);
-}
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
- if (kvm_enabled())
- kvm_uncoalesce_mmio_region(addr, size);
-}
-
void qemu_flush_coalesced_mmio_buffer(void)
{
if (kvm_enabled())
diff --git a/kvm-all.c b/kvm-all.c
index 5e9215d..25ca202 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -454,9 +454,10 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
return ret;
}
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_coalesce_mmio_region(MemoryListener *listener,
+ MemoryRegionSection *secion,
+ target_phys_addr_t start, ram_addr_t size)
{
- int ret = -ENOSYS;
KVMState *s = kvm_state;
if (s->coalesced_mmio) {
@@ -466,15 +467,14 @@ int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
zone.size = size;
zone.pad = 0;
- ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+ (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
}
-
- return ret;
}
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
+ MemoryRegionSection *secion,
+ target_phys_addr_t start, ram_addr_t size)
{
- int ret = -ENOSYS;
KVMState *s = kvm_state;
if (s->coalesced_mmio) {
@@ -484,10 +484,8 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
zone.size = size;
zone.pad = 0;
- ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+ (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
}
-
- return ret;
}
int kvm_check_extension(KVMState *s, unsigned int extension)
@@ -818,6 +816,8 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
.log_global_stop = kvm_log_global_stop,
.eventfd_add = kvm_mem_ioeventfd_add,
.eventfd_del = kvm_mem_ioeventfd_del,
+ .coalesced_mmio_add = kvm_coalesce_mmio_region,
+ .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
.priority = 10,
};
diff --git a/kvm-stub.c b/kvm-stub.c
index 3c52eb5..a3455e2 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -29,16 +29,6 @@ int kvm_init_vcpu(CPUArchState *env)
return -ENOSYS;
}
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
- return -ENOSYS;
-}
-
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
- return -ENOSYS;
-}
-
int kvm_init(void)
{
return -ENOSYS;
diff --git a/kvm.h b/kvm.h
index dea2998..eefcb49 100644
--- a/kvm.h
+++ b/kvm.h
@@ -129,8 +129,6 @@ void *kvm_vmalloc(ram_addr_t size);
void *kvm_arch_vmalloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
void kvm_flush_coalesced_mmio_buffer(void);
#endif
diff --git a/memory.c b/memory.c
index efefcb8..eb75349 100644
--- a/memory.c
+++ b/memory.c
@@ -1130,11 +1130,19 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
FlatRange *fr;
CoalescedMemoryRange *cmr;
AddrRange tmp;
+ MemoryRegionSection section;
FOR_EACH_FLAT_RANGE(fr, as->current_map) {
if (fr->mr == mr) {
- qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
- int128_get64(fr->addr.size));
+ section = (MemoryRegionSection) {
+ .address_space = as->root,
+ .offset_within_address_space = int128_get64(fr->addr.start),
+ .size = int128_get64(fr->addr.size),
+ };
+
+ MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
+ int128_get64(fr->addr.start),
+ int128_get64(fr->addr.size));
QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
tmp = addrrange_shift(cmr->addr,
int128_sub(fr->addr.start,
@@ -1143,8 +1151,9 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
continue;
}
tmp = addrrange_intersection(tmp, fr->addr);
- qemu_register_coalesced_mmio(int128_get64(tmp.start),
- int128_get64(tmp.size));
+ MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
+ int128_get64(tmp.start),
+ int128_get64(tmp.size));
}
}
}
@@ -1529,6 +1538,13 @@ void memory_listener_default_eventfd(MemoryListener *listener,
{
}
+void memory_listener_default_coalesced_mmio(MemoryListener *listener,
+ MemoryRegionSection *section,
+ target_phys_addr_t addr,
+ target_phys_addr_t len)
+{
+}
+
void address_space_init(AddressSpace *as, MemoryRegion *root)
{
memory_region_transaction_begin();
diff --git a/memory.h b/memory.h
index 0ef95cb..5f50bce 100644
--- a/memory.h
+++ b/memory.h
@@ -217,6 +217,10 @@ struct MemoryListener {
bool match_data, uint64_t data, EventNotifier *e);
void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
+ void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+ target_phys_addr_t addr, target_phys_addr_t len);
+ void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+ target_phys_addr_t addr, target_phys_addr_t len);
/* Lower = earlier (during add), later (during del) */
unsigned priority;
MemoryRegion *address_space_filter;
@@ -235,7 +239,9 @@ struct MemoryListener {
.log_global_start = memory_listener_default_global, \
.log_global_stop = memory_listener_default_global, \
.eventfd_add = memory_listener_default_eventfd, \
- .eventfd_del = memory_listener_default_eventfd \
+ .eventfd_del = memory_listener_default_eventfd, \
+ .coalesced_mmio_add = memory_listener_default_coalesced_mmio, \
+ .coalesced_mmio_del = memory_listener_default_coalesced_mmio \
void memory_listener_default_global(MemoryListener *listener);
void memory_listener_default_section(MemoryListener *listener,
@@ -243,6 +249,10 @@ void memory_listener_default_section(MemoryListener *listener,
void memory_listener_default_eventfd(MemoryListener *listener,
MemoryRegionSection *section,
bool match_data, uint64_t data, EventNotifier *e);
+void memory_listener_default_coalesced_mmio(MemoryListener *listener,
+ MemoryRegionSection *section,
+ target_phys_addr_t addr,
+ target_phys_addr_t len);
/**
* memory_region_init: Initialize a memory region
--
1.7.12
next prev parent reply other threads:[~2012-10-03 16:04 UTC|newest]
Thread overview: 70+ messages / expand[flat|nested] mbox.gz Atom feed top
2012-10-03 16:03 [Qemu-devel] [RFC v1 00/22] Integrate DMA into the memory API Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 01/22] memory: rename 'exec-obsolete.h' Avi Kivity
2012-10-04 13:58 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 02/22] vhost: use MemoryListener filtering to only monitor RAM address space Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 03/22] kvm: use separate MemoryListeners for memory and I/O Avi Kivity
2012-10-03 20:16 ` Blue Swirl
2012-10-04 6:33 ` Avi Kivity
2012-10-04 16:44 ` Blue Swirl
2012-10-04 16:58 ` Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 04/22] xen_pt: " Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 05/22] memory: prepare AddressSpace for exporting Avi Kivity
2012-10-04 14:01 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 06/22] memory: export AddressSpace Avi Kivity
2012-10-04 14:02 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 07/22] memory: maintain a list of address spaces Avi Kivity
2012-10-04 10:17 ` Gleb Natapov
2012-10-04 10:19 ` Avi Kivity
2012-10-04 14:03 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 08/22] memory: provide defaults for MemoryListener operations Avi Kivity
2012-10-04 14:05 ` Anthony Liguori
2012-10-04 14:29 ` Avi Kivity
2012-10-09 15:14 ` Anthony Liguori
2012-10-09 15:28 ` Avi Kivity
2012-10-09 18:34 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 09/22] memory: use new MEMORY_LISTENER_DEFAULT_OPS Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 10/22] vfio: " Avi Kivity
2012-10-04 15:45 ` Alex Williamson
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 11/22] xen_pt: " Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 12/22] kvm: " Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 13/22] xen: " Avi Kivity
2012-10-03 16:03 ` Avi Kivity [this message]
2012-10-04 14:08 ` [Qemu-devel] [RFC v1 14/22] memory: manage coalesced mmio via a MemoryListener Anthony Liguori
2012-10-04 14:33 ` Avi Kivity
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 15/22] memory: move address_space_memory and address_space_io out of memory core Avi Kivity
2012-10-04 14:08 ` Anthony Liguori
2012-10-03 16:03 ` [Qemu-devel] [RFC v1 16/22] memory: move tcg flush into a tcg memory listener Avi Kivity
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 17/22] memory: use AddressSpace for MemoryListener filtering Avi Kivity
2012-10-03 20:16 ` Blue Swirl
2012-10-04 10:17 ` Avi Kivity
2012-10-04 16:57 ` Blue Swirl
2012-10-04 14:09 ` Anthony Liguori
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 18/22] s390: avoid reaching into memory core internals Avi Kivity
2012-10-04 8:12 ` Christian Borntraeger
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 19/22] memory: per-AddressSpace dispatch Avi Kivity
2012-10-03 20:24 ` Blue Swirl
2012-10-04 6:38 ` Avi Kivity
2012-10-04 8:47 ` Peter Maydell
2012-10-04 10:15 ` Avi Kivity
2012-10-04 10:29 ` Peter Maydell
2012-10-04 10:30 ` Avi Kivity
2012-10-04 17:13 ` Blue Swirl
2012-10-04 17:19 ` Avi Kivity
2012-10-04 17:42 ` Blue Swirl
2012-10-04 19:05 ` Anthony Liguori
2012-10-04 19:15 ` Blue Swirl
2012-10-04 19:16 ` Peter Maydell
2012-10-07 10:34 ` Avi Kivity
2012-10-04 14:13 ` Anthony Liguori
2012-10-04 14:43 ` Avi Kivity
2012-10-09 15:17 ` Anthony Liguori
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 20/22] dma: make dma access its own address space Avi Kivity
2012-10-04 14:15 ` Anthony Liguori
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 21/22] pci: give each device " Avi Kivity
2012-10-03 16:04 ` [Qemu-devel] [RFC v1 22/22] pci: honor PCI_COMMAND_MASTER Avi Kivity
2012-10-03 20:26 ` [Qemu-devel] [RFC v1 00/22] Integrate DMA into the memory API Blue Swirl
2012-10-04 10:18 ` Avi Kivity
2012-10-04 6:41 ` Avi Kivity
2012-10-04 8:13 ` Paolo Bonzini
2012-10-04 14:16 ` Anthony Liguori
2012-10-04 14:36 ` Avi Kivity
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1349280245-16341-15-git-send-email-avi@redhat.com \
--to=avi@redhat.com \
--cc=anthony@codemonkey.ws \
--cc=blauwirbel@gmail.com \
--cc=mst@redhat.com \
--cc=pbonzini@redhat.com \
--cc=qemu-devel@nongnu.org \
--cc=qemulist@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).