* [Qemu-devel] [PATCH V3 1/2] xen: Add xc_domain_add_to_physmap to xen_interface.
From: anthony.perard @ 2011-05-24 13:34 UTC
To: QEMU-devel, Stefano Stabellini, Alexander Graf; +Cc: Anthony PERARD, Xen Devel
From: Anthony PERARD <anthony.perard@citrix.com>
This function will be used to support the sync dirty bitmap feature.
It comes with a configure check against every supported Xen release, and
a special implementation for Xen versions that lack this specific call.
The function is not usable with Xen 3.3 because the behaviour there is
different.
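
For context, a sketch of the intended use (the helper name remap_vram_page
is illustrative, not part of this patch): with XENMAPSPACE_gmfn, idx names
an existing guest frame and gpfn is the guest frame number it gets remapped
to, which is what patch 2/2 relies on to relocate VRAM:

    /* Illustrative only: remap one existing guest frame (idx) to a new
     * guest physical frame number (gpfn) in domain domid. */
    static int remap_vram_page(int xc_handle, uint32_t domid,
                               unsigned long idx, xen_pfn_t gpfn)
    {
        return xc_domain_add_to_physmap(xc_handle, domid,
                                        XENMAPSPACE_gmfn, idx, gpfn);
    }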
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
configure | 29 ++++++++++++++++++++++++++++-
hw/xen_common.h | 14 ++++++++++++++
2 files changed, 42 insertions(+), 1 deletions(-)
diff --git a/configure b/configure
index a318d37..7886d3f 100755
--- a/configure
+++ b/configure
@@ -1206,6 +1206,7 @@ int main(void) {
xc = xc_interface_open(0, 0, 0);
xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
xc_gnttab_open(NULL, 0);
+ xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
return 0;
}
EOF
@@ -1224,10 +1225,14 @@ EOF
# error HVM_MAX_VCPUS not defined
#endif
int main(void) {
+ struct xen_add_to_physmap xatp = {
+ .domid = 0, .space = XENMAPSPACE_gmfn, .idx = 0, .gpfn = 0,
+ };
xs_daemon_open();
xc_interface_open();
xc_gnttab_open();
xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
+ xc_memory_op(0, XENMEM_add_to_physmap, &xatp);
return 0;
}
EOF
@@ -1236,7 +1241,29 @@ EOF
xen_ctrl_version=400
xen=yes
- # Xen 3.3.0, 3.4.0
+ # Xen 3.4.0
+ elif (
+ cat > $TMPC <<EOF
+#include <xenctrl.h>
+#include <xs.h>
+int main(void) {
+ struct xen_add_to_physmap xatp = {
+ .domid = 0, .space = XENMAPSPACE_gmfn, .idx = 0, .gpfn = 0,
+ };
+ xs_daemon_open();
+ xc_interface_open();
+ xc_gnttab_open();
+ xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
+ xc_memory_op(0, XENMEM_add_to_physmap, &xatp);
+ return 0;
+}
+EOF
+ compile_prog "" "$xen_libs"
+ ) ; then
+ xen_ctrl_version=340
+ xen=yes
+
+ # Xen 3.3.0
elif (
cat > $TMPC <<EOF
#include <xenctrl.h>
diff --git a/hw/xen_common.h b/hw/xen_common.h
index a1958a0..2c79af6 100644
--- a/hw/xen_common.h
+++ b/hw/xen_common.h
@@ -71,6 +71,20 @@ static inline int xc_domain_populate_physmap_exact
(xc_handle, domid, nr_extents, extent_order, mem_flags, extent_start);
}
+static inline int xc_domain_add_to_physmap(int xc_handle, uint32_t domid,
+ unsigned int space, unsigned long idx,
+ xen_pfn_t gpfn)
+{
+ struct xen_add_to_physmap xatp = {
+ .domid = domid,
+ .space = space,
+ .idx = idx,
+ .gpfn = gpfn,
+ };
+
+ return xc_memory_op(xc_handle, XENMEM_add_to_physmap, &xatp);
+}
+
/* Xen 4.1 */
#else
--
1.7.2.5
* [Qemu-devel] [PATCH V3 2/2] xen: Introduce VGA sync dirty bitmap support
From: anthony.perard @ 2011-05-24 13:34 UTC
To: QEMU-devel, Stefano Stabellini, Alexander Graf; +Cc: Anthony PERARD, Xen Devel
From: Anthony PERARD <anthony.perard@citrix.com>
This patch introduces a CPUPhysMemoryClient for Xen.
Only sync_dirty_bitmap and set_memory are actually implemented;
migration_log stays empty for the moment.
Xen can only track dirty bits for a single memory range at a time, so
only the range passed in the first call will be synced.
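
In outline, the sync path (condensed from xen_sync_dirty_bitmap() in the
patch below) claims the range on first use, asks Xen for the dirty bitmap,
and turns each set bit into a dirty page mark:

    /* Only one range can be tracked at a time: the first caller claims it. */
    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        return -1;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);

    /* Each set bit in the returned bitmap is one dirty guest page. */
    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            cpu_physical_memory_set_dirty(vram_offset
                                          + (i * width + j) * TARGET_PAGE_SIZE);
        }
    }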
Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
---
trace-events | 1 +
xen-all.c | 267 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 268 insertions(+), 0 deletions(-)
diff --git a/trace-events b/trace-events
index 385cb00..31875c3 100644
--- a/trace-events
+++ b/trace-events
@@ -367,6 +367,7 @@ disable milkymist_vgafb_memory_write(uint32_t addr, uint32_t value) "addr %08x v
# xen-all.c
disable xen_ram_alloc(unsigned long ram_addr, unsigned long size) "requested: %#lx, size %#lx"
+disable xen_client_set_memory(uint64_t start_addr, unsigned long size, unsigned long phys_offset, bool log_dirty) "%#"PRIx64" size %#lx, offset %#lx, log_dirty %i"
# xen-mapcache.c
disable qemu_map_cache(uint64_t phys_addr) "want %#"PRIx64""
diff --git a/xen-all.c b/xen-all.c
index 0eac202..75a82c2 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -13,6 +13,7 @@
#include "hw/xen_common.h"
#include "hw/xen_backend.h"
+#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"
@@ -54,6 +55,14 @@ static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
#define BUFFER_IO_MAX_DELAY 100
+typedef struct XenPhysmap {
+ target_phys_addr_t start_addr;
+ ram_addr_t size;
+ target_phys_addr_t phys_offset;
+
+ QLIST_ENTRY(XenPhysmap) list;
+} XenPhysmap;
+
typedef struct XenIOState {
shared_iopage_t *shared_page;
buffered_iopage_t *buffered_io_page;
@@ -66,6 +75,9 @@ typedef struct XenIOState {
int send_vcpu;
struct xs_handle *xenstore;
+ CPUPhysMemoryClient client;
+ QLIST_HEAD(, XenPhysmap) physmap;
+ const XenPhysmap *log_for_dirtybit;
Notifier exit;
} XenIOState;
@@ -178,6 +190,256 @@ void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
qemu_free(pfn_list);
}
+static XenPhysmap *get_physmapping(XenIOState *state,
+ target_phys_addr_t start_addr, ram_addr_t size)
+{
+ XenPhysmap *physmap = NULL;
+
+ start_addr &= TARGET_PAGE_MASK;
+
+ QLIST_FOREACH(physmap, &state->physmap, list) {
+ if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
+ return physmap;
+ }
+ }
+ return NULL;
+}
+
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
+static int xen_add_to_physmap(XenIOState *state,
+ target_phys_addr_t start_addr,
+ ram_addr_t size,
+ target_phys_addr_t phys_offset)
+{
+ unsigned long i = 0;
+ int rc = 0;
+ XenPhysmap *physmap = NULL;
+ target_phys_addr_t pfn, start_gpfn;
+
+ if (get_physmapping(state, start_addr, size)) {
+ return 0;
+ }
+ if (size <= 0) {
+ return -1;
+ }
+
+ DPRINTF("mapping vram to %llx - %llx, from %llx\n",
+ start_addr, start_addr + size, phys_offset);
+
+ pfn = phys_offset >> TARGET_PAGE_BITS;
+ start_gpfn = start_addr >> TARGET_PAGE_BITS;
+ for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
+ unsigned long idx = pfn + i;
+ xen_pfn_t gpfn = start_gpfn + i;
+
+ rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+ if (rc) {
+ DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+ PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+ return -rc;
+ }
+ }
+
+ physmap = qemu_malloc(sizeof (XenPhysmap));
+
+ physmap->start_addr = start_addr;
+ physmap->size = size;
+ physmap->phys_offset = phys_offset;
+
+ QLIST_INSERT_HEAD(&state->physmap, physmap, list);
+
+ xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
+ start_addr >> TARGET_PAGE_BITS,
+ (start_addr + size) >> TARGET_PAGE_BITS,
+ XEN_DOMCTL_MEM_CACHEATTR_WB);
+ return 0;
+}
+
+static int xen_remove_from_physmap(XenIOState *state,
+ target_phys_addr_t start_addr,
+ ram_addr_t size)
+{
+ unsigned long i = 0;
+ int rc = 0;
+ XenPhysmap *physmap = NULL;
+ target_phys_addr_t phys_offset = 0;
+
+ physmap = get_physmapping(state, start_addr, size);
+ if (physmap == NULL) {
+ return -1;
+ }
+
+ phys_offset = physmap->phys_offset;
+ size = physmap->size;
+
+ DPRINTF("unmapping vram to %llx - %llx, from %llx\n",
+ phys_offset, phys_offset + size, start_addr);
+
+ size >>= TARGET_PAGE_BITS;
+ start_addr >>= TARGET_PAGE_BITS;
+ phys_offset >>= TARGET_PAGE_BITS;
+ for (i = 0; i < size; i++) {
+ unsigned long idx = start_addr + i;
+ xen_pfn_t gpfn = phys_offset + i;
+
+ rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
+ if (rc) {
+ fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
+ PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
+ return -rc;
+ }
+ }
+
+ QLIST_REMOVE(physmap, list);
+ if (state->log_for_dirtybit == physmap) {
+ state->log_for_dirtybit = NULL;
+ }
+ qemu_free(physmap);
+
+ return 0;
+}
+
+#else
+static int xen_add_to_physmap(XenIOState *state,
+ target_phys_addr_t start_addr,
+ ram_addr_t size,
+ target_phys_addr_t phys_offset)
+{
+ return -ENOSYS;
+}
+
+static int xen_remove_from_physmap(XenIOState *state,
+ target_phys_addr_t start_addr,
+ ram_addr_t size)
+{
+ return -ENOSYS;
+}
+#endif
+
+static void xen_client_set_memory(struct CPUPhysMemoryClient *client,
+ target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset,
+ bool log_dirty)
+{
+ XenIOState *state = container_of(client, XenIOState, client);
+ ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+ hvmmem_type_t mem_type;
+
+ if (!(start_addr != phys_offset
+ && ((log_dirty && flags < IO_MEM_UNASSIGNED)
+ || (!log_dirty && flags == IO_MEM_UNASSIGNED)))) {
+ return;
+ }
+
+ trace_xen_client_set_memory(start_addr, size, phys_offset, log_dirty);
+
+ start_addr &= TARGET_PAGE_MASK;
+ size = TARGET_PAGE_ALIGN(size);
+ phys_offset &= TARGET_PAGE_MASK;
+
+ switch (flags) {
+ case IO_MEM_RAM:
+ xen_add_to_physmap(state, start_addr, size, phys_offset);
+ break;
+ case IO_MEM_ROM:
+ mem_type = HVMMEM_ram_ro;
+ if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
+ start_addr >> TARGET_PAGE_BITS,
+ size >> TARGET_PAGE_BITS)) {
+ DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
+ start_addr);
+ }
+ break;
+ case IO_MEM_UNASSIGNED:
+ if (xen_remove_from_physmap(state, start_addr, size) < 0) {
+ DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
+ }
+ break;
+ }
+}
+
+static int xen_sync_dirty_bitmap(XenIOState *state,
+ target_phys_addr_t start_addr,
+ ram_addr_t size)
+{
+ target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
+ target_phys_addr_t vram_offset = 0;
+ const int width = sizeof(unsigned long) * 8;
+ unsigned long bitmap[(npages + width - 1) / width];
+ int rc, i, j;
+ const XenPhysmap *physmap = NULL;
+
+ physmap = get_physmapping(state, start_addr, size);
+ if (physmap == NULL) {
+ /* not handled */
+ return -1;
+ }
+
+ if (state->log_for_dirtybit == NULL) {
+ state->log_for_dirtybit = physmap;
+ } else if (state->log_for_dirtybit != physmap) {
+ return -1;
+ }
+ vram_offset = physmap->phys_offset;
+
+ rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
+ start_addr >> TARGET_PAGE_BITS, npages,
+ bitmap);
+ if (rc) {
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
+ unsigned long map = bitmap[i];
+ while (map != 0) {
+ j = ffsl(map) - 1;
+ map &= ~(1ul << j);
+ cpu_physical_memory_set_dirty(vram_offset + (i * width + j) * TARGET_PAGE_SIZE);
+ }
+ }
+
+ return 0;
+}
+
+static int xen_log_start(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ XenIOState *state = container_of(client, XenIOState, client);
+
+ return xen_sync_dirty_bitmap(state, phys_addr, size);
+}
+
+static int xen_log_stop(CPUPhysMemoryClient *client, target_phys_addr_t phys_addr, ram_addr_t size)
+{
+ XenIOState *state = container_of(client, XenIOState, client);
+
+ state->log_for_dirtybit = NULL;
+ /* Disable dirty bit tracking */
+ return xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
+}
+
+static int xen_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
+ target_phys_addr_t start_addr,
+ target_phys_addr_t end_addr)
+{
+ XenIOState *state = container_of(client, XenIOState, client);
+
+ return xen_sync_dirty_bitmap(state, start_addr, end_addr - start_addr);
+}
+
+static int xen_client_migration_log(struct CPUPhysMemoryClient *client,
+ int enable)
+{
+ return 0;
+}
+
+static CPUPhysMemoryClient xen_cpu_phys_memory_client = {
+ .set_memory = xen_client_set_memory,
+ .sync_dirty_bitmap = xen_client_sync_dirty_bitmap,
+ .migration_log = xen_client_migration_log,
+ .log_start = xen_log_start,
+ .log_stop = xen_log_stop,
+};
/* VCPU Operations, MMIO, IO ring ... */
@@ -581,6 +843,11 @@ int xen_hvm_init(void)
qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);
+ state->client = xen_cpu_phys_memory_client;
+ QLIST_INIT(&state->physmap);
+ cpu_register_phys_memory_client(&state->client);
+ state->log_for_dirtybit = NULL;
+
return 0;
}
--
1.7.2.5