From mboxrd@z Thu Jan 1 00:00:00 1970
From: Juan Quintela <quintela@redhat.com>
Date: Wed, 6 Nov 2013 14:04:48 +0100
Message-Id: <1383743088-8139-40-git-send-email-quintela@redhat.com>
In-Reply-To: <1383743088-8139-1-git-send-email-quintela@redhat.com>
References: <1383743088-8139-1-git-send-email-quintela@redhat.com>
Subject: [Qemu-devel] [PATCH 39/39] migration: synchronize memory bitmap 64bits at a time
List-Id: <qemu-devel.nongnu.org>
To: qemu-devel@nongnu.org
Cc: chegu_vinod@hp.com

We use the old code if the bitmaps are not aligned.

Signed-off-by: Juan Quintela <quintela@redhat.com>
---
 arch_init.c | 43 ++++++++++++++++++++++++++++++++++---------
 1 file changed, 34 insertions(+), 9 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index fe88d64..0909531 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -50,6 +50,7 @@
 #include "exec/cpu-all.h"
 #include "exec/memory-physical.h"
 #include "hw/acpi/acpi.h"
+#include "qemu/host-utils.h"
 
 #ifdef DEBUG_ARCH_INIT
 #define DPRINTF(fmt, ...) \
@@ -376,15 +377,37 @@ static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
 {
     ram_addr_t addr;
-
-    for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
-        if (cpu_physical_memory_get_dirty(start + addr,
-                                          TARGET_PAGE_SIZE,
-                                          DIRTY_MEMORY_MIGRATION)) {
-            cpu_physical_memory_reset_dirty(start + addr,
-                                            TARGET_PAGE_SIZE,
-                                            DIRTY_MEMORY_MIGRATION);
-            migration_bitmap_set_dirty(start + addr);
+    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+    /* start address is aligned at the start of a word? */
+    if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+        int k;
+        int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+        unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+
+        printf("XXXX:optimized start %lx page %lx length %lu\n", start, page, length);
+
+        for (k = page; k < page + nr; k++) {
+            if (src[k]) {
+                unsigned long new_dirty;
+                new_dirty = ~migration_bitmap[k];
+                migration_bitmap[k] |= src[k];
+                new_dirty &= src[k];
+                migration_dirty_pages += ctpopl(new_dirty);
+                src[k] = 0;
+            }
+        }
+    } else {
+        printf("XXXX:not optimized start %lx length %lu\n", start, length);
+        for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+            if (cpu_physical_memory_get_dirty(start + addr,
+                                              TARGET_PAGE_SIZE,
+                                              DIRTY_MEMORY_MIGRATION)) {
+                cpu_physical_memory_reset_dirty(start + addr,
+                                                TARGET_PAGE_SIZE,
+                                                DIRTY_MEMORY_MIGRATION);
+                migration_bitmap_set_dirty(start + addr);
+            }
         }
     }
 }
@@ -415,6 +438,8 @@ static void migration_bitmap_sync(void)
     address_space_sync_dirty_bitmap(&address_space_memory);
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+        printf("XXXX: name %s addr %lx length %lu\n",
+               block->idstr, block->mr->ram_addr, block->length);
        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
     }
     trace_migration_bitmap_sync_end(migration_dirty_pages
-- 
1.8.3.1
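
The heart of the fast path above is a word-at-a-time merge: any bit set in the RAM list's migration dirty bitmap but not yet set in migration_bitmap is a newly dirtied page, and ctpopl() counts all of those bits in a single step instead of testing pages one by one. The following is a minimal standalone sketch of that merge, not the patch itself: sync_range_words, src_bitmap, dest_bitmap and the toy main() are made-up names, and the GCC/Clang builtin __builtin_popcountl stands in for QEMU's ctpopl() from qemu/host-utils.h.

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define NR_WORDS      4   /* toy bitmaps covering NR_WORDS * BITS_PER_LONG pages */

static unsigned long src_bitmap[NR_WORDS];   /* dirty bits produced since the last sync */
static unsigned long dest_bitmap[NR_WORDS];  /* bits already queued for migration */
static unsigned long long dirty_pages;       /* running count of pages left to send */

static void sync_range_words(size_t first_word, size_t nr_words)
{
    for (size_t k = first_word; k < first_word + nr_words; k++) {
        if (src_bitmap[k]) {
            /* Bits set in src but not yet in dest are newly dirty pages. */
            unsigned long new_dirty = ~dest_bitmap[k] & src_bitmap[k];

            dest_bitmap[k] |= src_bitmap[k];
            /* __builtin_popcountl plays the role of QEMU's ctpopl(). */
            dirty_pages += __builtin_popcountl(new_dirty);
            src_bitmap[k] = 0;   /* consume (reset) the source dirty bits */
        }
    }
}

int main(void)
{
    /* Pages 0, 1 and 65 were dirtied since the last sync... */
    src_bitmap[0] = 0x3;
    src_bitmap[65 / BITS_PER_LONG] |= 1UL << (65 % BITS_PER_LONG);
    /* ...but page 1 is already marked for migration, so it must not be recounted. */
    dest_bitmap[0] = 0x2;

    sync_range_words(0, NR_WORDS);
    printf("newly dirty pages: %llu\n", dirty_pages);   /* prints 2 */
    return 0;
}

Counting only ~dest & src before the OR is what keeps migration_dirty_pages accurate: pages that were already pending are merged but not counted twice.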
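
The fast path is only taken when the range starts on a bitmap-word boundary, i.e. on a multiple of BITS_PER_LONG pages; otherwise the patch keeps the old page-at-a-time loop. Here is a small sketch of that decision under assumed values (4 KiB target pages, 64-bit host), with BIT_WORD() and BITS_TO_LONGS() re-implemented locally so it builds outside QEMU; range_is_word_aligned and the sample addresses are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS  12                     /* assumed: 4 KiB target pages */
#define BITS_PER_LONG     (8 * sizeof(unsigned long))
#define BIT_WORD(nr)      ((nr) / BITS_PER_LONG)
#define BITS_TO_LONGS(nr) (((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

typedef uint64_t ram_addr_t;

/* The word-at-a-time path is only safe when 'start' falls exactly on the
 * first page covered by a bitmap word, i.e. a multiple of BITS_PER_LONG pages. */
static bool range_is_word_aligned(ram_addr_t start)
{
    unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
    return ((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start;
}

int main(void)
{
    /* One bitmap word covers BITS_PER_LONG pages: 64 * 4 KiB = 256 KiB here. */
    ram_addr_t word_bytes = (ram_addr_t)BITS_PER_LONG << TARGET_PAGE_BITS;

    printf("start 0x0:     %s\n", range_is_word_aligned(0) ? "fast path" : "fallback");
    printf("start 0x%llx: %s\n", (unsigned long long)word_bytes,
           range_is_word_aligned(word_bytes) ? "fast path" : "fallback");
    printf("start 0x1000:  %s\n", range_is_word_aligned(0x1000) ? "fast path" : "fallback");

    /* Number of bitmap words covering a byte length, as computed in the patch. */
    ram_addr_t length = 5 * word_bytes + 4096;   /* 321 pages */
    printf("words for length 0x%llx: %d\n", (unsigned long long)length,
           (int)BITS_TO_LONGS(length >> TARGET_PAGE_BITS));   /* prints 6 */
    return 0;
}

In practice RAM blocks are registered at word-aligned ram_addr offsets, so the fallback branch is the exception rather than the rule.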