* [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423
@ 2012-07-13 9:28 Juergen Gross
2012-07-13 9:28 ` [PATCH 1 of 4] xen 4.0: tools: libxc: Detect superpages on domain restore Juergen Gross
` (3 more replies)
0 siblings, 4 replies; 5+ messages in thread
From: Juergen Gross @ 2012-07-13 9:28 UTC (permalink / raw)
To: xen-devel; +Cc: george.dunlap
This is a backport of changesets 23420-23423 from xen-unstable to support
allocation of superpages for HVM domains on restore.
Original patches by George Dunlap.
6 files changed, 201 insertions(+), 80 deletions(-)
tools/libxc/xc_domain_restore.c | 157 ++++++++++++++++++++++++-------
tools/libxc/xc_domain_save.c | 117 ++++++++++++++---------
tools/libxl/libxl_dom.c | 2
tools/python/xen/xend/XendCheckpoint.py | 2
tools/xcutils/xc_restore.c | 2
xen/include/public/domctl.h | 1
* [PATCH 1 of 4] xen 4.0: tools: libxc: Detect superpages on domain restore
2012-07-13 9:28 [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423 Juergen Gross
@ 2012-07-13 9:28 ` Juergen Gross
2012-07-13 9:28 ` [PATCH 2 of 4] xen 4.0: tools: Save superpages in the same batch, to make detection easier Juergen Gross
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2012-07-13 9:28 UTC (permalink / raw)
To: xen-devel; +Cc: george.dunlap
[-- Attachment #1: Type: text/plain, Size: 509 bytes --]
When receiving pages, look for contiguous 2-meg aligned regions and
attempt to allocate a superpage for that region, falling back to
4k pages if the allocation fails.
(Minor conflict fixed up. -iwj)
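For readers skimming the diff below, the restore-side detection amounts to roughly the following. This is a self-contained sketch, not the libxc code itself; try_alloc_superpage() and alloc_4k_page() are hypothetical stand-ins for order-9 and order-0 xc_domain_memory_populate_physmap() calls.

/* Sketch only: scan a batch of incoming pfns for a full, 2MB-aligned run
 * of 512 sequential pfns; try one order-9 allocation for the run and fall
 * back to order-0 (4K) pages if the run is partial or the allocation fails. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SUPERPAGE_PFN_SHIFT   9
#define SUPERPAGE_NR_PFNS     (1UL << SUPERPAGE_PFN_SHIFT)       /* 512 * 4K = 2MB */
#define SUPER_PAGE_START(pfn) (((pfn) & (SUPERPAGE_NR_PFNS - 1)) == 0)

/* Hypothetical allocators; the real code calls xc_domain_memory_populate_physmap(). */
static bool try_alloc_superpage(unsigned long first_pfn) { (void)first_pfn; return (rand() & 3) != 0; }
static void alloc_4k_page(unsigned long pfn) { (void)pfn; }

static void place_batch(const unsigned long *pfns, int n)
{
    int i = 0;

    while (i < n)
    {
        if (SUPER_PAGE_START(pfns[i]) && i + (int)SUPERPAGE_NR_PFNS <= n)
        {
            int run = 1;
            while (run < (int)SUPERPAGE_NR_PFNS && pfns[i + run] == pfns[i] + run)
                run++;

            if (run == (int)SUPERPAGE_NR_PFNS && try_alloc_superpage(pfns[i]))
            {
                printf("2MB page at pfn %#lx\n", pfns[i]);
                i += SUPERPAGE_NR_PFNS;
                continue;
            }
        }
        alloc_4k_page(pfns[i]);       /* partial run, or the order-9 allocation failed */
        i++;
    }
}

int main(void)
{
    unsigned long pfns[SUPERPAGE_NR_PFNS + 1];
    int i;

    for (i = 0; i < (int)SUPERPAGE_NR_PFNS; i++)
        pfns[i] = 0x200 + i;          /* one aligned, contiguous 2MB run */
    pfns[i] = 0x1001;                 /* plus a stray 4K page */

    place_batch(pfns, (int)SUPERPAGE_NR_PFNS + 1);
    return 0;
}

The actual patch keeps per-batch state (superpage_start, scount) instead of scanning ahead, so a candidate can also be abandoned mid-run and fall back page by page, as the fallback labels in the diff show.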
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
1 file changed, 106 insertions(+), 16 deletions(-)
tools/libxc/xc_domain_restore.c | 122 +++++++++++++++++++++++++++++++++------
[-- Attachment #2: xen-4.0-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 7774 bytes --]
# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171661 -7200
# Node ID b2d7c4238c2f488d6d19b4b1be5e836872e276bd
# Parent 30c9bcaec782d200113dfaebb97d55a9e73cd869
xen 4.0: tools: libxc: Detect superpages on domain restore
When receiving pages, look for contiguous 2-meg aligned regions and
attempt to allocate a superpage for that region, falling back to
4k pages if the allocation fails.
(Minor conflict fixed up. -iwj)
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
diff -r 30c9bcaec782 -r b2d7c4238c2f tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Tue Jul 03 13:51:14 2012 +0100
+++ b/tools/libxc/xc_domain_restore.c Fri Jul 13 11:27:41 2012 +0200
@@ -45,6 +45,11 @@ struct restore_ctx {
};
#define HEARTBEAT_MS 1000
+
+#define SUPERPAGE_PFN_SHIFT 9
+#define SUPERPAGE_NR_PFNS (1UL << SUPERPAGE_PFN_SHIFT)
+
+#define SUPER_PAGE_START(pfn) (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
#ifndef __MINIOS__
static ssize_t read_exact_timed(struct restore_ctx *ctx,
@@ -800,9 +805,11 @@ static int apply_batch(int xc_handle, ui
static int apply_batch(int xc_handle, uint32_t dom, struct restore_ctx *ctx,
xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
unsigned int hvm, struct xc_mmu* mmu,
- pagebuf_t* pagebuf, int curbatch)
+ pagebuf_t* pagebuf, int curbatch, int superpages)
{
int i, j, curpage, nr_mfns;
+ int k, scount;
+ unsigned long superpage_start=INVALID_P2M_ENTRY;
/* used by debug verify code */
unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
/* Our mapping of the current region (batch) */
@@ -820,8 +827,8 @@ static int apply_batch(int xc_handle, ui
if (j > MAX_BATCH_SIZE)
j = MAX_BATCH_SIZE;
- /* First pass for this batch: work out how much memory to alloc */
- nr_mfns = 0;
+ /* First pass for this batch: work out how much memory to alloc, and detect superpages */
+ nr_mfns = scount = 0;
for ( i = 0; i < j; i++ )
{
unsigned long pfn, pagetype;
@@ -832,19 +839,103 @@ static int apply_batch(int xc_handle, ui
(ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
{
/* Have a live PFN which hasn't had an MFN allocated */
+
+ /* Logic if we're in the middle of detecting a candidate superpage */
+ if ( superpage_start != INVALID_P2M_ENTRY )
+ {
+ /* Is this the next expected continuation? */
+ if ( pfn == superpage_start + scount )
+ {
+ if ( !superpages )
+ {
+ ERROR("Unexpexted codepath with no superpages");
+ return -1;
+ }
+
+ scount++;
+
+ /* If we've found a whole superpage, allocate it and update p2m */
+ if ( scount == SUPERPAGE_NR_PFNS )
+ {
+ unsigned long supermfn;
+
+
+ supermfn=superpage_start;
+ if ( xc_domain_memory_populate_physmap(xc_handle, dom, 1,
+ SUPERPAGE_PFN_SHIFT, 0, &supermfn) != 0 )
+ {
+ DPRINTF("No 2M page available for pfn 0x%lx, fall back to 4K page.\n",
+ superpage_start);
+ /* If we're falling back from a failed allocation, subtract one
+ * from count, since the last page == pfn, which will be handled
+ * anyway. */
+ scount--;
+ goto fallback;
+ }
+
+ DPRINTF("Mapping superpage (%d) pfn %lx, mfn %lx\n", scount, superpage_start, supermfn);
+ for (k=0; k<scount; k++)
+ {
+ /* We just allocated a new mfn above; update p2m */
+ ctx->p2m[superpage_start+k] = supermfn+k;
+ ctx->nr_pfns++;
+ /* region_map[] will be set below */
+ }
+ superpage_start=INVALID_P2M_ENTRY;
+ scount=0;
+ }
+ continue;
+ }
+
+ fallback:
+ DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+ for (k=0; k<scount; k++)
+ {
+ ctx->p2m_batch[nr_mfns++] = superpage_start+k;
+ ctx->p2m[superpage_start+k]--;
+ }
+ superpage_start = INVALID_P2M_ENTRY;
+ scount=0;
+ }
+
+ /* Are we ready to start a new superpage candidate? */
+ if ( superpages && SUPER_PAGE_START(pfn) )
+ {
+ superpage_start=pfn;
+ scount++;
+ continue;
+ }
+
+ /* Add the current pfn to pfn_batch */
ctx->p2m_batch[nr_mfns++] = pfn;
ctx->p2m[pfn]--;
}
- }
+ }
+
+ /* Clean up any partial superpage candidates */
+ if ( superpage_start != INVALID_P2M_ENTRY )
+ {
+ DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+ for (k=0; k<scount; k++)
+ {
+ ctx->p2m_batch[nr_mfns++] = superpage_start+k;
+ ctx->p2m[superpage_start+k]--;
+ }
+ superpage_start = INVALID_P2M_ENTRY;
+ }
/* Now allocate a bunch of mfns for this batch */
- if ( nr_mfns &&
- (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
- 0, ctx->p2m_batch) != 0) )
- {
- ERROR("Failed to allocate memory for batch.!\n");
- errno = ENOMEM;
- return -1;
+ if ( nr_mfns )
+ {
+ DPRINTF("Mapping order 0, %d; first pfn %lx\n", nr_mfns, ctx->p2m_batch[0]);
+
+ if(xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
+ 0, ctx->p2m_batch) != 0)
+ {
+ ERROR("Failed to allocate memory for batch.!\n");
+ errno = ENOMEM;
+ return -1;
+ }
}
/* Second pass for this batch: update p2m[] and region_mfn[] */
@@ -895,7 +986,8 @@ static int apply_batch(int xc_handle, ui
if (pfn_err[i])
{
- ERROR("unexpected PFN mapping failure");
+ ERROR("unexpected PFN mapping failure pfn %lx map_mfn %lx p2m_mfn %lx",
+ pfn, region_mfn[i], ctx->p2m[pfn]);
goto err_mapped;
}
@@ -1058,9 +1150,6 @@ int xc_domain_restore(int xc_handle, int
/* For info only */
ctx->nr_pfns = 0;
- if ( superpages )
- return 1;
-
if ( read_exact(io_fd, &dinfo->p2m_size, sizeof(unsigned long)) )
{
ERROR("read: p2m_size");
@@ -1209,7 +1298,8 @@ int xc_domain_restore(int xc_handle, int
int brc;
brc = apply_batch(xc_handle, dom, ctx, region_mfn, pfn_type,
- pae_extended_cr3, hvm, mmu, &pagebuf, curbatch);
+ pae_extended_cr3, hvm, mmu, &pagebuf, curbatch,
+ superpages);
if ( brc < 0 )
goto out;
* [PATCH 2 of 4] xen 4.0: tools: Save superpages in the same batch, to make detection easier
2012-07-13 9:28 [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423 Juergen Gross
2012-07-13 9:28 ` [PATCH 1 of 4] xen 4.0: tools: libxc: Detect superpages on domain restore Juergen Gross
@ 2012-07-13 9:28 ` Juergen Gross
2012-07-13 9:28 ` [PATCH 3 of 4] xen 4.0: tools: Introduce "allocate-only" page type for migration Juergen Gross
2012-07-13 9:28 ` [PATCH 4 of 4] xen 4.0: tools: Enable superpages for HVM domains by default Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2012-07-13 9:28 UTC (permalink / raw)
To: xen-devel; +Cc: george.dunlap
[-- Attachment #1: Type: text/plain, Size: 415 bytes --]
On the first time through (when pfns are mostly allocated on
the receiving side), try to keep superpages together in the same
batch by ending a batch early if we see the first page of a
potential superpage and there isn't enough room in the batch
for a full superpage.
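The batch-boundary rule itself is just the condition below; this is a sketch, not the xc_domain_save.c code verbatim, and assumes the constants used elsewhere in the series (MAX_BATCH_SIZE is the existing save-side batch limit).

/* Sketch of the early-cutoff test: on the first iteration of an HVM save,
 * stop filling the current batch when the next pfn starts a potential
 * superpage that could not fit into the batch whole. */
#define MAX_BATCH_SIZE        1024                 /* assumed batch limit */
#define SUPERPAGE_PFN_SHIFT   9
#define SUPERPAGE_NR_PFNS     (1UL << SUPERPAGE_PFN_SHIFT)
#define SUPER_PAGE_START(pfn) (((pfn) & (SUPERPAGE_NR_PFNS - 1)) == 0)

static int end_batch_early(int superpages, int iter, unsigned long pfn, int batch)
{
    return superpages && iter == 1 &&
           SUPER_PAGE_START(pfn) &&
           batch + SUPERPAGE_NR_PFNS > MAX_BATCH_SIZE;
}

Ending the batch early costs a slightly smaller batch occasionally, but guarantees that a superpage candidate arrives at the receiver in one piece, which is what the detection in patch 1 relies on.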
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
1 file changed, 12 insertions(+)
tools/libxc/xc_domain_save.c | 12 ++++++++++++
[-- Attachment #2: xen-4.0-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 2157 bytes --]
# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171663 -7200
# Node ID 8b1e10c2494fd31f2ca53926cca2993dff045125
# Parent b2d7c4238c2f488d6d19b4b1be5e836872e276bd
xen 4.0: tools: Save superpages in the same batch, to make detection easier
On the first time through (when pfns are mostly allocated on
the receiving side), try to keep superpages together in the same
batch by ending a batch early if we see the first page of a
potential superpage and there isn't enough room in the batch
for a full superpage.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
diff -r b2d7c4238c2f -r 8b1e10c2494f tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Fri Jul 13 11:27:41 2012 +0200
+++ b/tools/libxc/xc_domain_save.c Fri Jul 13 11:27:43 2012 +0200
@@ -67,6 +67,11 @@ struct outbuf {
(((_mfn) < (ctx->max_mfn)) && \
((mfn_to_pfn(_mfn) < (dinfo->p2m_size)) && \
(pfn_to_mfn(mfn_to_pfn(_mfn)) == (_mfn))))
+
+#define SUPERPAGE_PFN_SHIFT 9
+#define SUPERPAGE_NR_PFNS (1UL << SUPERPAGE_PFN_SHIFT)
+
+#define SUPER_PAGE_START(pfn) (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
/*
** During (live) save/migrate, we maintain a number of bitmaps to track
@@ -866,6 +871,7 @@ int xc_domain_save(int xc_handle, int io
int rc = 1, frc, i, j, last_iter = 0, iter = 0;
int live = (flags & XCFLAGS_LIVE);
int debug = (flags & XCFLAGS_DEBUG);
+ int superpages = !!hvm;
int race = 0, sent_last_iter, skip_this_iter;
int tmem_saved = 0;
@@ -1228,6 +1234,12 @@ int xc_domain_save(int xc_handle, int io
(test_bit(n, to_fix) && last_iter)) )
continue;
+ /* First time through, try to keep superpages in the same batch */
+ if ( superpages && iter == 1
+ && SUPER_PAGE_START(n)
+ && batch + SUPERPAGE_NR_PFNS > MAX_BATCH_SIZE )
+ break;
+
/*
** we get here if:
** 1. page is marked to_send & hasn't already been re-dirtied
* [PATCH 3 of 4] xen 4.0: tools: Introduce "allocate-only" page type for migration
2012-07-13 9:28 [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423 Juergen Gross
2012-07-13 9:28 ` [PATCH 1 of 4] xen 4.0: tools: libxc: Detect superpages on domain restore Juergen Gross
2012-07-13 9:28 ` [PATCH 2 of 4] xen 4.0: tools: Save superpages in the same batch, to make detection easier Juergen Gross
@ 2012-07-13 9:28 ` Juergen Gross
2012-07-13 9:28 ` [PATCH 4 of 4] xen 4.0: tools: Enable superpages for HVM domains by default Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2012-07-13 9:28 UTC (permalink / raw)
To: xen-devel; +Cc: george.dunlap
[-- Attachment #1: Type: text/plain, Size: 908 bytes --]
To detect presence of superpages on the receiver side, we need
to have strings of sequential pfns sent across on the first iteration
through the memory. However, as we go through the memory, more and
more of it will be marked dirty, making it wasteful to send those pages.
This patch introduces a new PFINFO type, "XALLOC". Like PFINFO_XTAB, it
indicates that there is no corresponding page present in the subsequent
page buffer. However, unlike PFINFO_XTAB, it contains a pfn which should be
allocated.
This new type is only used for migration; but it's placed in
xen/public/domctl.h so that the value isn't reused.
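In other words, on the receiving side an XALLOC entry is treated like a normal page for allocation but like XTAB for mapping and copying. Roughly, per batch entry (a sketch with illustrative helper names, not the libxc code):

/* Sketch of per-entry handling on restore; the PFINFO values match
 * xen/include/public/domctl.h, the helpers are hypothetical. */
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU << 28)
#define XEN_DOMCTL_PFINFO_XTAB      (0xfU << 28)    /* invalid page */
#define XEN_DOMCTL_PFINFO_XALLOC    (0xeU << 28)    /* allocate-only page */

void allocate_pfn_if_needed(unsigned long pfn);     /* hypothetical: populate_physmap */
void copy_page_data_from_stream(unsigned long pfn); /* hypothetical: map + copy buffer */

static void handle_entry(unsigned long pfn_type_entry)
{
    unsigned long pfn  = pfn_type_entry & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
    unsigned long type = pfn_type_entry &  XEN_DOMCTL_PFINFO_LTAB_MASK;

    if (type == XEN_DOMCTL_PFINFO_XTAB)
        return;                       /* bogus/unmapped page: skip entirely */

    allocate_pfn_if_needed(pfn);      /* XALLOC and normal pages both get memory */

    if (type == XEN_DOMCTL_PFINFO_XALLOC)
        return;                       /* allocate-only: no page data was sent */

    copy_page_data_from_stream(pfn);  /* normal page: map it and copy its contents */
}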
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
3 files changed, 79 insertions(+), 62 deletions(-)
tools/libxc/xc_domain_restore.c | 35 +++++++------
tools/libxc/xc_domain_save.c | 105 +++++++++++++++++++++------------------
xen/include/public/domctl.h | 1
[-- Attachment #2: xen-4.0-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 10822 bytes --]
# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171664 -7200
# Node ID 7bf9266bc106acaedd3c2cf70f7040e48be96bbb
# Parent 8b1e10c2494fd31f2ca53926cca2993dff045125
xen 4.0: tools: Introduce "allocate-only" page type for migration
To detect presence of superpages on the receiver side, we need
to have strings of sequential pfns sent across on the first iteration
through the memory. However, as we go through the memory, more and
more of it will be marked dirty, making it wasteful to send those pages.
This patch introduces a new PFINFO type, "XALLOC". Like PFINFO_XTAB, it
indicates that there is no corresponding page present in the subsequent
page buffer. However, unlike PFINFO_XTAB, it contains a pfn which should be
allocated.
This new type is only used for migration; but it's placed in
xen/public/domctl.h so that the value isn't reused.
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
diff -r 8b1e10c2494f -r 7bf9266bc106 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c Fri Jul 13 11:27:43 2012 +0200
+++ b/tools/libxc/xc_domain_restore.c Fri Jul 13 11:27:44 2012 +0200
@@ -757,7 +757,8 @@ static int pagebuf_get_one(struct restor
countpages = count;
for (i = oldcount; i < buf->nr_pages; ++i)
- if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB)
+ if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB
+ ||(buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XALLOC)
--countpages;
if (!countpages)
@@ -835,6 +836,7 @@ static int apply_batch(int xc_handle, ui
pfn = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
pagetype = pagebuf->pfn_types[i + curbatch] & XEN_DOMCTL_PFINFO_LTAB_MASK;
+ /* For allocation purposes, treat XEN_DOMCTL_PFINFO_XALLOC as a normal page */
if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) &&
(ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
{
@@ -946,21 +948,21 @@ static int apply_batch(int xc_handle, ui
pfn = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
pagetype = pagebuf->pfn_types[i + curbatch] & XEN_DOMCTL_PFINFO_LTAB_MASK;
- if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+ if ( pagetype != XEN_DOMCTL_PFINFO_XTAB
+ && ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
+ {
+ /* We just allocated a new mfn above; update p2m */
+ ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++];
+ ctx->nr_pfns++;
+ }
+
+ /* setup region_mfn[] for batch map, if necessary.
+ * For HVM guests, this interface takes PFNs, not MFNs */
+ if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+ || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
region_mfn[i] = ~0UL; /* map will fail but we don't care */
- else
- {
- if ( ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
- {
- /* We just allocated a new mfn above; update p2m */
- ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++];
- ctx->nr_pfns++;
- }
-
- /* setup region_mfn[] for batch map.
- * For HVM guests, this interface takes PFNs, not MFNs */
+ else
region_mfn[i] = hvm ? pfn : ctx->p2m[pfn];
- }
}
/* Map relevant mfns */
@@ -980,8 +982,9 @@ static int apply_batch(int xc_handle, ui
pfn = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
pagetype = pagebuf->pfn_types[i + curbatch] & XEN_DOMCTL_PFINFO_LTAB_MASK;
- if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
- /* a bogus/unmapped page: skip it */
+ if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+ || pagetype == XEN_DOMCTL_PFINFO_XALLOC)
+ /* a bogus/unmapped/allocate-only page: skip it */
continue;
if (pfn_err[i])
diff -r 8b1e10c2494f -r 7bf9266bc106 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c Fri Jul 13 11:27:43 2012 +0200
+++ b/tools/libxc/xc_domain_save.c Fri Jul 13 11:27:44 2012 +0200
@@ -1224,13 +1224,15 @@ int xc_domain_save(int xc_handle, int io
}
else
{
- if ( !last_iter &&
+ int dont_skip = (last_iter || (superpages && iter==1));
+
+ if ( !dont_skip &&
test_bit(n, to_send) &&
test_bit(n, to_skip) )
skip_this_iter++; /* stats keeping */
if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
- (test_bit(n, to_send) && last_iter) ||
+ (test_bit(n, to_send) && dont_skip) ||
(test_bit(n, to_fix) && last_iter)) )
continue;
@@ -1243,7 +1245,7 @@ int xc_domain_save(int xc_handle, int io
/*
** we get here if:
** 1. page is marked to_send & hasn't already been re-dirtied
- ** 2. (ignore to_skip in last iteration)
+ ** 2. (ignore to_skip in first and last iterations)
** 3. add in pages that still need fixup (net bufs)
*/
@@ -1267,7 +1269,7 @@ int xc_domain_save(int xc_handle, int io
set_bit(n, to_fix);
continue;
}
-
+
if ( last_iter &&
test_bit(n, to_fix) &&
!test_bit(n, to_send) )
@@ -1294,56 +1296,66 @@ int xc_domain_save(int xc_handle, int io
goto out;
}
- if ( hvm )
+ /* Get page types */
+ if ( xc_get_pfn_type_batch(xc_handle, dom, batch, pfn_type) )
{
- /* Look for and skip completely empty batches. */
- for ( j = 0; j < batch; j++ )
+ PERROR("get_pfn_type_batch failed");
+ goto out;
+ }
+
+ for ( run = j = 0; j < batch; j++ )
+ {
+ unsigned long gmfn = pfn_batch[j];
+
+ if ( !hvm )
+ gmfn = pfn_to_mfn(gmfn);
+
+ if ( pfn_err[j] )
{
- if ( !pfn_err[j] )
- break;
- pfn_type[j] |= XEN_DOMCTL_PFINFO_XTAB;
- }
- if ( j == batch )
- {
- munmap(region_base, batch*PAGE_SIZE);
- continue; /* bail on this batch: no valid pages */
- }
- for ( ; j < batch; j++ )
- if ( pfn_err[j] )
- pfn_type[j] |= XEN_DOMCTL_PFINFO_XTAB;
- }
- else
- {
- /* Get page types */
- if ( xc_get_pfn_type_batch(xc_handle, dom, batch, pfn_type) )
- {
- ERROR("get_pfn_type_batch failed");
- goto out;
+ if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
+ continue;
+
+ DPRINTF("map fail: page %i mfn %08lx err %d\n",
+ j, gmfn, pfn_err[j]);
+ pfn_type[j] = XEN_DOMCTL_PFINFO_XTAB;
+ continue;
}
- for ( j = 0; j < batch; j++ )
+ if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
{
- unsigned long mfn = pfn_to_mfn(pfn_batch[j]);
-
- if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
- {
- DPRINTF("type fail: page %i mfn %08lx\n",
- j, mfn);
- continue;
- }
-
- if ( debug )
+ DPRINTF("type fail: page %i mfn %08lx\n", j, gmfn);
+ continue;
+ }
+
+ if ( superpages && iter==1 && test_bit(gmfn, to_skip))
+ pfn_type[j] = XEN_DOMCTL_PFINFO_XALLOC;
+
+ /* canonicalise mfn->pfn */
+ pfn_type[j] |= pfn_batch[j];
+ ++run;
+
+ if ( debug )
+ {
+ if ( hvm )
+ DPRINTF("%d pfn=%08lx sum=%08lx\n",
+ iter,
+ pfn_type[j],
+ csum_page(region_base + (PAGE_SIZE*j)));
+ else
DPRINTF("%d pfn= %08lx mfn= %08lx [mfn]= %08lx"
" sum= %08lx\n",
iter,
- pfn_type[j] | pfn_batch[j],
- mfn,
- mfn_to_pfn(mfn),
+ pfn_type[j],
+ gmfn,
+ mfn_to_pfn(gmfn),
csum_page(region_base + (PAGE_SIZE*j)));
-
- /* canonicalise mfn->pfn */
- pfn_type[j] |= pfn_batch[j];
}
+ }
+
+ if ( !run )
+ {
+ munmap(region_base, batch*PAGE_SIZE);
+ continue; /* bail on this batch: no valid pages */
}
if ( write_exact(io_fd, &batch, sizeof(unsigned int)) )
@@ -1392,8 +1404,9 @@ int xc_domain_save(int xc_handle, int io
}
}
- /* skip pages that aren't present */
- if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+ /* skip pages that aren't present or are alloc-only */
+ if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+ || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
continue;
pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
diff -r 8b1e10c2494f -r 7bf9266bc106 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h Fri Jul 13 11:27:43 2012 +0200
+++ b/xen/include/public/domctl.h Fri Jul 13 11:27:44 2012 +0200
@@ -137,6 +137,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
+#define XEN_DOMCTL_PFINFO_XALLOC (0xeU<<28) /* allocate-only page */
#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
* [PATCH 4 of 4] xen 4.0: tools: Enable superpages for HVM domains by default
2012-07-13 9:28 [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423 Juergen Gross
` (2 preceding siblings ...)
2012-07-13 9:28 ` [PATCH 3 of 4] xen 4.0: tools: Introduce "allocate-only" page type for migration Juergen Gross
@ 2012-07-13 9:28 ` Juergen Gross
3 siblings, 0 replies; 5+ messages in thread
From: Juergen Gross @ 2012-07-13 9:28 UTC (permalink / raw)
To: xen-devel; +Cc: george.dunlap
[-- Attachment #1: Type: text/plain, Size: 322 bytes --]
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
3 files changed, 4 insertions(+), 2 deletions(-)
tools/libxl/libxl_dom.c | 2 +-
tools/python/xen/xend/XendCheckpoint.py | 2 ++
tools/xcutils/xc_restore.c | 2 +-
[-- Attachment #2: xen-4.0-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 2042 bytes --]
# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171665 -7200
# Node ID 1d7a98073b7dc12542cf2e31e93462ad735b306d
# Parent 7bf9266bc106acaedd3c2cf70f7040e48be96bbb
xen 4.0: tools: Enable superpages for HVM domains by default
Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>
diff -r 7bf9266bc106 -r 1d7a98073b7d tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c Fri Jul 13 11:27:44 2012 +0200
+++ b/tools/libxl/libxl_dom.c Fri Jul 13 11:27:45 2012 +0200
@@ -192,7 +192,7 @@ int restore_common(struct libxl_ctx *ctx
return xc_domain_restore(ctx->xch, fd, domid,
state->store_port, &state->store_mfn,
state->console_port, &state->console_mfn,
- info->hvm, info->u.hvm.pae, 0);
+ info->hvm, info->u.hvm.pae, !!info->hvm);
}
struct suspendinfo {
diff -r 7bf9266bc106 -r 1d7a98073b7d tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py Fri Jul 13 11:27:44 2012 +0200
+++ b/tools/python/xen/xend/XendCheckpoint.py Fri Jul 13 11:27:45 2012 +0200
@@ -298,6 +298,8 @@ def restore(xd, fd, dominfo = None, paus
dominfo.info['shadow_memory'] = shadow_cur
superpages = restore_image.superpages
+ if is_hvm:
+ superpages = 1
cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
fd, dominfo.getDomid(),
diff -r 7bf9266bc106 -r 1d7a98073b7d tools/xcutils/xc_restore.c
--- a/tools/xcutils/xc_restore.c Fri Jul 13 11:27:44 2012 +0200
+++ b/tools/xcutils/xc_restore.c Fri Jul 13 11:27:45 2012 +0200
@@ -42,7 +42,7 @@ main(int argc, char **argv)
if ( argc == 9 )
superpages = atoi(argv[8]);
else
- superpages = 0;
+ superpages = !!hvm;
ret = xc_domain_restore(xc_fd, io_fd, domid, store_evtchn, &store_mfn,
console_evtchn, &console_mfn, hvm, pae, superpages);