xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration
  2011-05-16 10:51 [PATCH 0 of 4] RESEND Use superpages on restore/migrate George Dunlap
@ 2011-05-16 10:51 ` George Dunlap
  0 siblings, 0 replies; 6+ messages in thread
From: George Dunlap @ 2011-05-16 10:51 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

To detect presence of superpages on the receiver side, we need
to have strings of sequential pfns sent across on the first iteration
through the memory.  However, as we go through the memory, more and
more of it will be marked dirty, making it wasteful to send those pages.

This patch introduces a new PFINFO type, "XALLOC".  Like PFINFO_XTAB, it
indicates that there is no corresponding page present in the subsequent
page buffer.  However, unlike PFINFO_XTAB, it contains a pfn which should be
allocated.

This new type is only used for migration; but it's placed in
xen/public/domctl.h so that the value isn't reused.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>

diff -r a629b41a8d1f -r 2ee1b330f2d3 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Mon May 16 11:50:46 2011 +0100
+++ b/tools/libxc/xc_domain_restore.c	Mon May 16 11:50:46 2011 +0100
@@ -839,7 +839,8 @@ static int pagebuf_get_one(xc_interface 
 
     countpages = count;
     for (i = oldcount; i < buf->nr_pages; ++i)
-        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB)
+        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB
+            ||(buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XALLOC)
             --countpages;
 
     if (!countpages)
@@ -917,6 +918,7 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
+        /* For allocation purposes, treat XEN_DOMCTL_PFINFO_XALLOC as a normal page */
         if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) && 
              (ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
         {
@@ -1028,21 +1030,21 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+        if ( pagetype != XEN_DOMCTL_PFINFO_XTAB
+             && ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
+        {
+            /* We just allocated a new mfn above; update p2m */
+            ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++]; 
+            ctx->nr_pfns++; 
+        }
+
+        /* setup region_mfn[] for batch map, if necessary.
+         * For HVM guests, this interface takes PFNs, not MFNs */
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+             || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
             region_mfn[i] = ~0UL; /* map will fail but we don't care */
-        else 
-        {
-            if ( ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
-            {
-                /* We just allocated a new mfn above; update p2m */
-                ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++]; 
-                ctx->nr_pfns++; 
-            }
-
-            /* setup region_mfn[] for batch map.
-             * For HVM guests, this interface takes PFNs, not MFNs */
+        else
             region_mfn[i] = hvm ? pfn : ctx->p2m[pfn]; 
-        }
     }
 
     /* Map relevant mfns */
@@ -1062,8 +1064,9 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
-            /* a bogus/unmapped page: skip it */
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB 
+             || pagetype == XEN_DOMCTL_PFINFO_XALLOC)
+            /* a bogus/unmapped/allocate-only page: skip it */
             continue;
 
         if (pfn_err[i])
diff -r a629b41a8d1f -r 2ee1b330f2d3 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Mon May 16 11:50:46 2011 +0100
+++ b/tools/libxc/xc_domain_save.c	Mon May 16 11:50:46 2011 +0100
@@ -1258,13 +1258,15 @@ int xc_domain_save(xc_interface *xch, in
                 }
                 else
                 {
-                    if ( !last_iter &&
+                    int dont_skip = (last_iter || (superpages && iter==1));
+
+                    if ( !dont_skip &&
                          test_bit(n, to_send) &&
                          test_bit(n, to_skip) )
                         skip_this_iter++; /* stats keeping */
 
                     if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
-                           (test_bit(n, to_send) && last_iter) ||
+                           (test_bit(n, to_send) && dont_skip) ||
                            (test_bit(n, to_fix)  && last_iter)) )
                         continue;
 
@@ -1277,7 +1279,7 @@ int xc_domain_save(xc_interface *xch, in
                     /*
                     ** we get here if:
                     **  1. page is marked to_send & hasn't already been re-dirtied
-                    **  2. (ignore to_skip in last iteration)
+                    **  2. (ignore to_skip in first and last iterations)
                     **  3. add in pages that still need fixup (net bufs)
                     */
 
@@ -1301,7 +1303,7 @@ int xc_domain_save(xc_interface *xch, in
                         set_bit(n, to_fix);
                         continue;
                     }
-
+                    
                     if ( last_iter &&
                          test_bit(n, to_fix) &&
                          !test_bit(n, to_send) )
@@ -1346,6 +1348,7 @@ int xc_domain_save(xc_interface *xch, in
                 {
                     if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
                         continue;
+
                     DPRINTF("map fail: page %i mfn %08lx err %d\n",
                             j, gmfn, pfn_err[j]);
                     pfn_type[j] = XEN_DOMCTL_PFINFO_XTAB;
@@ -1358,6 +1361,9 @@ int xc_domain_save(xc_interface *xch, in
                     continue;
                 }
 
+                if ( superpages && iter==1 && test_bit(gmfn, to_skip))
+                    pfn_type[j] = XEN_DOMCTL_PFINFO_XALLOC;
+
                 /* canonicalise mfn->pfn */
                 pfn_type[j] |= pfn_batch[j];
                 ++run;
@@ -1432,8 +1438,9 @@ int xc_domain_save(xc_interface *xch, in
                     }
                 }
 
-                /* skip pages that aren't present */
-                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+                /* skip pages that aren't present or are alloc-only */
+                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+                    || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
                     continue;
 
                 pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
diff -r a629b41a8d1f -r 2ee1b330f2d3 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Mon May 16 11:50:46 2011 +0100
+++ b/xen/include/public/domctl.h	Mon May 16 11:50:46 2011 +0100
@@ -133,6 +133,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
 #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
 #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
 #define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
+#define XEN_DOMCTL_PFINFO_XALLOC  (0xeU<<28) /* allocate-only page */
 #define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
 #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423
@ 2012-07-13  9:21 Juergen Gross
  2012-07-13  9:21 ` [PATCH 1 of 4] tools: libxc: Detect superpages on domain restore Juergen Gross
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Juergen Gross @ 2012-07-13  9:21 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

This is a backport of changesets 23420-23423 from xen-unstable to support
allocation of superpages for hvm domains on restore.

Original patches by George Dunlap

6 files changed, 155 insertions(+), 40 deletions(-)
tools/libxc/xc_domain_restore.c         |  157 ++++++++++++++++++++++++-------
tools/libxc/xc_domain_save.c            |   31 ++++--
tools/libxl/libxl_dom.c                 |    2 
tools/python/xen/xend/XendCheckpoint.py |    2 
tools/xcutils/xc_restore.c              |    2 
xen/include/public/domctl.h             |    1 

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 1 of 4] tools: libxc: Detect superpages on domain restore
  2012-07-13  9:21 [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423 Juergen Gross
@ 2012-07-13  9:21 ` Juergen Gross
  2012-07-13  9:21 ` [PATCH 2 of 4] tools: Save superpages in the same batch, to make detection easier Juergen Gross
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Juergen Gross @ 2012-07-13  9:21 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

[-- Attachment #1: Type: text/plain, Size: 449 bytes --]

When receiving pages, look for contiguous 2-meg aligned regions and
attempt to allocate a superpage for that region, falling back to
4k pages if the allocation fails.

(Minor conflict fixed up. -iwj)

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>


1 file changed, 106 insertions(+), 16 deletions(-)
tools/libxc/xc_domain_restore.c |  122 +++++++++++++++++++++++++++++++++------



[-- Attachment #2: xen-4.1-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 7581 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171237 -7200
# Node ID 83fd911db89ee2d57db1c22537188d6b5691f807
# Parent  3ce155e77f39d0c3cc787c1cc3d6bab1ef45a1dc
tools: libxc: Detect superpages on domain restore

When receiving pages, look for contiguous 2-meg aligned regions and
attempt to allocate a superpage for that region, falling back to
4k pages if the allocation fails.

(Minor conflict fixed up. -iwj)

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>

diff -r 3ce155e77f39 -r 83fd911db89e tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Mon Jul 09 10:30:44 2012 +0100
+++ b/tools/libxc/xc_domain_restore.c	Fri Jul 13 11:20:37 2012 +0200
@@ -47,6 +47,11 @@ struct restore_ctx {
 };
 
 #define HEARTBEAT_MS 1000
+
+#define SUPERPAGE_PFN_SHIFT  9
+#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)
+
+#define SUPER_PAGE_START(pfn)    (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
 
 #ifndef __MINIOS__
 static ssize_t rdexact(xc_interface *xch, struct restore_ctx *ctx,
@@ -893,9 +898,11 @@ static int apply_batch(xc_interface *xch
 static int apply_batch(xc_interface *xch, uint32_t dom, struct restore_ctx *ctx,
                        xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
                        unsigned int hvm, struct xc_mmu* mmu,
-                       pagebuf_t* pagebuf, int curbatch)
+                       pagebuf_t* pagebuf, int curbatch, int superpages)
 {
     int i, j, curpage, nr_mfns;
+    int k, scount;
+    unsigned long superpage_start=INVALID_P2M_ENTRY;
     /* used by debug verify code */
     unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
     /* Our mapping of the current region (batch) */
@@ -913,8 +920,8 @@ static int apply_batch(xc_interface *xch
     if (j > MAX_BATCH_SIZE)
         j = MAX_BATCH_SIZE;
 
-    /* First pass for this batch: work out how much memory to alloc */
-    nr_mfns = 0; 
+    /* First pass for this batch: work out how much memory to alloc, and detect superpages */
+    nr_mfns = scount = 0;
     for ( i = 0; i < j; i++ )
     {
         unsigned long pfn, pagetype;
@@ -925,19 +932,103 @@ static int apply_batch(xc_interface *xch
              (ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
         {
             /* Have a live PFN which hasn't had an MFN allocated */
+
+            /* Logic if we're in the middle of detecting a candidate superpage */
+            if ( superpage_start != INVALID_P2M_ENTRY )
+            {
+                /* Is this the next expected continuation? */
+                if ( pfn == superpage_start + scount )
+                {
+                    if ( !superpages )
+                    {
+                        ERROR("Unexpexted codepath with no superpages");
+                        return -1;
+                    }
+
+                    scount++;
+
+                    /* If we've found a whole superpage, allocate it and update p2m */
+                    if ( scount  == SUPERPAGE_NR_PFNS )
+                    {
+                        unsigned long supermfn;
+
+
+                        supermfn=superpage_start;
+                        if ( xc_domain_populate_physmap_exact(xch, dom, 1,
+                                         SUPERPAGE_PFN_SHIFT, 0, &supermfn) != 0 )
+                        {
+                            DPRINTF("No 2M page available for pfn 0x%lx, fall back to 4K page.\n",
+                                    superpage_start);
+                            /* If we're falling back from a failed allocation, subtract one
+                             * from count, since the last page == pfn, which will behandled
+                             * anyway. */
+                            scount--;
+                            goto fallback;
+                        }
+
+                        DPRINTF("Mapping superpage (%d) pfn %lx, mfn %lx\n", scount, superpage_start, supermfn);
+                        for (k=0; k<scount; k++)
+                        {
+                            /* We just allocated a new mfn above; update p2m */
+                            ctx->p2m[superpage_start+k] = supermfn+k;
+                            ctx->nr_pfns++;
+                            /* region_map[] will be set below */
+                        }
+                        superpage_start=INVALID_P2M_ENTRY;
+                        scount=0;
+                    }
+                    continue;
+                }
+                
+            fallback:
+                DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+                for (k=0; k<scount; k++)
+                {
+                    ctx->p2m_batch[nr_mfns++] = superpage_start+k; 
+                    ctx->p2m[superpage_start+k]--;
+                }
+                superpage_start = INVALID_P2M_ENTRY;
+                scount=0;
+            }
+
+            /* Are we ready to start a new superpage candidate? */
+            if ( superpages && SUPER_PAGE_START(pfn) )
+            {
+                superpage_start=pfn;
+                scount++;
+                continue;
+            }
+            
+            /* Add the current pfn to pfn_batch */
             ctx->p2m_batch[nr_mfns++] = pfn; 
             ctx->p2m[pfn]--;
         }
-    } 
+    }
+
+    /* Clean up any partial superpage candidates */
+    if ( superpage_start != INVALID_P2M_ENTRY )
+    {
+        DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+        for (k=0; k<scount; k++)
+        {
+            ctx->p2m_batch[nr_mfns++] = superpage_start+k; 
+            ctx->p2m[superpage_start+k]--;
+        }
+        superpage_start = INVALID_P2M_ENTRY;
+    }
 
     /* Now allocate a bunch of mfns for this batch */
-    if ( nr_mfns &&
-         (xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0,
-                                            0, ctx->p2m_batch) != 0) )
-    { 
-        ERROR("Failed to allocate memory for batch.!\n"); 
-        errno = ENOMEM;
-        return -1;
+    if ( nr_mfns )
+    {
+        DPRINTF("Mapping order 0,  %d; first pfn %lx\n", nr_mfns, ctx->p2m_batch[0]);
+    
+        if(xc_domain_populate_physmap_exact(xch, dom, nr_mfns, 0,
+                                            0, ctx->p2m_batch) != 0) 
+        { 
+            ERROR("Failed to allocate memory for batch.!\n"); 
+            errno = ENOMEM;
+            return -1;
+        }
     }
 
     /* Second pass for this batch: update p2m[] and region_mfn[] */
@@ -988,7 +1079,8 @@ static int apply_batch(xc_interface *xch
 
         if (pfn_err[i])
         {
-            ERROR("unexpected PFN mapping failure");
+            ERROR("unexpected PFN mapping failure pfn %lx map_mfn %lx p2m_mfn %lx",
+                  pfn, region_mfn[i], ctx->p2m[pfn]);
             goto err_mapped;
         }
 
@@ -1159,9 +1251,6 @@ int xc_domain_restore(xc_interface *xch,
     /* For info only */
     ctx->nr_pfns = 0;
 
-    if ( superpages )
-        return 1;
-
     ctxt = xc_hypercall_buffer_alloc(xch, ctxt, sizeof(*ctxt));
 
     if ( ctxt == NULL )
@@ -1309,7 +1398,8 @@ int xc_domain_restore(xc_interface *xch,
             int brc;
 
             brc = apply_batch(xch, dom, ctx, region_mfn, pfn_type,
-                              pae_extended_cr3, hvm, mmu, &pagebuf, curbatch);
+                              pae_extended_cr3, hvm, mmu, &pagebuf, curbatch,
+                              superpages);
             if ( brc < 0 )
                 goto out;
 

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 2 of 4] tools: Save superpages in the same batch, to make detection easier
  2012-07-13  9:21 [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423 Juergen Gross
  2012-07-13  9:21 ` [PATCH 1 of 4] tools: libxc: Detect superpages on domain restore Juergen Gross
@ 2012-07-13  9:21 ` Juergen Gross
  2012-07-13  9:21 ` [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration Juergen Gross
  2012-07-13  9:21 ` [PATCH 4 of 4] tools: Enable superpages for HVM domains by default Juergen Gross
  3 siblings, 0 replies; 6+ messages in thread
From: Juergen Gross @ 2012-07-13  9:21 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

[-- Attachment #1: Type: text/plain, Size: 415 bytes --]

On the first time through (when pfns are mostly allocated on
the receiving side), try to keep superpages together in the same
batch by ending a batch early if we see the first page of a
potential superpage and there isn't enough room in the batch
for a full superpage.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>


1 file changed, 12 insertions(+)
tools/libxc/xc_domain_save.c |   12 ++++++++++++



[-- Attachment #2: xen-4.1-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 2171 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171239 -7200
# Node ID 80c3e053c1726eeeddf11204ce7f25aefc2fd021
# Parent  83fd911db89ee2d57db1c22537188d6b5691f807
tools: Save superpages in the same batch, to make detection easier

On the first time through (when pfns are mostly allocated on
the receiving side), try to keep superpages together in the same
batch by ending a batch early if we see the first page of a
potential superpage and there isn't enough room in the batch
for a full superpage.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>

diff -r 83fd911db89e -r 80c3e053c172 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Fri Jul 13 11:20:37 2012 +0200
+++ b/tools/libxc/xc_domain_save.c	Fri Jul 13 11:20:39 2012 +0200
@@ -81,6 +81,11 @@ struct outbuf {
     (((_mfn) < (ctx->max_mfn)) &&                \
      ((mfn_to_pfn(_mfn) < (dinfo->p2m_size)) &&   \
       (pfn_to_mfn(mfn_to_pfn(_mfn)) == (_mfn))))
+
+#define SUPERPAGE_PFN_SHIFT  9
+#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)
+
+#define SUPER_PAGE_START(pfn)    (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
 
 /*
 ** During (live) save/migrate, we maintain a number of bitmaps to track
@@ -906,6 +911,7 @@ int xc_domain_save(xc_interface *xch, in
     int rc = 1, frc, i, j, last_iter = 0, iter = 0;
     int live  = (flags & XCFLAGS_LIVE);
     int debug = (flags & XCFLAGS_DEBUG);
+    int superpages = !!hvm;
     int race = 0, sent_last_iter, skip_this_iter = 0;
     unsigned int sent_this_iter = 0;
     int tmem_saved = 0;
@@ -1261,6 +1267,12 @@ int xc_domain_save(xc_interface *xch, in
                            (test_bit(n, to_send) && last_iter) ||
                            (test_bit(n, to_fix)  && last_iter)) )
                         continue;
+
+                    /* First time through, try to keep superpages in the same batch */
+                    if ( superpages && iter == 1
+                         && SUPER_PAGE_START(n)
+                         && batch + SUPERPAGE_NR_PFNS > MAX_BATCH_SIZE )
+                        break;
 
                     /*
                     ** we get here if:

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration
  2012-07-13  9:21 [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423 Juergen Gross
  2012-07-13  9:21 ` [PATCH 1 of 4] tools: libxc: Detect superpages on domain restore Juergen Gross
  2012-07-13  9:21 ` [PATCH 2 of 4] tools: Save superpages in the same batch, to make detection easier Juergen Gross
@ 2012-07-13  9:21 ` Juergen Gross
  2012-07-13  9:21 ` [PATCH 4 of 4] tools: Enable superpages for HVM domains by default Juergen Gross
  3 siblings, 0 replies; 6+ messages in thread
From: Juergen Gross @ 2012-07-13  9:21 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

[-- Attachment #1: Type: text/plain, Size: 911 bytes --]

To detect presence of superpages on the receiver side, we need
to have strings of sequential pfns sent across on the first iteration
through the memory.  However, as we go through the memory, more and
more of it will be marked dirty, making it wasteful to send those pages.

This patch introduces a new PFINFO type, "XALLOC".  Like PFINFO_XTAB, it
indicates that there is no corresponding page present in the subsequent
page buffer.  However, unlike PFINFO_XTAB, it contains a pfn which should be
allocated.

This new type is only used for migration; but it's placed in
xen/public/domctl.h so that the value isn't reused.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>


3 files changed, 33 insertions(+), 22 deletions(-)
tools/libxc/xc_domain_restore.c |   35 +++++++++++++++++++----------------
tools/libxc/xc_domain_save.c    |   19 +++++++++++++------
xen/include/public/domctl.h     |    1 +



[-- Attachment #2: xen-4.1-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 7610 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171240 -7200
# Node ID 0d1fbddee973517d24dcc080be93d2e10a42b98f
# Parent  80c3e053c1726eeeddf11204ce7f25aefc2fd021
tools: Introduce "allocate-only" page type for migration

To detect presence of superpages on the receiver side, we need
to have strings of sequential pfns sent across on the first iteration
through the memory.  However, as we go through the memory, more and
more of it will be marked dirty, making it wasteful to send those pages.

This patch introduces a new PFINFO type, "XALLOC".  Like PFINFO_XTAB, it
indicates that there is no corresponding page present in the subsquent
page buffer.  However, unlike PFINFO_XTAB, it contains a pfn which should be
allocated.

This new type is only used for migration; but it's placed in
xen/public/domctl.h so that the value isn't reused.

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>

diff -r 80c3e053c172 -r 0d1fbddee973 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Fri Jul 13 11:20:39 2012 +0200
+++ b/tools/libxc/xc_domain_restore.c	Fri Jul 13 11:20:40 2012 +0200
@@ -850,7 +850,8 @@ static int pagebuf_get_one(xc_interface 
 
     countpages = count;
     for (i = oldcount; i < buf->nr_pages; ++i)
-        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB)
+        if ((buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XTAB
+            ||(buf->pfn_types[i] & XEN_DOMCTL_PFINFO_LTAB_MASK) == XEN_DOMCTL_PFINFO_XALLOC)
             --countpages;
 
     if (!countpages)
@@ -928,6 +929,7 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
+        /* For allocation purposes, treat XEN_DOMCTL_PFINFO_XALLOC as a normal page */
         if ( (pagetype != XEN_DOMCTL_PFINFO_XTAB) && 
              (ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
         {
@@ -1039,21 +1041,21 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+        if ( pagetype != XEN_DOMCTL_PFINFO_XTAB
+             && ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
+        {
+            /* We just allocated a new mfn above; update p2m */
+            ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++]; 
+            ctx->nr_pfns++; 
+        }
+
+        /* setup region_mfn[] for batch map, if necessary.
+         * For HVM guests, this interface takes PFNs, not MFNs */
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+             || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
             region_mfn[i] = ~0UL; /* map will fail but we don't care */
-        else 
-        {
-            if ( ctx->p2m[pfn] == (INVALID_P2M_ENTRY-1) )
-            {
-                /* We just allocated a new mfn above; update p2m */
-                ctx->p2m[pfn] = ctx->p2m_batch[nr_mfns++]; 
-                ctx->nr_pfns++; 
-            }
-
-            /* setup region_mfn[] for batch map.
-             * For HVM guests, this interface takes PFNs, not MFNs */
+        else
             region_mfn[i] = hvm ? pfn : ctx->p2m[pfn]; 
-        }
     }
 
     /* Map relevant mfns */
@@ -1073,8 +1075,9 @@ static int apply_batch(xc_interface *xch
         pfn      = pagebuf->pfn_types[i + curbatch] & ~XEN_DOMCTL_PFINFO_LTAB_MASK;
         pagetype = pagebuf->pfn_types[i + curbatch] &  XEN_DOMCTL_PFINFO_LTAB_MASK;
 
-        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
-            /* a bogus/unmapped page: skip it */
+        if ( pagetype == XEN_DOMCTL_PFINFO_XTAB 
+             || pagetype == XEN_DOMCTL_PFINFO_XALLOC)
+            /* a bogus/unmapped/allocate-only page: skip it */
             continue;
 
         if (pfn_err[i])
diff -r 80c3e053c172 -r 0d1fbddee973 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c	Fri Jul 13 11:20:39 2012 +0200
+++ b/tools/libxc/xc_domain_save.c	Fri Jul 13 11:20:40 2012 +0200
@@ -1258,13 +1258,15 @@ int xc_domain_save(xc_interface *xch, in
                 }
                 else
                 {
-                    if ( !last_iter &&
+                    int dont_skip = (last_iter || (superpages && iter==1));
+
+                    if ( !dont_skip &&
                          test_bit(n, to_send) &&
                          test_bit(n, to_skip) )
                         skip_this_iter++; /* stats keeping */
 
                     if ( !((test_bit(n, to_send) && !test_bit(n, to_skip)) ||
-                           (test_bit(n, to_send) && last_iter) ||
+                           (test_bit(n, to_send) && dont_skip) ||
                            (test_bit(n, to_fix)  && last_iter)) )
                         continue;
 
@@ -1277,7 +1279,7 @@ int xc_domain_save(xc_interface *xch, in
                     /*
                     ** we get here if:
                     **  1. page is marked to_send & hasn't already been re-dirtied
-                    **  2. (ignore to_skip in last iteration)
+                    **  2. (ignore to_skip in first and last iterations)
                     **  3. add in pages that still need fixup (net bufs)
                     */
 
@@ -1301,7 +1303,7 @@ int xc_domain_save(xc_interface *xch, in
                         set_bit(n, to_fix);
                         continue;
                     }
-
+                    
                     if ( last_iter &&
                          test_bit(n, to_fix) &&
                          !test_bit(n, to_send) )
@@ -1346,6 +1348,7 @@ int xc_domain_save(xc_interface *xch, in
                 {
                     if ( pfn_type[j] == XEN_DOMCTL_PFINFO_XTAB )
                         continue;
+
                     DPRINTF("map fail: page %i mfn %08lx err %d\n",
                             j, gmfn, pfn_err[j]);
                     pfn_type[j] = XEN_DOMCTL_PFINFO_XTAB;
@@ -1357,6 +1360,9 @@ int xc_domain_save(xc_interface *xch, in
                     DPRINTF("type fail: page %i mfn %08lx\n", j, gmfn);
                     continue;
                 }
+
+                if ( superpages && iter==1 && test_bit(gmfn, to_skip))
+                    pfn_type[j] = XEN_DOMCTL_PFINFO_XALLOC;
 
                 /* canonicalise mfn->pfn */
                 pfn_type[j] |= pfn_batch[j];
@@ -1432,8 +1438,9 @@ int xc_domain_save(xc_interface *xch, in
                     }
                 }
 
-                /* skip pages that aren't present */
-                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB )
+                /* skip pages that aren't present or are alloc-only */
+                if ( pagetype == XEN_DOMCTL_PFINFO_XTAB
+                    || pagetype == XEN_DOMCTL_PFINFO_XALLOC )
                     continue;
 
                 pagetype &= XEN_DOMCTL_PFINFO_LTABTYPE_MASK;
diff -r 80c3e053c172 -r 0d1fbddee973 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h	Fri Jul 13 11:20:39 2012 +0200
+++ b/xen/include/public/domctl.h	Fri Jul 13 11:20:40 2012 +0200
@@ -133,6 +133,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
 #define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
 #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
 #define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
+#define XEN_DOMCTL_PFINFO_XALLOC  (0xeU<<28) /* allocate-only page */
 #define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
 #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
 

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH 4 of 4] tools: Enable superpages for HVM domains by default
  2012-07-13  9:21 [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423 Juergen Gross
                   ` (2 preceding siblings ...)
  2012-07-13  9:21 ` [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration Juergen Gross
@ 2012-07-13  9:21 ` Juergen Gross
  3 siblings, 0 replies; 6+ messages in thread
From: Juergen Gross @ 2012-07-13  9:21 UTC (permalink / raw)
  To: xen-devel; +Cc: george.dunlap

[-- Attachment #1: Type: text/plain, Size: 322 bytes --]

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>


3 files changed, 4 insertions(+), 2 deletions(-)
tools/libxl/libxl_dom.c                 |    2 +-
tools/python/xen/xend/XendCheckpoint.py |    2 ++
tools/xcutils/xc_restore.c              |    2 +-



[-- Attachment #2: xen-4.1-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 2117 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171241 -7200
# Node ID 0cfc9e2b7da38c2d4f7cd180ddb9fceae6adbc68
# Parent  0d1fbddee973517d24dcc080be93d2e10a42b98f
tools: Enable superpages for HVM domains by default

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

diff -r 0d1fbddee973 -r 0cfc9e2b7da3 tools/libxl/libxl_dom.c
--- a/tools/libxl/libxl_dom.c	Fri Jul 13 11:20:40 2012 +0200
+++ b/tools/libxl/libxl_dom.c	Fri Jul 13 11:20:41 2012 +0200
@@ -308,7 +308,7 @@ int libxl__domain_restore_common(libxl_c
     rc = xc_domain_restore(ctx->xch, fd, domid,
                              state->store_port, &state->store_mfn,
                              state->console_port, &state->console_mfn,
-                             info->hvm, info->u.hvm.pae, 0);
+                             info->hvm, info->u.hvm.pae, !!info->hvm);
     if ( rc ) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "restoring domain");
         return ERROR_FAIL;
diff -r 0d1fbddee973 -r 0cfc9e2b7da3 tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py	Fri Jul 13 11:20:40 2012 +0200
+++ b/tools/python/xen/xend/XendCheckpoint.py	Fri Jul 13 11:20:41 2012 +0200
@@ -298,6 +298,8 @@ def restore(xd, fd, dominfo = None, paus
         dominfo.info['shadow_memory'] = shadow_cur
 
         superpages = restore_image.superpages
+        if is_hvm:
+            superpages = 1
 
         cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
                         fd, dominfo.getDomid(),
diff -r 0d1fbddee973 -r 0cfc9e2b7da3 tools/xcutils/xc_restore.c
--- a/tools/xcutils/xc_restore.c	Fri Jul 13 11:20:40 2012 +0200
+++ b/tools/xcutils/xc_restore.c	Fri Jul 13 11:20:41 2012 +0200
@@ -43,7 +43,7 @@ main(int argc, char **argv)
     if ( argc == 9 )
 	    superpages = atoi(argv[8]);
     else
-	    superpages = 0;
+	    superpages = !!hvm;
 
     ret = xc_domain_restore(xch, io_fd, domid, store_evtchn, &store_mfn,
                             console_evtchn, &console_mfn, hvm, pae, superpages);

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2012-07-13  9:21 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2012-07-13  9:21 [PATCH 0 of 4] Xen 4.1 backport of cs 23420-23423 Juergen Gross
2012-07-13  9:21 ` [PATCH 1 of 4] tools: libxc: Detect superpages on domain restore Juergen Gross
2012-07-13  9:21 ` [PATCH 2 of 4] tools: Save superpages in the same batch, to make detection easier Juergen Gross
2012-07-13  9:21 ` [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration Juergen Gross
2012-07-13  9:21 ` [PATCH 4 of 4] tools: Enable superpages for HVM domains by default Juergen Gross
  -- strict thread matches above, loose matches on Subject: below --
2011-05-16 10:51 [PATCH 0 of 4] RESEND Use superpages on restore/migrate George Dunlap
2011-05-16 10:51 ` [PATCH 3 of 4] tools: Introduce "allocate-only" page type for migration George Dunlap

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).