From: Juergen Gross <juergen.gross@ts.fujitsu.com>
To: xen-devel@lists.xensource.com
Cc: george.dunlap@eu.citrix.com
Subject: [PATCH 1 of 4] xen 4.0: tools: libxc: Detect superpages on domain restore
Date: Fri, 13 Jul 2012 11:28:14 +0200
Message-ID: <b2d7c4238c2f488d6d19.1342171694@nehalem1>
In-Reply-To: <patchbomb.1342171693@nehalem1>

[-- Attachment #1: Type: text/plain, Size: 509 bytes --]

When receiving pages, look for contiguous, 2MB-aligned regions and
attempt to allocate a superpage for each such region, falling back to
4k pages if the allocation fails.
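
As an illustration (not part of the patch itself), here is a minimal,
self-contained sketch of that detection logic: scan a stream of PFNs,
start a candidate whenever a 2MB-aligned PFN is seen, and once 512
contiguous PFNs have accumulated, try a superpage allocation, falling
back to 4k pages on failure. try_alloc_superpage() is a hypothetical
stand-in for the real xc_domain_memory_populate_physmap() call, and
the PFN stream is a toy contiguous range.

#include <stdio.h>

#define SUPERPAGE_PFN_SHIFT   9
#define SUPERPAGE_NR_PFNS     (1UL << SUPERPAGE_PFN_SHIFT) /* 512 x 4k = 2MB */
#define SUPER_PAGE_START(pfn) (((pfn) & (SUPERPAGE_NR_PFNS - 1)) == 0)
#define NO_CANDIDATE          ((unsigned long)-1)

/* Hypothetical stand-in for xc_domain_memory_populate_physmap();
 * here it simply pretends every second superpage request fails. */
static int try_alloc_superpage(unsigned long start_pfn)
{
    return ((start_pfn >> SUPERPAGE_PFN_SHIFT) % 2) ? -1 : 0;
}

int main(void)
{
    unsigned long pfn, start = NO_CANDIDATE, count = 0;

    /* Walk a toy range of contiguous PFNs, as a restore batch might. */
    for ( pfn = 0; pfn < 4 * SUPERPAGE_NR_PFNS; pfn++ )
    {
        if ( start == NO_CANDIDATE )
        {
            if ( SUPER_PAGE_START(pfn) )
            {
                /* Begin a new superpage candidate at this aligned pfn. */
                start = pfn;
                count = 1;
            }
            continue;
        }

        /* Next expected continuation; a full run of 512 pfns is a superpage. */
        if ( pfn == start + count && ++count == SUPERPAGE_NR_PFNS )
        {
            if ( try_alloc_superpage(start) == 0 )
                printf("2M page at pfn 0x%lx\n", start);
            else
                printf("fall back to 4k pages at pfn 0x%lx\n", start);
            start = NO_CANDIDATE;
            count = 0;
        }
    }
    return 0;
}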

(Minor conflict fixed up. -iwj)

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>


tools/libxc/xc_domain_restore.c |  122 +++++++++++++++++++++++++++++++++------
1 file changed, 106 insertions(+), 16 deletions(-)



[-- Attachment #2: xen-4.0-testing.hg-4.patch --]
[-- Type: text/x-patch, Size: 7774 bytes --]

# HG changeset patch
# User Juergen Gross <juergen.gross@ts.fujitsu.com>
# Date 1342171661 -7200
# Node ID b2d7c4238c2f488d6d19b4b1be5e836872e276bd
# Parent  30c9bcaec782d200113dfaebb97d55a9e73cd869
xen 4.0: tools: libxc: Detect superpages on domain restore

When receiving pages, look for contiguous, 2MB-aligned regions and
attempt to allocate a superpage for each such region, falling back to
4k pages if the allocation fails.

(Minor conflict fixed up. -iwj)

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
Signed-off-by: Ian Jackson <ian.jackson@eu.citrix.com>
Signed-off-by: Juergen Gross <juergen.gross@ts.fujitsu.com>

diff -r 30c9bcaec782 -r b2d7c4238c2f tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c	Tue Jul 03 13:51:14 2012 +0100
+++ b/tools/libxc/xc_domain_restore.c	Fri Jul 13 11:27:41 2012 +0200
@@ -45,6 +45,11 @@ struct restore_ctx {
 };
 
 #define HEARTBEAT_MS 1000
+
+#define SUPERPAGE_PFN_SHIFT  9
+#define SUPERPAGE_NR_PFNS    (1UL << SUPERPAGE_PFN_SHIFT)
+
+#define SUPER_PAGE_START(pfn)    (((pfn) & (SUPERPAGE_NR_PFNS-1)) == 0 )
 
 #ifndef __MINIOS__
 static ssize_t read_exact_timed(struct restore_ctx *ctx,
@@ -800,9 +805,11 @@ static int apply_batch(int xc_handle, ui
 static int apply_batch(int xc_handle, uint32_t dom, struct restore_ctx *ctx,
                        xen_pfn_t* region_mfn, unsigned long* pfn_type, int pae_extended_cr3,
                        unsigned int hvm, struct xc_mmu* mmu,
-                       pagebuf_t* pagebuf, int curbatch)
+                       pagebuf_t* pagebuf, int curbatch, int superpages)
 {
     int i, j, curpage, nr_mfns;
+    int k, scount;
+    unsigned long superpage_start=INVALID_P2M_ENTRY;
     /* used by debug verify code */
     unsigned long buf[PAGE_SIZE/sizeof(unsigned long)];
     /* Our mapping of the current region (batch) */
@@ -820,8 +827,8 @@ static int apply_batch(int xc_handle, ui
     if (j > MAX_BATCH_SIZE)
         j = MAX_BATCH_SIZE;
 
-    /* First pass for this batch: work out how much memory to alloc */
-    nr_mfns = 0; 
+    /* First pass for this batch: work out how much memory to alloc, and detect superpages */
+    nr_mfns = scount = 0;
     for ( i = 0; i < j; i++ )
     {
         unsigned long pfn, pagetype;
@@ -832,19 +839,103 @@ static int apply_batch(int xc_handle, ui
              (ctx->p2m[pfn] == INVALID_P2M_ENTRY) )
         {
             /* Have a live PFN which hasn't had an MFN allocated */
+ 
+             /* Logic if we're in the middle of detecting a candidate superpage */
+             if ( superpage_start != INVALID_P2M_ENTRY )
+             {
+                 /* Is this the next expected continuation? */
+                 if ( pfn == superpage_start + scount )
+                 {
+                     if ( !superpages )
+                     {
+                         ERROR("Unexpected codepath with no superpages");
+                         return -1;
+                     }
+ 
+                     scount++;
+ 
+                     /* If we've found a whole superpage, allocate it and update p2m */
+                     if ( scount  == SUPERPAGE_NR_PFNS )
+                     {
+                         unsigned long supermfn;
+ 
+ 
+                         supermfn=superpage_start;
+                         if ( xc_domain_memory_populate_physmap(xc_handle, dom, 1,
+                                          SUPERPAGE_PFN_SHIFT, 0, &supermfn) != 0 )
+                         {
+                             DPRINTF("No 2M page available for pfn 0x%lx, fall back to 4K page.\n",
+                                     superpage_start);
+                             /* If we're falling back from a failed allocation, subtract one
+                              * from count, since the last page == pfn, which will be handled
+                              * anyway. */
+                             scount--;
+                             goto fallback;
+                         }
+ 
+                         DPRINTF("Mapping superpage (%d) pfn %lx, mfn %lx\n", scount, superpage_start, supermfn);
+                         for (k=0; k<scount; k++)
+                         {
+                             /* We just allocated a new mfn above; update p2m */
+                             ctx->p2m[superpage_start+k] = supermfn+k;
+                             ctx->nr_pfns++;
+                             /* region_map[] will be set below */
+                         }
+                         superpage_start=INVALID_P2M_ENTRY;
+                         scount=0;
+                     }
+                     continue;
+                 }
+                 
+             fallback:
+                 DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+                 for (k=0; k<scount; k++)
+                 {
+                     ctx->p2m_batch[nr_mfns++] = superpage_start+k; 
+                     ctx->p2m[superpage_start+k]--;
+                 }
+                 superpage_start = INVALID_P2M_ENTRY;
+                 scount=0;
+             }
+ 
+             /* Are we ready to start a new superpage candidate? */
+             if ( superpages && SUPER_PAGE_START(pfn) )
+             {
+                 superpage_start=pfn;
+                 scount++;
+                 continue;
+             }
+             
+             /* Add the current pfn to pfn_batch */
             ctx->p2m_batch[nr_mfns++] = pfn; 
             ctx->p2m[pfn]--;
         }
-    } 
+     }
+ 
+     /* Clean up any partial superpage candidates */
+     if ( superpage_start != INVALID_P2M_ENTRY )
+     {
+         DPRINTF("Falling back %d pages pfn %lx\n", scount, superpage_start);
+         for (k=0; k<scount; k++)
+         {
+             ctx->p2m_batch[nr_mfns++] = superpage_start+k; 
+             ctx->p2m[superpage_start+k]--;
+         }
+         superpage_start = INVALID_P2M_ENTRY;
+     }
 
     /* Now allocate a bunch of mfns for this batch */
-    if ( nr_mfns &&
-         (xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
-                                            0, ctx->p2m_batch) != 0) )
-    { 
-        ERROR("Failed to allocate memory for batch.!\n"); 
-        errno = ENOMEM;
-        return -1;
+     if ( nr_mfns )
+     {
+         DPRINTF("Mapping order 0,  %d; first pfn %lx\n", nr_mfns, ctx->p2m_batch[0]);
+     
+         if(xc_domain_memory_populate_physmap(xc_handle, dom, nr_mfns, 0,
+                                             0, ctx->p2m_batch) != 0) 
+         { 
+             ERROR("Failed to allocate memory for batch.!\n"); 
+             errno = ENOMEM;
+             return -1;
+         }
     }
 
     /* Second pass for this batch: update p2m[] and region_mfn[] */
@@ -895,7 +986,8 @@ static int apply_batch(int xc_handle, ui
 
         if (pfn_err[i])
         {
-            ERROR("unexpected PFN mapping failure");
+            ERROR("unexpected PFN mapping failure pfn %lx map_mfn %lx p2m_mfn %lx",
+                  pfn, region_mfn[i], ctx->p2m[pfn]);
             goto err_mapped;
         }
 
@@ -1058,9 +1150,6 @@ int xc_domain_restore(int xc_handle, int
     /* For info only */
     ctx->nr_pfns = 0;
 
-    if ( superpages )
-        return 1;
-
     if ( read_exact(io_fd, &dinfo->p2m_size, sizeof(unsigned long)) )
     {
         ERROR("read: p2m_size");
@@ -1209,7 +1298,8 @@ int xc_domain_restore(int xc_handle, int
             int brc;
 
             brc = apply_batch(xc_handle, dom, ctx, region_mfn, pfn_type,
-                              pae_extended_cr3, hvm, mmu, &pagebuf, curbatch);
+                              pae_extended_cr3, hvm, mmu, &pagebuf, curbatch,
+                              superpages);
             if ( brc < 0 )
                 goto out;
 

[-- Attachment #3: Type: text/plain, Size: 126 bytes --]

_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
http://lists.xen.org/xen-devel

Thread overview: 5+ messages
2012-07-13  9:28 [PATCH 0 of 4] Xen 4.0 backport of cs 23420-23423 Juergen Gross
2012-07-13  9:28 ` Juergen Gross [this message]
2012-07-13  9:28 ` [PATCH 2 of 4] xen 4.0: tools: Save superpages in the same batch, to make detection easier Juergen Gross
2012-07-13  9:28 ` [PATCH 3 of 4] xen 4.0: tools: Introduce "allocate-only" page type for migration Juergen Gross
2012-07-13  9:28 ` [PATCH 4 of 4] xen 4.0: tools: Enable superpages for HVM domains by default Juergen Gross
