From: Olaf Hering <olaf@aepfle.de>
To: Wei Liu <wei.liu2@citrix.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>,
Ian Jackson <ian.jackson@eu.citrix.com>,
xen-devel@lists.xen.org
Subject: Re: [PATCH v2 3/3] tools/libxc: use superpages during restore of HVM guest
Date: Wed, 23 Aug 2017 16:32:29 +0200 [thread overview]
Message-ID: <20170823143229.GE6372@aepfle.de> (raw)
In-Reply-To: <20170823134430.GD6372@aepfle.de>
[-- Attachment #1.1: Type: text/plain, Size: 4561 bytes --]
On Wed, Aug 23, Olaf Hering wrote:
> The value of p2m_size does not represent the actual number of pages
> assigned to a domU. This info is stored in getdomaininfo.max_pages,
> which is currently not used by restore. I will see if using this value
> will avoid triggering the Over-allocation check.
This untested change on top of this series (done with git diff -w -b
base..HEAD) does some accounting to avoid Over-allocation:
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 26c45fdd6d..e0321ea224 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -234,6 +234,8 @@ struct xc_sr_context
int send_back_fd;
unsigned long p2m_size;
+ unsigned long max_pages;
+ unsigned long tot_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
/* From Image Header. */
@@ -375,6 +377,7 @@ static inline bool xc_sr_bitmap_resize(struct xc_sr_bitmap *bm, unsigned long bi
static inline void xc_sr_bitmap_free(struct xc_sr_bitmap *bm)
{
free(bm->p);
+ bm->p = NULL;
}
static inline bool xc_sr_set_bit(unsigned long bit, struct xc_sr_bitmap *bm)
diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index 1f9fe25b8f..eff24d3805 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -758,6 +758,9 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
return -1;
}
+ /* See xc_domain_getinfo */
+ ctx.restore.max_pages = ctx.dominfo.max_memkb >> (PAGE_SHIFT-10);
+ ctx.restore.tot_pages = ctx.dominfo.nr_pages;
ctx.restore.p2m_size = nr_pfns;
if ( ctx.dominfo.hvm )
diff --git a/tools/libxc/xc_sr_restore_x86_hvm.c b/tools/libxc/xc_sr_restore_x86_hvm.c
index 60454148db..f2932dafb7 100644
--- a/tools/libxc/xc_sr_restore_x86_hvm.c
+++ b/tools/libxc/xc_sr_restore_x86_hvm.c
@@ -278,7 +278,8 @@ static int pfn_set_allocated(struct xc_sr_context *ctx, xen_pfn_t pfn)
static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
{
xc_interface *xch = ctx->xch;
- bool success = false;
+ struct xc_sr_bitmap *bm;
+ bool success = false, do_sp;
int rc = -1, done;
unsigned int order;
unsigned long i;
@@ -303,15 +304,18 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
return -1;
}
DPRINTF("idx_1g %lu idx_2m %lu\n", idx_1g, idx_2m);
- if (!xc_sr_test_and_set_bit(idx_1g, &ctx->x86_hvm.restore.attempted_1g)) {
+
+ bm = &ctx->x86_hvm.restore.attempted_1g;
order = SUPERPAGE_1GB_SHIFT;
count = 1UL << order;
+ do_sp = ctx->restore.tot_pages + count <= ctx->restore.max_pages;
+ if ( do_sp && !xc_sr_test_and_set_bit(idx_1g, bm) ) {
base_pfn = (pfn >> order) << order;
extnt = base_pfn;
done = xc_domain_populate_physmap(xch, ctx->domid, 1, order, 0, &extnt);
DPRINTF("1G base_pfn %" PRI_xen_pfn " done %d\n", base_pfn, done);
if ( done > 0 ) {
- struct xc_sr_bitmap *bm = &ctx->x86_hvm.restore.attempted_2m;
+ bm = &ctx->x86_hvm.restore.attempted_2m;
success = true;
stat_1g = done;
for ( i = 0; i < (count >> SUPERPAGE_2MB_SHIFT); i++ )
@@ -319,9 +323,11 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
}
}
- if (!xc_sr_test_and_set_bit(idx_2m, &ctx->x86_hvm.restore.attempted_2m)) {
+ bm = &ctx->x86_hvm.restore.attempted_2m;
order = SUPERPAGE_2MB_SHIFT;
count = 1UL << order;
+ do_sp = ctx->restore.tot_pages + count <= ctx->restore.max_pages;
+ if ( do_sp && !xc_sr_test_and_set_bit(idx_2m, bm) ) {
base_pfn = (pfn >> order) << order;
extnt = base_pfn;
done = xc_domain_populate_physmap(xch, ctx->domid, 1, order, 0, &extnt);
@@ -344,6 +350,7 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
if ( success == true ) {
do {
count--;
+ ctx->restore.tot_pages++;
rc = pfn_set_allocated(ctx, base_pfn + count);
if ( rc )
break;
@@ -396,6 +403,7 @@ static int x86_hvm_populate_pfns(struct xc_sr_context *ctx, unsigned count,
PERROR("Failed to release pfn %" PRI_xen_pfn, min_pfn);
goto err;
}
+ ctx->restore.tot_pages--;
}
min_pfn++;
}
Olaf
[-- Attachment #1.2: signature.asc --]
[-- Type: application/pgp-signature, Size: 195 bytes --]
[-- Attachment #2: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
prev parent reply other threads:[~2017-08-23 14:32 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-08-17 17:01 [PATCH v2 0/3] tools/libxc: use superpages Olaf Hering
2017-08-17 17:01 ` [PATCH v2 1/3] tools/libxc: move SUPERPAGE macros to common header Olaf Hering
2017-08-22 14:23 ` Wei Liu
2017-08-17 17:01 ` [PATCH v2 2/3] tools/libxc: add API for bitmap access for restore Olaf Hering
2017-08-22 14:34 ` Wei Liu
2017-08-22 15:01 ` Wei Liu
2017-08-24 6:36 ` Olaf Hering
2017-08-24 11:13 ` Wei Liu
2017-08-17 17:01 ` [PATCH v2 3/3] tools/libxc: use superpages during restore of HVM guest Olaf Hering
2017-08-22 15:31 ` Wei Liu
2017-08-22 15:53 ` Olaf Hering
2017-08-23 8:05 ` Olaf Hering
2017-08-23 10:33 ` Wei Liu
2017-08-23 10:33 ` Wei Liu
2017-08-23 13:44 ` Olaf Hering
2017-08-23 14:32 ` Olaf Hering [this message]
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20170823143229.GE6372@aepfle.de \
--to=olaf@aepfle.de \
--cc=andrew.cooper3@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).