From: Elena Ufimtseva <ufimtseva@gmail.com>
To: xen-devel@lists.xen.org
Cc: Ian.Campbell@citrix.com, stefano.stabellini@eu.citrix.com,
george.dunlap@eu.citrix.com, dario.faggioli@citrix.com,
lccycc123@gmail.com, ian.jackson@eu.citrix.com,
Elena Ufimtseva <ufimtseva@gmail.com>,
sw@linux.com
Subject: [PATCH RFC v2 3/7] libxc/vNUMA: vnodes allocation on NUMA nodes.
Date: Fri, 13 Sep 2013 04:50:08 -0400
Message-ID: <1379062208-13816-1-git-send-email-ufimtseva@gmail.com>
vNUMA node allocation on physical NUMA nodes.

If a vNUMA topology is defined and the domain runs on a hardware NUMA
machine, allocate vnodes on physical NUMA nodes based on the
vnode_to_pnode map.
Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
Changes since v1:
* fixed incorrect output of the number of pages per vnode for the
domain in case of a failure;
* fixed the returned error codes.
TODO:
* vNUMA node memory allocation for domains with superpages.
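
For illustration only (not part of the patch): a minimal sketch of how a
caller might fill in the vNUMA fields that arch_boot_numa_alloc() consumes.
The field names (nr_vnodes, vmemsizes, vnode_to_pnode) come from this
series; the helper name, two-vnode layout and sizes are hypothetical.

    #include <errno.h>
    #include <stdlib.h>
    #include "xc_dom.h"    /* struct xc_dom_image */

    /* Hypothetical example: a guest with two 512MB vnodes, each
     * placed on a distinct physical NUMA node. */
    static int example_vnuma_setup(struct xc_dom_image *dom)
    {
        dom->nr_vnodes = 2;
        dom->vmemsizes = calloc(dom->nr_vnodes, sizeof(*dom->vmemsizes));
        dom->vnode_to_pnode = calloc(dom->nr_vnodes,
                                     sizeof(*dom->vnode_to_pnode));
        if ( dom->vmemsizes == NULL || dom->vnode_to_pnode == NULL )
            return -ENOMEM;
        dom->vmemsizes[0] = 512UL << 20;   /* 512MB on vnode 0 */
        dom->vmemsizes[1] = 512UL << 20;   /* 512MB on vnode 1 */
        dom->vnode_to_pnode[0] = 0;        /* vnode 0 -> pnode 0 */
        dom->vnode_to_pnode[1] = 1;        /* vnode 1 -> pnode 1 */
        /* NUMA_NO_NODE requests no particular placement for a vnode. */
        return 0;
    }

With fields set up along these lines, arch_setup_meminit() takes the vNUMA
path and populates each vnode's pages with XENMEMF_exact_node() memflags.
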
---
tools/libxc/xc_dom.h | 1 +
tools/libxc/xc_dom_x86.c | 80 +++++++++++++++++++++++++++++++++++++++++-----
tools/libxc/xg_private.h | 1 +
3 files changed, 73 insertions(+), 9 deletions(-)
diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index 790f145..751357a 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -370,6 +370,7 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
int arch_setup_meminit(struct xc_dom_image *dom);
int arch_setup_bootearly(struct xc_dom_image *dom);
int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_numa_alloc(struct xc_dom_image *dom);
/*
* Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index 126c0f8..7a22f91 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -789,27 +789,48 @@ int arch_setup_meminit(struct xc_dom_image *dom)
else
{
/* try to claim pages for early warning of insufficient memory avail */
+ rc = 0;
if ( dom->claim_enabled ) {
rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
dom->total_pages);
if ( rc )
+ {
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: Failed to claim mem for dom\n",
+ __FUNCTION__);
return rc;
+ }
}
/* setup initial p2m */
for ( pfn = 0; pfn < dom->total_pages; pfn++ )
dom->p2m_host[pfn] = pfn;
/* allocate guest memory */
- for ( i = rc = allocsz = 0;
- (i < dom->total_pages) && !rc;
- i += allocsz )
+ if ( dom->nr_vnodes > 0 )
{
- allocsz = dom->total_pages - i;
- if ( allocsz > 1024*1024 )
- allocsz = 1024*1024;
- rc = xc_domain_populate_physmap_exact(
- dom->xch, dom->guest_domid, allocsz,
- 0, 0, &dom->p2m_host[i]);
+ rc = arch_boot_numa_alloc(dom);
+ if ( rc )
+ {
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: Failed to allocate memory on NUMA nodes\n",
+ __FUNCTION__);
+ return rc;
+ }
+ }
+ else
+ {
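+ /* No vNUMA topology defined: fall back to the old linear allocation. */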
+ for ( i = rc = allocsz = 0;
+ (i < dom->total_pages) && !rc;
+ i += allocsz )
+ {
+ allocsz = dom->total_pages - i;
+ if ( allocsz > 1024*1024 )
+ allocsz = 1024*1024;
+ rc = xc_domain_populate_physmap_exact(
+ dom->xch, dom->guest_domid, allocsz,
+ 0, 0, &dom->p2m_host[i]);
+ }
}
/* Ensure no unclaimed pages are left unused.
@@ -817,7 +838,48 @@ int arch_setup_meminit(struct xc_dom_image *dom)
(void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
0 /* cancels the claim */);
}
+ return rc;
+}
+
+int arch_boot_numa_alloc(struct xc_dom_image *dom)
+{
+ int rc, n;
+ unsigned long long guest_pages;
+ unsigned long allocsz, i, k;
+ unsigned long memflags;
+ rc = allocsz = k = 0;
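+ /* Allocate each vnode's pages on its assigned pnode, if any. */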
+ for ( n = 0; n < dom->nr_vnodes; n++ )
+ {
+ memflags = 0;
+ if ( dom->vnode_to_pnode[n] != NUMA_NO_NODE )
+ {
+ memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+ memflags |= XENMEMF_exact_node_request;
+ }
+ guest_pages = dom->vmemsizes[n] >> PAGE_SHIFT_X86;
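+ /* Populate in 1024*1024-page chunks; k tracks the running p2m_host offset. */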
+ for ( i = 0;
+ (i < guest_pages) && !rc;
+ i += allocsz )
+ {
+ allocsz = guest_pages - i;
+ if ( allocsz > 1024*1024 )
+ allocsz = 1024*1024;
+ rc = xc_domain_populate_physmap_exact(
+ dom->xch, dom->guest_domid, allocsz,
+ 0, memflags, &dom->p2m_host[k]);
+ k += allocsz;
+ }
+ if ( rc )
+ {
+ xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+ "%s: Failed allocation of %Lu pages for vnode %d on pnode %d out of %lu\n",
+ __FUNCTION__, guest_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
+ return rc;
+ }
+ }
return rc;
}
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index db02ccf..a8d8e19 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -127,6 +127,7 @@ typedef uint64_t l4_pgentry_64_t;
#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
#define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
+#define NUMA_NO_NODE (~(uint32_t)0)
/* XXX SMH: following skanky macros rely on variable p2m_size being set */
/* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
--
1.7.10.4