From: Elena Ufimtseva <ufimtseva@gmail.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Ian.Campbell@citrix.com,
stefano.stabellini@eu.citrix.com, george.dunlap@eu.citrix.com,
msw@linux.com, dario.faggioli@citrix.com, lccycc123@gmail.com,
ian.jackson@eu.citrix.com, JBeulich@suse.com,
Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH v4 5/7] libxc: vnuma memory domain allocation
Date: Wed, 4 Dec 2013 00:47:13 -0500 [thread overview]
Message-ID: <1386136035-19544-6-git-send-email-ufimtseva@gmail.com> (raw)
In-Reply-To: <1386136035-19544-1-git-send-email-ufimtseva@gmail.com>
Introduces domain memory allocation with vNUMA enabled.
Every PV domain has at least one vNUMA node,
and the vnode-to-pnode mapping will be taken into account.
If no mapping is set, allocation falls back to the default
behaviour, without using XENMEMF_exact_node memflags.
Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
tools/libxc/xc_dom.h | 10 ++++++++
tools/libxc/xc_dom_x86.c | 63 +++++++++++++++++++++++++++++++++++++---------
tools/libxc/xg_private.h | 1 +
3 files changed, 62 insertions(+), 12 deletions(-)
diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index a183e62..6d07071 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -114,6 +114,15 @@ struct xc_dom_image {
struct xc_dom_phys *phys_pages;
int realmodearea_log;
+ /*
+ * vNUMA topology and memory allocation structure.
+ * Defines the way to allocate memory on per NUMA
+ * physical nodes that is defined by vnode_to_pnode.
+ */
+ uint32_t nr_vnodes;
+ uint64_t *vnuma_memszs;
+ unsigned int *vnode_to_pnode;
+
/* malloc memory pool */
struct xc_dom_mem *memblocks;
@@ -369,6 +378,7 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
int arch_setup_meminit(struct xc_dom_image *dom);
int arch_setup_bootearly(struct xc_dom_image *dom);
int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_numa_alloc(struct xc_dom_image *dom);
/*
* Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index e034d62..803e460 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -759,7 +759,7 @@ static int x86_shadow(xc_interface *xch, domid_t domid)
int arch_setup_meminit(struct xc_dom_image *dom)
{
int rc;
- xen_pfn_t pfn, allocsz, i, j, mfn;
+ xen_pfn_t pfn, i, j, mfn;
rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
if ( rc )
@@ -802,6 +802,7 @@ int arch_setup_meminit(struct xc_dom_image *dom)
else
{
/* try to claim pages for early warning of insufficient memory avail */
+ rc = 0;
if ( dom->claim_enabled ) {
rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
dom->total_pages);
@@ -813,23 +814,61 @@ int arch_setup_meminit(struct xc_dom_image *dom)
dom->p2m_host[pfn] = pfn;
/* allocate guest memory */
- for ( i = rc = allocsz = 0;
- (i < dom->total_pages) && !rc;
- i += allocsz )
- {
- allocsz = dom->total_pages - i;
- if ( allocsz > 1024*1024 )
- allocsz = 1024*1024;
- rc = xc_domain_populate_physmap_exact(
- dom->xch, dom->guest_domid, allocsz,
- 0, 0, &dom->p2m_host[i]);
- }
+ rc = arch_boot_numa_alloc(dom);
+ if ( rc )
+ return rc;
/* Ensure no unclaimed pages are left unused.
* OK to call if hadn't done the earlier claim call. */
(void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
0 /* cancels the claim */);
}
+ return rc;
+}
+
+/*
+ * Allocate guest memory one vNUMA node at a time.
+ *
+ * Any pv guest will have at least one vnuma node
+ * with vnuma_memszs[0] = domain memory and the rest
+ * topology initialized with default values.
+ *
+ * Returns 0 on success, or the error from
+ * xc_domain_populate_physmap_exact on failure.
+ */
+int arch_boot_numa_alloc(struct xc_dom_image *dom)
+{
+    int rc = 0;
+    unsigned int n;
+    unsigned long long vnode_pages;
+    unsigned long long allocsz, node_pfn_base = 0, i;
+    unsigned long memflags;
+
+    for ( n = 0; n < dom->nr_vnodes; n++ )
+    {
+        memflags = 0;
+        /* Pin the allocation to the physical node, if one was assigned. */
+        if ( dom->vnode_to_pnode[n] != VNUMA_NO_NODE )
+        {
+            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+            memflags |= XENMEMF_exact_node_request;
+        }
+        /* vnuma_memszs[] is in MB; convert to a count of guest pages. */
+        vnode_pages = (dom->vnuma_memszs[n] << 20) >> PAGE_SHIFT_X86;
+        for ( i = 0; (i < vnode_pages) && !rc; i += allocsz )
+        {
+            /* Populate in batches of at most 1M pages per hypercall. */
+            allocsz = vnode_pages - i;
+            if ( allocsz > 1024*1024 )
+                allocsz = 1024*1024;
+            rc = xc_domain_populate_physmap_exact(
+                dom->xch, dom->guest_domid, allocsz,
+                0, memflags, &dom->p2m_host[node_pfn_base + i]);
+        }
+        if ( rc )
+        {
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                "%s: Failed allocation of %llu pages for vnode %u on pnode %u out of %lu\n",
+                __FUNCTION__, vnode_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
+            return rc;
+        }
+        node_pfn_base += i;
+    }
    return rc;
}
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index 5ff2124..9554b71 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -127,6 +127,7 @@ typedef uint64_t l4_pgentry_64_t;
#define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
#define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
+#define VNUMA_NO_NODE ~((unsigned int)0)
/* XXX SMH: following skanky macros rely on variable p2m_size being set */
/* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
--
1.7.10.4
next prev parent reply other threads:[~2013-12-04 5:47 UTC|newest]
Thread overview: 15+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-12-04 5:47 [PATCH v4 0/7] vNUMA introduction Elena Ufimtseva
2013-12-04 5:47 ` [PATCH v4 1/7] xen: vNUMA support for PV guests Elena Ufimtseva
2013-12-04 11:34 ` Jan Beulich
2013-12-04 18:02 ` Elena Ufimtseva
2013-12-04 5:47 ` [PATCH v4 2/7] libxc: Plumb Xen with vNUMA topology for domain Elena Ufimtseva
2013-12-16 19:16 ` Konrad Rzeszutek Wilk
2013-12-04 5:47 ` [PATCH v4 3/7] xl: vnuma memory parsing and supplement functions Elena Ufimtseva
2013-12-16 19:57 ` Konrad Rzeszutek Wilk
2013-12-04 5:47 ` [PATCH v4 4/7] xl: vnuma distance, vcpu and pnode masks parser Elena Ufimtseva
2013-12-04 5:47 ` Elena Ufimtseva [this message]
2013-12-04 5:47 ` [PATCH v4 6/7] libxl: vNUMA supporting interface Elena Ufimtseva
2013-12-04 5:47 ` [PATCH v4 7/7] xen: adds vNUMA info debug-key u Elena Ufimtseva
2013-12-04 11:23 ` Jan Beulich
2014-02-13 12:49 ` [PATCH v4 0/7] vNUMA introduction Li Yechen
2014-02-13 16:26 ` Elena Ufimtseva
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1386136035-19544-6-git-send-email-ufimtseva@gmail.com \
--to=ufimtseva@gmail.com \
--cc=Ian.Campbell@citrix.com \
--cc=JBeulich@suse.com \
--cc=dario.faggioli@citrix.com \
--cc=george.dunlap@eu.citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=keir@xen.org \
--cc=lccycc123@gmail.com \
--cc=msw@linux.com \
--cc=stefano.stabellini@eu.citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).