xen-devel.lists.xenproject.org archive mirror
 help / color / mirror / Atom feed
From: Elena Ufimtseva <ufimtseva@gmail.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Ian.Campbell@citrix.com,
	stefano.stabellini@eu.citrix.com, george.dunlap@eu.citrix.com,
	msw@linux.com, dario.faggioli@citrix.com, lccycc123@gmail.com,
	ian.jackson@eu.citrix.com, JBeulich@suse.com,
	Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH v5 5/8] libxc: allocate domain vnuma nodes
Date: Tue,  3 Jun 2014 00:53:17 -0400	[thread overview]
Message-ID: <1401771200-11448-7-git-send-email-ufimtseva@gmail.com> (raw)
In-Reply-To: <1401771200-11448-1-git-send-email-ufimtseva@gmail.com>

vNUMA-aware domain memory allocation, based on the previously
built vnode-to-pnode mask.
Every PV domain has at least one vNUMA node, and the
vnode-to-pnode mapping is taken into account when allocating memory.

Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>
---
 tools/libxc/xc_dom.h     |   10 +++++++
 tools/libxc/xc_dom_x86.c |   69 ++++++++++++++++++++++++++++++++++++++--------
 tools/libxc/xg_private.h |    1 +
 3 files changed, 68 insertions(+), 12 deletions(-)

diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
index c9af0ce..e628a0e 100644
--- a/tools/libxc/xc_dom.h
+++ b/tools/libxc/xc_dom.h
@@ -122,6 +122,15 @@ struct xc_dom_image {
     struct xc_dom_phys *phys_pages;
     int realmodearea_log;
 
+    /*
+     * vNUMA topology and memory allocation structure.
+     * Defines the way to allocate memory on per NUMA
+     * physical nodes that is defined by vnode_to_pnode.
+     */
+    uint32_t nr_nodes;
+    uint64_t *numa_memszs;
+    unsigned int *vnode_to_pnode;
+
     /* malloc memory pool */
     struct xc_dom_mem *memblocks;
 
@@ -377,6 +386,7 @@ static inline xen_pfn_t xc_dom_p2m_guest(struct xc_dom_image *dom,
 int arch_setup_meminit(struct xc_dom_image *dom);
 int arch_setup_bootearly(struct xc_dom_image *dom);
 int arch_setup_bootlate(struct xc_dom_image *dom);
+int arch_boot_numa_alloc(struct xc_dom_image *dom);
 
 /*
  * Local variables:
diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
index e034d62..1992dfd 100644
--- a/tools/libxc/xc_dom_x86.c
+++ b/tools/libxc/xc_dom_x86.c
@@ -759,7 +759,7 @@ static int x86_shadow(xc_interface *xch, domid_t domid)
 int arch_setup_meminit(struct xc_dom_image *dom)
 {
     int rc;
-    xen_pfn_t pfn, allocsz, i, j, mfn;
+    xen_pfn_t pfn, i, j, mfn;
 
     rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
     if ( rc )
@@ -802,34 +802,79 @@ int arch_setup_meminit(struct xc_dom_image *dom)
     else
     {
         /* try to claim pages for early warning of insufficient memory avail */
+        /* rc must be re-initialised here: it is now also read after the
+         * claim block when claim_enabled is false. */
+        rc = 0;
         if ( dom->claim_enabled ) {
             rc = xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                        dom->total_pages);
             if ( rc )
+            {
+                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                             "%s: Failed to claim mem for dom\n",
+                             __FUNCTION__);
                 return rc;
+            }
         }
         /* setup initial p2m */
         for ( pfn = 0; pfn < dom->total_pages; pfn++ )
             dom->p2m_host[pfn] = pfn;
         
         /* allocate guest memory */
-        for ( i = rc = allocsz = 0;
-              (i < dom->total_pages) && !rc;
-              i += allocsz )
-        {
-            allocsz = dom->total_pages - i;
-            if ( allocsz > 1024*1024 )
-                allocsz = 1024*1024;
-            rc = xc_domain_populate_physmap_exact(
-                dom->xch, dom->guest_domid, allocsz,
-                0, 0, &dom->p2m_host[i]);
-        }
+        rc = arch_boot_numa_alloc(dom);
+        /* NOTE(review): this early return skips the unconditional
+         * claim-cancel call below, which the pre-patch code always reached.
+         * If the claim above succeeded and allocation then fails, the claim
+         * is left outstanding — consider cancelling it before returning. */
+        if ( rc )
+            return rc;
 
         /* Ensure no unclaimed pages are left unused.
          * OK to call if hadn't done the earlier claim call. */
         (void)xc_domain_claim_pages(dom->xch, dom->guest_domid,
                                     0 /* cancels the claim */);
     }
+    return rc;
+}
+
+/*
+ * Allocate the guest's memory one vnode at a time, honouring the
+ * vnode_to_pnode placement mask built by the toolstack.
+ *
+ * Any pv guest will have at least one vnuma node
+ * with numa_memszs[0] = domain memory and the rest
+ * topology initialized with default values.
+ * (numa_memszs[] is expressed in MB — see the << 20 below.)
+ *
+ * Returns 0 on success, or the populate_physmap error on failure.
+ */
+int arch_boot_numa_alloc(struct xc_dom_image *dom)
+{
+    int rc;
+    unsigned int n, memflags;
+    unsigned long long vnode_pages;
+    unsigned long long allocsz = 0, node_pfn_base, i;
+
+    rc = allocsz = node_pfn_base = 0;
+
+    /* NOTE(review): redundant — allocsz was already zeroed just above. */
+    allocsz = 0;
+    for ( n = 0; n < dom->nr_nodes; n++ )
+    {
+        /* VNUMA_NO_NODE means "no placement": let Xen pick any pnode. */
+        memflags = 0;
+        if ( dom->vnode_to_pnode[n] != VNUMA_NO_NODE )
+        {
+            memflags |= XENMEMF_exact_node(dom->vnode_to_pnode[n]);
+            memflags |= XENMEMF_exact_node_request;
+        }
+        /* MB -> bytes -> number of guest pages for this vnode. */
+        vnode_pages = (dom->numa_memszs[n] << 20) >> PAGE_SHIFT_X86;
+        for ( i = 0;
+            (i < vnode_pages) && !rc;
+                i += allocsz )
+        {
+            /* Populate in chunks of at most 1M pages per hypercall. */
+            allocsz = vnode_pages - i;
+            if ( allocsz > 1024*1024 )
+                allocsz = 1024*1024;
+                /* NOTE(review): misleading indentation — this call is NOT
+                 * under the "if" above; it runs on every loop iteration,
+                 * which is the intended behaviour. Re-indent to match. */
+                rc = xc_domain_populate_physmap_exact(
+                                    dom->xch, dom->guest_domid, allocsz,
+                                    0, memflags, &dom->p2m_host[node_pfn_base + i]);
+        }
+        if ( rc )
+        {
+            /* NOTE(review): format specifiers are off — "%Lu" is
+             * non-standard for unsigned long long (use "%llu"), "%d" is
+             * used for unsigned int n/vnode_to_pnode[n] (use "%u"), and
+             * "%lu" may not match xen_pfn_t total_pages on all builds. */
+            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
+                    "%s: Failed allocation of %Lu pages for vnode %d on pnode %d out of %lu\n",
+                    __FUNCTION__, vnode_pages, n, dom->vnode_to_pnode[n], dom->total_pages);
+            return rc;
+        }
+        /* Next vnode's pages start where this vnode's p2m range ended. */
+        node_pfn_base += i;
+    }
 
     return rc;
 }
diff --git a/tools/libxc/xg_private.h b/tools/libxc/xg_private.h
index f5755fd..15ee876 100644
--- a/tools/libxc/xg_private.h
+++ b/tools/libxc/xg_private.h
@@ -123,6 +123,7 @@ typedef uint64_t l4_pgentry_64_t;
 #define ROUNDUP(_x,_w) (((unsigned long)(_x)+(1UL<<(_w))-1) & ~((1UL<<(_w))-1))
 #define NRPAGES(x) (ROUNDUP(x, PAGE_SHIFT) >> PAGE_SHIFT)
 
+#define VNUMA_NO_NODE ~((unsigned int)0)
 
 /* XXX SMH: following skanky macros rely on variable p2m_size being set */
 /* XXX TJD: also, "guest_width" should be the guest's sizeof(unsigned long) */
-- 
1.7.10.4

  parent reply	other threads:[~2014-06-03  4:53 UTC|newest]

Thread overview: 15+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2014-06-03  4:53 [PATCH v5 0/8] vnuma introduction Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 8/8] add vnuma info for debug-key Elena Ufimtseva
2014-06-03  9:04   ` Jan Beulich
2014-06-04  4:13     ` Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 1/8] xen: vnuma topoplogy and subop hypercalls Elena Ufimtseva
2014-06-03  8:55   ` Jan Beulich
2014-06-03  4:53 ` [PATCH v5 2/8] libxc: Plumb Xen with vnuma topology Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 3/8] vnuma xl.cfg.pod and idl config options Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 4/8] vnuma topology parsing routines Elena Ufimtseva
2014-06-03  4:53 ` Elena Ufimtseva [this message]
2014-06-03  4:53 ` [PATCH v5 6/8] libxl: build e820 map for vnodes Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 7/8] libxl: place vnuma domain nodes on numa nodes Elena Ufimtseva
2014-06-03  4:53 ` [PATCH v5 8/8] add vnuma info out on debug-key Elena Ufimtseva
2014-06-03 11:37 ` [PATCH v5 0/8] vnuma introduction Wei Liu
2014-06-04  4:05   ` Elena Ufimtseva

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1401771200-11448-7-git-send-email-ufimtseva@gmail.com \
    --to=ufimtseva@gmail.com \
    --cc=Ian.Campbell@citrix.com \
    --cc=JBeulich@suse.com \
    --cc=dario.faggioli@citrix.com \
    --cc=george.dunlap@eu.citrix.com \
    --cc=ian.jackson@eu.citrix.com \
    --cc=keir@xen.org \
    --cc=lccycc123@gmail.com \
    --cc=msw@linux.com \
    --cc=stefano.stabellini@eu.citrix.com \
    --cc=xen-devel@lists.xen.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).