From: Elena Ufimtseva <ufimtseva@gmail.com>
To: xen-devel@lists.xen.org
Cc: keir@xen.org, Ian.Campbell@citrix.com,
	stefano.stabellini@eu.citrix.com, george.dunlap@eu.citrix.com,
	msw@linux.com, dario.faggioli@citrix.com, lccycc123@gmail.com,
	ian.jackson@eu.citrix.com, JBeulich@suse.com,
	Elena Ufimtseva <ufimtseva@gmail.com>
Subject: [PATCH v3 4/7] libxl: vNUMA supporting interface
Date: Mon, 18 Nov 2013 15:24:19 -0500	[thread overview]
Message-ID: <1384806262-12532-5-git-send-email-ufimtseva@gmail.com> (raw)
In-Reply-To: <1384806262-12532-1-git-send-email-ufimtseva@gmail.com>

* Checks whether the user-defined map for allocating vnodes on physical
NUMA nodes is correct. If it is not, tries to use the automatic NUMA
placement mechanism, otherwise falls back to the default allocation,
not bound to any nodes. If the user-defined allocation map can be used,
disables automatic NUMA placement.

* Verifies the correctness of the memory range pfns for a PV guest
by requesting the e820 map for that domain;

* Provides Xen with the vNUMA topology information and the allocation
map used for the vnodes; a usage sketch of the new libxl_domain_setvnuma
interface follows below.
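
A minimal usage sketch (not part of the patch) of the new
libxl_domain_setvnuma() interface, for an external libxl caller that
has already built the per-vnode arrays. The topology values are
hypothetical examples, and the sketch assumes vmemrange_t exposes the
start/end fields that the libxl_x86.c changes below rely on.

    #include <libxl.h>

    static int set_two_vnode_topology(libxl_ctx *ctx, uint32_t domid)
    {
        /* Two vnodes of 512MB each, covering the first 1GB of guest memory. */
        vmemrange_t ranges[2] = {
            { .start = 0,            .end = 512ULL << 20  },
            { .start = 512ULL << 20, .end = 1024ULL << 20 },
        };
        /* 2x2 distance matrix: local distance 10, remote distance 20. */
        unsigned int vdistance[4] = { 10, 20, 20, 10 };
        /* One vcpu per vnode. */
        unsigned int vcpu_to_vnode[2] = { 0, 1 };
        /* Back both vnodes with physical NUMA node 0. */
        unsigned int vnode_to_pnode[2] = { 0, 0 };

        return libxl_domain_setvnuma(ctx, domid, 2 /* nr_vnodes */,
                                     2 /* nr_vcpus */, ranges, vdistance,
                                     vcpu_to_vnode, vnode_to_pnode);
    }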

Signed-off-by: Elena Ufimtseva <ufimtseva@gmail.com>

Changes since v2:
*   Added vNUMA memory pfn alignment, which takes into account
the e820_host option and the resulting non-contiguous e820 memory
map; a simplified sketch of the hole-size accounting follows below.
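
Below is a simplified, standalone illustration (not the patch code) of
the hole-size accounting that the new e820_memory_hole_size() helper
performs: count the bytes of [start, end) that are not covered by
E820_RAM entries, so that a vnode's range can be grown by that amount
when e820_host hands the guest a non-contiguous map. It assumes only
the addr/size/type fields of struct e820entry and the E820_RAM
constant already used elsewhere in the patch.

    static uint64_t hole_bytes(uint64_t start, uint64_t end,
                               const struct e820entry map[], int nr)
    {
        uint64_t covered = 0;
        int i;

        for (i = 0; i < nr; i++) {
            uint64_t s, e;

            if (map[i].type != E820_RAM)
                continue;
            /* Intersect [start, end) with this RAM entry. */
            s = map[i].addr > start ? map[i].addr : start;
            e = map[i].addr + map[i].size < end ?
                map[i].addr + map[i].size : end;
            if (e > s)
                covered += e - s;
        }
        return (end - start) - covered;
    }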
---
 tools/libxl/libxl.c          |   20 ++++
 tools/libxl/libxl.h          |   20 ++++
 tools/libxl/libxl_arch.h     |   13 +++
 tools/libxl/libxl_dom.c      |  210 ++++++++++++++++++++++++++++++++++++++++++
 tools/libxl/libxl_internal.h |    4 +
 tools/libxl/libxl_types.idl  |    6 +-
 tools/libxl/libxl_vnuma.h    |   12 +++
 tools/libxl/libxl_x86.c      |  123 +++++++++++++++++++++++++
 8 files changed, 407 insertions(+), 1 deletion(-)
 create mode 100644 tools/libxl/libxl_vnuma.h

diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index 0de1112..2bd3653 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4324,6 +4324,26 @@ static int libxl__set_vcpuonline_qmp(libxl__gc *gc, uint32_t domid,
     return 0;
 }
 
+int libxl_domain_setvnuma(libxl_ctx *ctx,
+                            uint32_t domid,
+                            uint16_t nr_vnodes,
+                            uint16_t nr_vcpus,
+                            vmemrange_t *vmemrange,
+                            unsigned int *vdistance,
+                            unsigned int *vcpu_to_vnode,
+                            unsigned int *vnode_to_pnode)
+{
+    GC_INIT(ctx);
+    int ret;
+    ret = xc_domain_setvnuma(ctx->xch, domid, nr_vnodes,
+                                nr_vcpus, vmemrange,
+                                vdistance, 
+                                vcpu_to_vnode,
+                                vnode_to_pnode);
+    GC_FREE;
+    return ret;
+}
+
 int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
 {
     GC_INIT(ctx);
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index c7dceda..58aed8f 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -281,11 +281,14 @@
 #include <netinet/in.h>
 #include <sys/wait.h> /* for pid_t */
 
+#include <xen/memory.h>
 #include <xentoollog.h>
 
 #include <libxl_uuid.h>
 #include <_libxl_list.h>
 
+#include <xen/vnuma.h>
+
 /* API compatibility. */
 #ifdef LIBXL_API_VERSION
 #if LIBXL_API_VERSION != 0x040200 && LIBXL_API_VERSION != 0x040300 && \
@@ -382,6 +385,14 @@
 #define LIBXL_EXTERNAL_CALLERS_ONLY /* disappears for callers outside libxl */
 #endif
 
+/*
+ * LIBXL_HAVE_BUILDINFO_VNUMA indicates that a vNUMA topology can be
+ * built for the guest upon request, based on the VM configuration.
+ * libxl will try to find the best allocation of vNUMA nodes on the
+ * physical NUMA nodes.
+ */
+#define LIBXL_HAVE_BUILDINFO_VNUMA 1
+
 typedef uint8_t libxl_mac[6];
 #define LIBXL_MAC_FMT "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx"
 #define LIBXL_MAC_FMTLEN ((2*6)+5) /* 6 hex bytes plus 5 colons */
@@ -741,6 +752,15 @@ void libxl_vcpuinfo_list_free(libxl_vcpuinfo *, int nr_vcpus);
 void libxl_device_vtpm_list_free(libxl_device_vtpm*, int nr_vtpms);
 void libxl_vtpminfo_list_free(libxl_vtpminfo *, int nr_vtpms);
 
+int libxl_domain_setvnuma(libxl_ctx *ctx,
+                           uint32_t domid,
+                           uint16_t nr_vnodes,
+                           uint16_t nr_vcpus,
+                           vmemrange_t *vmemrange,
+                           unsigned int *vdistance,
+                           unsigned int *vcpu_to_vnode,
+                           unsigned int *vnode_to_pnode);
+
 /*
  * Devices
  * =======
diff --git a/tools/libxl/libxl_arch.h b/tools/libxl/libxl_arch.h
index abe6685..b95abab 100644
--- a/tools/libxl/libxl_arch.h
+++ b/tools/libxl/libxl_arch.h
@@ -19,4 +19,17 @@
 int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
                uint32_t domid);
 
+int libxl__vnuma_align_mem(libxl__gc *gc,
+                            uint32_t domid,
+                            struct libxl_domain_build_info *b_info,
+                            vmemrange_t *memblks); 
+
+unsigned long e820_memory_hole_size(unsigned long start,
+                                    unsigned long end,
+                                    struct e820entry e820[],
+                                    int nr);
+
+unsigned int libxl__vnodemap_is_usable(libxl__gc *gc,
+                                libxl_domain_build_info *info);
+
 #endif
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index a1c16b0..378249e 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -23,6 +23,7 @@
 #include <xc_dom.h>
 #include <xen/hvm/hvm_info_table.h>
 #include <xen/hvm/hvm_xs_strings.h>
+#include <libxl_vnuma.h>
 
 libxl_domain_type libxl__domain_type(libxl__gc *gc, uint32_t domid)
 {
@@ -201,6 +202,88 @@ static int numa_place_domain(libxl__gc *gc, uint32_t domid,
     return rc;
 }
 
+/* prepares vnode to pnode map for domain vNUMA memory allocation */
+int libxl__init_vnode_to_pnode(libxl__gc *gc, uint32_t domid,
+                        libxl_domain_build_info *info)
+{
+    int i, n, nr_nodes, rc;
+    uint64_t *mems;
+    unsigned long long *claim = NULL;
+    libxl_numainfo *ninfo = NULL;
+
+    rc = -1;
+    if (info->vnode_to_pnode == NULL) {
+        info->vnode_to_pnode = calloc(info->nr_vnodes,
+                                      sizeof(*info->vnode_to_pnode));
+        if (info->vnode_to_pnode == NULL)                                    
+            return rc; 
+    }
+
+    /* default setting */
+    for (i = 0; i < info->nr_vnodes; i++)
+        info->vnode_to_pnode[i] = VNUMA_NO_NODE;
+
+    /* Get NUMA info */
+    ninfo = libxl_get_numainfo(CTX, &nr_nodes);
+    if (ninfo == NULL || nr_nodes == 0) {
+        LOG(DEBUG, "No HW NUMA found.\n");
+        rc = 0;
+        goto vnmapout;
+    }
+
+    /*
+     * Check if we have any hardware NUMA nodes selected;
+     * otherwise VNUMA_NO_NODE is set and the default allocation is used.
+     */
+    if (libxl_bitmap_is_empty(&info->nodemap))
+        return 0;
+    mems = info->vnuma_memszs;
+    
+    /* check if all vnodes will fit in one node */
+    libxl_for_each_set_bit(n, info->nodemap) {
+        if (ninfo[n].free / 1024 >= info->max_memkb &&
+            libxl_bitmap_test(&info->nodemap, n)) {
+            /*
+             * All domain v-nodes will fit on one p-node n;
+             * p-node n is the best candidate selected by automatic
+             * NUMA placement.
+             */
+            for (i = 0; i < info->nr_vnodes; i++)
+                info->vnode_to_pnode[i] = n;
+            /* we can exit, as we are happy with the placement */
+            return 0;
+        }
+    }
+    /* Determine the best p-nodes to fit the vNUMA nodes. */
+    /*
+     * TODO: change the algorithm. The current one just fits the nodes;
+     * it would be nice to have them also sorted by size.
+     * If no p-node is found, the vnode stays at VNUMA_NO_NODE.
+     */
+    claim = calloc(info->nr_vnodes, sizeof(*claim));
+    if (claim == NULL)
+        return rc;
+     
+    libxl_for_each_set_bit(n, info->nodemap) {
+        for (i = 0; i < info->nr_vnodes; i++) {
+            if ((claim[n] + (mems[i] << 20)) <= ninfo[n].free &&
+                /* vnode was not set yet */
+                info->vnode_to_pnode[i] == VNUMA_NO_NODE) {
+                info->vnode_to_pnode[i] = n;
+                claim[n] += (mems[i] << 20);
+            }
+        }
+    }
+
+    rc = 0;
+ vnmapout:
+    free(claim);
+    return rc;
+}
+
 int libxl__build_pre(libxl__gc *gc, uint32_t domid,
               libxl_domain_config *d_config, libxl__domain_build_state *state)
 {
@@ -235,6 +318,66 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
         if (rc)
             return rc;
     }
+
+    if (info->nr_vnodes > 0) {
+        /* The memory blocks will be formed here from sizes */
+        struct vmemrange *memrange = libxl__calloc(gc, info->nr_vnodes,
+                                                sizeof(*memrange));
+        if (memrange == NULL) {
+            LOG(DETAIL, "Failed to allocate memory for memory ranges.\n");
+            return ERROR_FAIL;
+        }
+
+        if (libxl__vnuma_align_mem(gc, domid, info, memrange) < 0) {
+            LOG(DETAIL, "Failed to align memory map.\n");
+            return ERROR_FAIL;
+        }
+
+        /*
+         * If a vNUMA vnode_to_pnode map is defined, determine whether we
+         * can disable automatic NUMA placement and place the vnodes on
+         * the specified pnodes.
+         * For now, if a vcpu affinity is specified, we will use the
+         * specified vnode to pnode map.
+         */
+        if (libxl__vnodemap_is_usable(gc, info) == 1) {
+            /* Will use the user-defined vnode to pnode map. */
+            libxl_defbool_set(&info->numa_placement, false);
+        } else {
+            LOG(ERROR, "The allocation mask for vnuma nodes cannot be used.\n");
+            if (libxl_defbool_val(info->vnuma_placement)) {
+                LOG(DETAIL, "Switching to automatic vnode to pnode placement.\n");
+                /* Construct the vnode to pnode mapping if possible. */
+                if (libxl__init_vnode_to_pnode(gc, domid, info) < 0) {
+                    LOG(DETAIL, "Failed to call libxl__init_vnode_to_pnode.\n");
+                    /* The vnode to pnode map will not be used if nr_vnodes == 0. */
+                    return ERROR_FAIL;
+                }
+            } else {
+                LOG(ERROR, "The vnodes cannot be mapped to pnodes this way.\n");
+                info->nr_vnodes = 0;
+                return ERROR_FAIL;
+            }
+        }
+        /* Plumb the domain with the vNUMA topology. */
+        if (xc_domain_setvnuma(ctx->xch, domid, info->nr_vnodes,
+                               info->max_vcpus, memrange,
+                               info->vdistance, info->vcpu_to_vnode,
+                               info->vnode_to_pnode) < 0) {
+            LOG(DETAIL, "Failed to set vnuma topology for domain.\n");
+            info->nr_vnodes = 0;
+            return ERROR_FAIL;
+        }
+    } else {
+        LOG(DEBUG, "Will not construct vNUMA topology.\n");
+    }
+
     libxl_domain_set_nodeaffinity(ctx, domid, &info->nodemap);
     libxl_set_vcpuaffinity_all(ctx, domid, info->max_vcpus, &info->cpumap);
 
@@ -256,6 +399,56 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
     return rc;
 }
 
+/*
+ * Checks whether the vnode_to_pnode map defined in info can be used
+ * for the allocation of vnodes on physical NUMA nodes, by verifying
+ * that there is enough free memory on the corresponding NUMA nodes.
+ */
+unsigned int libxl__vnodemap_is_usable(libxl__gc *gc, libxl_domain_build_info *info)
+{
+    unsigned int i;
+    libxl_numainfo *ninfo = NULL;
+    unsigned long long *claim;
+    unsigned int node;
+    uint64_t *mems;
+    int rc, nr_nodes;
+
+    rc = nr_nodes = 0;
+    if (info->vnode_to_pnode == NULL || info->vnuma_memszs == NULL)
+        return rc;
+    /* Cannot use the specified mapping on a non-NUMA machine. */
+    ninfo = libxl_get_numainfo(CTX, &nr_nodes);
+    if (ninfo == NULL)
+        return rc;
+    mems = info->vnuma_memszs;   
+    claim = calloc(info->nr_vnodes, sizeof(*claim));
+    if (claim == NULL)
+        return rc;
+    /* Sum the memory requests on a per-pnode basis. */
+    for (i = 0; i < info->nr_vnodes; i++) {
+        node = info->vnode_to_pnode[i];
+        /* Correct pnode number? */
+        if (node < nr_nodes)
+            claim[node] += (mems[i] << 20);
+        else
+            goto vmapu;
+    }
+    for (i = 0; i < nr_nodes; i++) {
+        if (claim[i] > ninfo[i].free)
+            /* Cannot satisfy the user request, fall back to the default. */
+            goto vmapu;
+    }
+    rc = 1;
+
+ vmapu:
+    free(claim);
+    return rc;
+}
+
 int libxl__build_post(libxl__gc *gc, uint32_t domid,
                       libxl_domain_build_info *info,
                       libxl__domain_build_state *state,
@@ -381,7 +574,24 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
             }
         }
     }
+    if (info->nr_vnodes > 0) {
+        dom->vnode_to_pnode = malloc(info->nr_vnodes *
+                                     sizeof(*info->vnode_to_pnode));
+        dom->vnuma_memszs = malloc(info->nr_vnodes *
+                                   sizeof(*info->vnuma_memszs));
 
+        if (dom->vnuma_memszs == NULL || dom->vnode_to_pnode == NULL) {
+            info->nr_vnodes = 0;
+            free(dom->vnode_to_pnode);
+            free(dom->vnuma_memszs);
+            goto out;
+        }
+        memcpy(dom->vnuma_memszs, info->vnuma_memszs,
+               sizeof(*info->vnuma_memszs) * info->nr_vnodes);
+        memcpy(dom->vnode_to_pnode, info->vnode_to_pnode,
+               sizeof(*info->vnode_to_pnode) * info->nr_vnodes);
+        dom->nr_vnodes = info->nr_vnodes;
+    }
     dom->flags = flags;
     dom->console_evtchn = state->console_port;
     dom->console_domid = state->console_domid;
diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
index 23ff265..0df742b 100644
--- a/tools/libxl/libxl_internal.h
+++ b/tools/libxl/libxl_internal.h
@@ -2882,6 +2882,10 @@ void libxl__numa_candidate_put_nodemap(libxl__gc *gc,
     libxl_bitmap_copy(CTX, &cndt->nodemap, nodemap);
 }
 
+int libxl__init_vnode_to_pnode(libxl__gc *gc, uint32_t domid,
+                                libxl_domain_build_info *info);
+
+
 /*
  * Inserts "elm_new" into the sorted list "head".
  *
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index de5bac3..86ad14c 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -310,7 +310,11 @@ libxl_domain_build_info = Struct("domain_build_info",[
     ("disable_migrate", libxl_defbool),
     ("cpuid",           libxl_cpuid_policy_list),
     ("blkdev_start",    string),
-    
+    ("vnuma_memszs",    Array(uint64, "nr_vnodes")),
+    ("vcpu_to_vnode",   Array(uint32, "nr_vnodemap")),
+    ("vdistance",       Array(uint32, "nr_vdist")),
+    ("vnode_to_pnode",  Array(uint32, "nr_vnode_to_pnode")),
+    ("vnuma_placement", libxl_defbool),
     ("device_model_version", libxl_device_model_version),
     ("device_model_stubdomain", libxl_defbool),
     # if you set device_model you must set device_model_version too
diff --git a/tools/libxl/libxl_vnuma.h b/tools/libxl/libxl_vnuma.h
new file mode 100644
index 0000000..6034cd7
--- /dev/null
+++ b/tools/libxl/libxl_vnuma.h
@@ -0,0 +1,12 @@
+#include "libxl_osdeps.h" /* must come before any other headers */
+
+#define VNUMA_NO_NODE ~((unsigned int)0)
+
+/* 
+ * The minimum vNUMA node size is set to 64MB, even though Linux
+ * currently allows 32MB, thus leaving some slack. Will be modified
+ * to match Linux.
+ */
+#define MIN_VNODE_SIZE  64U
+
+#define MAX_VNUMA_NODES ((unsigned int)1 << 10)
+
diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
index 87a8110..206f5be 100644
--- a/tools/libxl/libxl_x86.c
+++ b/tools/libxl/libxl_x86.c
@@ -310,3 +310,126 @@ int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
 
     return ret;
 }
+
+/*
+ * Checks for the beginning and end of RAM in the e820 map for the domain
+ * and aligns the start of the first and the end of the last vNUMA memory
+ * block to that map. vnode memory sizes are passed here in Megabytes.
+ * For a PV guest the e820 map has fixed hole sizes.
+ */
+int libxl__vnuma_align_mem(libxl__gc *gc,
+                            uint32_t domid,
+                            libxl_domain_build_info *b_info, /* IN: mem sizes */
+                            vmemrange_t *memblks)        /* OUT: linux numa blocks in pfn */
+{
+    int i, j, rc;
+    uint64_t next_start_pfn, end_max = 0, size, mem_hole;
+    uint32_t nr;
+    struct e820entry map[E820MAX];
+    
+    if (b_info->nr_vnodes == 0)
+        return -EINVAL;
+    libxl_ctx *ctx = libxl__gc_owner(gc);
+
+    /* retrieve the e820 map for this host */
+    rc = xc_get_machine_memory_map(ctx->xch, map, E820MAX);
+
+    if (rc < 0) {
+        errno = rc;
+        return -EINVAL;
+    }
+    nr = rc;
+    rc = e820_sanitize(ctx, map, &nr, b_info->target_memkb,
+                       (b_info->max_memkb - b_info->target_memkb) +
+                       b_info->u.pv.slack_memkb);
+    if (rc) {
+        errno = rc;
+        return -EINVAL;
+    }
+
+    /* max pfn for this host */
+    for (j = nr - 1; j >= 0; j--)
+        if (map[j].type == E820_RAM) {
+            end_max = map[j].addr + map[j].size;
+            break;
+        }
+    
+    memset(memblks, 0, sizeof(*memblks) * b_info->nr_vnodes);
+    next_start_pfn = 0;
+
+    memblks[0].start = map[0].addr;
+
+    for (i = 0; i < b_info->nr_vnodes; i++) {
+        /* the start may be non-zero */
+        memblks[i].start += next_start_pfn;
+        memblks[i].end = memblks[i].start + (b_info->vnuma_memszs[i] << 20);
+        
+        size = memblks[i].end - memblks[i].start;
+        /*
+         * For a PV guest with the e820_host option turned on we need
+         * to take into account memory holes. For a PV guest with
+         * e820_host disabled or unset, the map is a contiguous
+         * RAM region.
+         */
+        if (libxl_defbool_val(b_info->u.pv.e820_host)) {
+            while (mem_hole = e820_memory_hole_size(memblks[i].start,
+                                                 memblks[i].end, map, nr),
+                    memblks[i].end - memblks[i].start - mem_hole < size)
+            {
+                memblks[i].end += mem_hole;
+
+                if (memblks[i].end > end_max) {
+                    memblks[i].end = end_max;
+                    break;
+                }
+            }
+        }
+        next_start_pfn = memblks[i].end;
+    }
+    
+    if (memblks[i-1].end > end_max)
+        memblks[i-1].end = end_max;
+
+    return 0;
+}
+
+/*
+ * Used for a PV guest with e820_host enabled, which thus
+ * has a non-contiguous e820 memory map.
+ */
+unsigned long e820_memory_hole_size(unsigned long start,
+                                    unsigned long end,
+                                    struct e820entry e820[],
+                                    int nr)
+{
+    int i;
+    unsigned long absent, start_pfn, end_pfn;
+
+    absent = end - start;
+    for (i = 0; i < nr; i++) {
+        /* if not an E820_RAM region, skip it and don't subtract from absent */
+        if (e820[i].type == E820_RAM) {
+            start_pfn = e820[i].addr;
+            end_pfn =   e820[i].addr + e820[i].size;
+            /* beginning pfn is in this region? */
+            if (start >= start_pfn && start <= end_pfn) {
+                if (end > end_pfn)
+                    absent -= end_pfn - start;
+                else
+                    /* fit the region? then no absent pages */
+                    absent -= end - start;
+                continue;
+            }
+            /* found the end of range in this region? */
+            if (end <= end_pfn && end >= start_pfn) {
+                absent -= end - start_pfn;
+                /* no need to look for more ranges */
+                break;
+            }
+        }
+    }
+    return absent;
+}
+
+
-- 
1.7.10.4


Thread overview: 37+ messages
2013-11-18 20:24 [PATCH v3 0/7] vNUMA introduction Elena Ufimtseva
2013-11-18 20:24 ` [PATCH v3 1/7] xen: vNUMA support for PV guests Elena Ufimtseva
2013-11-19  7:41   ` Dario Faggioli
2013-11-19 14:01   ` Jan Beulich
2013-11-19 14:35     ` Dario Faggioli
2013-11-19 14:48       ` Jan Beulich
2013-11-19 15:42         ` Dario Faggioli
2013-11-19 15:54           ` Jan Beulich
2013-11-19 16:36             ` Dario Faggioli
2013-11-19 16:43               ` Jan Beulich
2013-11-26 21:59                 ` Elena Ufimtseva
2013-11-27  1:23                   ` Dario Faggioli
2013-11-27  8:14                   ` Jan Beulich
2013-12-02 17:06                     ` Elena Ufimtseva
2013-12-02 17:09                       ` Jan Beulich
2013-12-02 17:27                         ` Elena Ufimtseva
2013-11-18 20:24 ` [PATCH v3 2/7] libxc: Plumb Xen with vNUMA topology for domain Elena Ufimtseva
2013-11-19  8:37   ` Dario Faggioli
2013-11-19 14:03     ` Konrad Rzeszutek Wilk
2013-11-19 22:06       ` Elena Ufimtseva
2013-11-18 20:24 ` [PATCH v3 3/7] libxc: vnodes allocation on NUMA nodes Elena Ufimtseva
2013-11-19 14:22   ` Dario Faggioli
2013-11-18 20:24 ` Elena Ufimtseva [this message]
2013-11-19 18:37   ` [PATCH v3 4/7] libxl: vNUMA supporting interface Dario Faggioli
2013-11-21  9:59     ` Li Yechen
2013-11-26 22:14       ` Elena Ufimtseva
2013-11-26 23:21         ` Dario Faggioli
2013-12-02 18:14     ` Elena Ufimtseva
2013-11-18 20:24 ` [PATCH v3 5/7] libxl: vNUMA configuration parser Elena Ufimtseva
2013-11-19 17:20   ` Dario Faggioli
2013-11-20 22:48   ` Matthew Daley
2013-11-21  3:20     ` Elena Ufimtseva
2013-11-18 20:24 ` [PATCH v3 6/7] xen: adds vNUMA info debug-key u Elena Ufimtseva
2013-11-22 18:15   ` Dario Faggioli
2013-11-18 20:24 ` [PATCH v3 7/7] xl: docs for xl config vnuma options Elena Ufimtseva
2013-11-19 17:23   ` Dario Faggioli
2013-11-19 17:26     ` George Dunlap
