xen-devel.lists.xenproject.org archive mirror
From: vijay.kilari@gmail.com
To: julien.grall@arm.com, sstabellini@kernel.org,
	andre.przywara@arm.com, dario.faggioli@citrix.com
Cc: xen-devel@lists.xenproject.org, Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Subject: [RFC PATCH v1 07/21] ARM: NUMA: Parse memory NUMA information
Date: Thu,  9 Feb 2017 21:26:59 +0530	[thread overview]
Message-ID: <1486655834-9708-8-git-send-email-vijay.kilari@gmail.com> (raw)
In-Reply-To: <1486655834-9708-1-git-send-email-vijay.kilari@gmail.com>

From: Vijaya Kumar K <Vijaya.Kumar@cavium.com>

Parse the memory nodes in the device tree and fetch the
numa-node-id information from each. For each memory range found,
store the range in node_memblk_range[] along with its node id.
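
For reference, the memory nodes parsed here are the ones carrying the
standard "numa-node-id" property. An illustrative fragment (made-up
addresses and sizes, assuming #address-cells = #size-cells = 2):

    memory@80000000 {
        device_type = "memory";
        reg = <0x0 0x80000000 0x0 0x80000000>;   /* 2GB on node 0 */
        numa-node-id = <0>;
    };

    memory@880000000 {
        device_type = "memory";
        reg = <0x8 0x80000000 0x0 0x80000000>;   /* 2GB on node 1 */
        numa-node-id = <1>;
    };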

Signed-off-by: Vijaya Kumar K <Vijaya.Kumar@cavium.com>
---
 xen/arch/arm/bootfdt.c        |  4 +--
 xen/arch/arm/dt_numa.c        | 84 ++++++++++++++++++++++++++++++++++++++++++-
 xen/common/numa.c             |  8 +++++
 xen/include/xen/device_tree.h |  3 ++
 xen/include/xen/numa.h        |  1 +
 5 files changed, 97 insertions(+), 3 deletions(-)

diff --git a/xen/arch/arm/bootfdt.c b/xen/arch/arm/bootfdt.c
index d1122d8..5e2df92 100644
--- a/xen/arch/arm/bootfdt.c
+++ b/xen/arch/arm/bootfdt.c
@@ -56,8 +56,8 @@ static bool_t __init device_tree_node_compatible(const void *fdt, int node,
     return 0;
 }
 
-static void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
-                                       u32 size_cells, u64 *start, u64 *size)
+void __init device_tree_get_reg(const __be32 **cell, u32 address_cells,
+                                u32 size_cells, u64 *start, u64 *size)
 {
     *start = dt_next_cell(address_cells, cell);
     *size = dt_next_cell(size_cells, cell);
diff --git a/xen/arch/arm/dt_numa.c b/xen/arch/arm/dt_numa.c
index 4b94c36..fce9e67 100644
--- a/xen/arch/arm/dt_numa.c
+++ b/xen/arch/arm/dt_numa.c
@@ -27,6 +27,7 @@
 #include <xen/numa.h>
 
 nodemask_t numa_nodes_parsed;
+extern struct node node_memblk_range[NR_NODE_MEMBLKS];
 
 /*
  * Even though we connect cpus to numa domains later in SMP
@@ -48,11 +49,73 @@ static int __init dt_numa_process_cpu_node(const void *fdt, int node,
     return 0;
 }
 
+static int __init dt_numa_process_memory_node(const void *fdt, int node,
+                                              const char *name,
+                                              u32 address_cells,
+                                              u32 size_cells)
+{
+    const struct fdt_property *prop;
+    int i, ret, banks;
+    const __be32 *cell;
+    paddr_t start, size;
+    u32 reg_cells = address_cells + size_cells;
+    u32 nid;
+
+    if ( address_cells < 1 || size_cells < 1 )
+    {
+        printk(XENLOG_WARNING
+               "fdt: node `%s': invalid #address-cells or #size-cells", name);
+        return -EINVAL;
+    }
+
+    nid = device_tree_get_u32(fdt, node, "numa-node-id", MAX_NUMNODES);
+    if ( nid >= MAX_NUMNODES ) {
+        /*
+         * No node id found. Skip this memory node.
+         */
+        return 0;
+    }
+
+    prop = fdt_get_property(fdt, node, "reg", NULL);
+    if ( !prop )
+    {
+        printk(XENLOG_WARNING "fdt: node `%s': missing `reg' property\n",
+               name);
+        return -EINVAL;
+    }
+
+    cell = (const __be32 *)prop->data;
+    banks = fdt32_to_cpu(prop->len) / (reg_cells * sizeof(u32));
+
+    for ( i = 0; i < banks; i++ )
+    {
+        device_tree_get_reg(&cell, address_cells, size_cells, &start, &size);
+        if ( !size )
+            continue;
+
+        /* It is fine to add this area to the node data; it is used later. */
+        ret = conflicting_memblks(start, start + size);
+        if ( ret < 0 )
+            numa_add_memblk(nid, start, size);
+        else
+        {
+            printk(XENLOG_ERR
+                   "NUMA DT: node %u (%"PRIx64"-%"PRIx64") overlaps with memblk %d (%"PRIx64"-%"PRIx64")\n",
+                   nid, start, start + size, ret,
+                   node_memblk_range[ret].start, node_memblk_range[ret].end);
+            return -EINVAL;
+        }
+    }
+
+    node_set(nid, numa_nodes_parsed);
+
+    return 0;
+}
+
 static int __init dt_numa_scan_cpu_node(const void *fdt, int node,
                                         const char *name, int depth,
                                         u32 address_cells, u32 size_cells,
                                         void *data)
-
 {
     if ( device_tree_node_matches(fdt, node, "cpu") )
         return dt_numa_process_cpu_node(fdt, node, name, address_cells,
@@ -61,6 +124,18 @@ static int __init dt_numa_scan_cpu_node(const void *fdt, int node,
     return 0;
 }
 
+static int __init dt_numa_scan_memory_node(const void *fdt, int node,
+                                           const char *name, int depth,
+                                           u32 address_cells, u32 size_cells,
+                                           void *data)
+{
+    if ( device_tree_node_matches(fdt, node, "memory") )
+        return dt_numa_process_memory_node(fdt, node, name, address_cells,
+                                           size_cells);
+
+    return 0;
+}
+
 int __init dt_numa_init(void)
 {
     int ret;
@@ -68,5 +143,12 @@ int __init dt_numa_init(void)
     nodes_clear(numa_nodes_parsed);
     ret = device_tree_for_each_node((void *)device_tree_flattened,
                                     dt_numa_scan_cpu_node, NULL);
+
+    if ( ret )
+        return ret;
+
+    ret = device_tree_for_each_node((void *)device_tree_flattened,
+                                    dt_numa_scan_memory_node, NULL);
+
     return ret;
 }
diff --git a/xen/common/numa.c b/xen/common/numa.c
index 9b9cf9c..62c76af 100644
--- a/xen/common/numa.c
+++ b/xen/common/numa.c
@@ -55,6 +55,14 @@ struct node node_memblk_range[NR_NODE_MEMBLKS];
 nodeid_t memblk_nodeid[NR_NODE_MEMBLKS];
 struct node nodes[MAX_NUMNODES] __initdata;
 
+void __init numa_add_memblk(nodeid_t nodeid, u64 start, u64 size)
+{
+    node_memblk_range[num_node_memblks].start = start;
+    node_memblk_range[num_node_memblks].end = start + size;
+    memblk_nodeid[num_node_memblks] = nodeid;
+    num_node_memblks++;
+}
+
 int valid_numa_range(u64 start, u64 end, nodeid_t node)
 {
 #ifdef CONFIG_NUMA
diff --git a/xen/include/xen/device_tree.h b/xen/include/xen/device_tree.h
index de6b351..d92e47e 100644
--- a/xen/include/xen/device_tree.h
+++ b/xen/include/xen/device_tree.h
@@ -192,6 +192,9 @@ bool_t device_tree_node_matches(const void *fdt, int node,
                                 const char *match);
 u32 device_tree_get_u32(const void *fdt, int node,
                         const char *prop_name, u32 dflt);
+void device_tree_get_reg(const __be32 **cell, u32 address_cells,
+                         u32 size_cells, u64 *start, u64 *size);
+
 /**
  * dt_unflatten_host_device_tree - Unflatten the host device tree
  *
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index 77c5cfd..9392a89 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -67,6 +67,7 @@ static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
 #define clear_node_cpumask(cpu) do {} while (0)
 #endif /* CONFIG_NUMA */
 
+extern void numa_add_memblk(nodeid_t nodeid, u64 start, u64 size);
 extern int valid_numa_range(u64 start, u64 end, nodeid_t node);
 extern int conflicting_memblks(u64 start, u64 end);
 extern void cutoff_node(int i, u64 start, u64 end);
-- 
2.7.4

