From: vijay.kilari@gmail.com
To: xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, wei.liu2@citrix.com,
George.Dunlap@eu.citrix.com, andrew.cooper3@citrix.com,
ian.jackson@eu.citrix.com, tim@xen.org, julien.grall@arm.com,
jbeulich@suse.com, Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Subject: [RFC PATCH v2 22/25] ARM: NUMA: Extract proximity from SRAT table
Date: Tue, 28 Mar 2017 21:23:30 +0530 [thread overview]
Message-ID: <1490716413-19796-23-git-send-email-vijay.kilari@gmail.com> (raw)
In-Reply-To: <1490716413-19796-1-git-send-email-vijay.kilari@gmail.com>
From: Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Register an SRAT entry handler for the
ACPI_SRAT_TYPE_GICC_AFFINITY type to parse the SRAT table
and extract the proximity domain for all CPU IDs.
Signed-off-by: Vijaya Kumar <Vijaya.Kumar@cavium.com>
---
xen/arch/arm/acpi/boot.c | 2 +
xen/arch/arm/numa/acpi_numa.c | 126 +++++++++++++++++++++++++++++++++++++++++-
xen/drivers/acpi/numa.c | 15 +++++
xen/include/acpi/actbl1.h | 17 +++++-
xen/include/asm-arm/numa.h | 9 +++
xen/include/xen/numa.h | 4 ++
6 files changed, 171 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/acpi/boot.c b/xen/arch/arm/acpi/boot.c
index 889208a..835c44e 100644
--- a/xen/arch/arm/acpi/boot.c
+++ b/xen/arch/arm/acpi/boot.c
@@ -31,6 +31,7 @@
#include <acpi/actables.h>
#include <xen/mm.h>
#include <xen/device_tree.h>
+#include <xen/numa.h>
#include <asm/acpi.h>
#include <asm/smp.h>
@@ -117,6 +118,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
+ numa_set_node(enabled_cpus, acpi_get_nodeid(mpidr));
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(enabled_cpus) = mpidr;
diff --git a/xen/arch/arm/numa/acpi_numa.c b/xen/arch/arm/numa/acpi_numa.c
index 6fd937d..8f51ed0 100644
--- a/xen/arch/arm/numa/acpi_numa.c
+++ b/xen/arch/arm/numa/acpi_numa.c
@@ -28,19 +28,71 @@
#include <asm/page.h>
#include <asm/acpi.h>
+extern nodemask_t processor_nodes_parsed;
+
/* Holds CPUID to MPIDR mapping read from MADT table. */
struct cpuid_to_hwid {
uint32_t cpuid;
uint64_t hwid;
};
+/* Holds NODE to MPIDR mapping. */
+struct node_to_hwid {
+ nodeid_t nodeid;
+ uint64_t hwid;
+};
+
#define PHYS_CPUID_INVALID 0xff
/* Holds mapping of CPU id to MPIDR read from MADT */
static struct cpuid_to_hwid __read_mostly cpuid_to_hwid_map[NR_CPUS] =
{ [0 ... NR_CPUS - 1] = {PHYS_CPUID_INVALID, MPIDR_INVALID} };
+static struct node_to_hwid __read_mostly node_to_hwid_map[NR_CPUS] =
+ { [0 ... NR_CPUS - 1] = {NUMA_NO_NODE, MPIDR_INVALID} };
+static unsigned int cpus_in_srat;
static unsigned int num_cpuid_to_hwid;
+nodeid_t __init acpi_get_nodeid(uint64_t hwid)
+{
+ unsigned int i;
+
+ for ( i = 0; i < cpus_in_srat; i++ )
+ {
+ if ( node_to_hwid_map[i].hwid == hwid )
+ return node_to_hwid_map[i].nodeid;
+ }
+
+ return NUMA_NO_NODE;
+}
+
+static uint64_t acpi_get_cpu_hwid(int cid)
+{
+ unsigned int i;
+
+ for ( i = 0; i < num_cpuid_to_hwid; i++ )
+ {
+ if ( cpuid_to_hwid_map[i].cpuid == cid )
+ return cpuid_to_hwid_map[i].hwid;
+ }
+
+ return MPIDR_INVALID;
+}
+
+static void __init acpi_map_node_to_hwid(nodeid_t nodeid, uint64_t hwid)
+{
+ if ( nodeid >= MAX_NUMNODES )
+ {
+ printk(XENLOG_WARNING
+ "ACPI: NUMA: nodeid out of range %d with MPIDR 0x%lx\n",
+ nodeid, hwid);
+ numa_failed();
+ return;
+ }
+
+ node_to_hwid_map[cpus_in_srat].nodeid = nodeid;
+ node_to_hwid_map[cpus_in_srat].hwid = hwid;
+}
+
static void __init acpi_map_cpu_to_hwid(uint32_t cpuid, uint64_t mpidr)
{
if ( mpidr == MPIDR_INVALID )
@@ -76,15 +128,87 @@ static int __init acpi_parse_madt_handler(struct acpi_subtable_header *header,
return 0;
}
+/* Callback for Proximity Domain -> ACPI processor UID mapping */
+static void __init
+acpi_numa_gicc_affinity_init(const struct acpi_srat_gicc_affinity *pa)
+{
+ int pxm, node;
+ uint64_t mpidr;
+
+ if ( srat_disabled() )
+ return;
+
+ if ( pa->header.length < sizeof(struct acpi_srat_gicc_affinity) )
+ {
+ printk(XENLOG_WARNING "SRAT: Invalid SRAT header length: %d\n",
+ pa->header.length);
+ numa_failed();
+ return;
+ }
+
+ if ( !(pa->flags & ACPI_SRAT_GICC_ENABLED) )
+ return;
+
+ if ( cpus_in_srat >= NR_CPUS )
+ {
+ printk(XENLOG_ERR
+ "SRAT: cpu_to_node_map[%d] is too small to fit all cpus\n",
+ NR_CPUS);
+ return;
+ }
+
+ pxm = pa->proximity_domain;
+ node = acpi_setup_node(pxm);
+ if ( node == NUMA_NO_NODE )
+ {
+ numa_failed();
+ return;
+ }
+
+ mpidr = acpi_get_cpu_hwid(pa->acpi_processor_uid);
+ if ( mpidr == MPIDR_INVALID )
+ {
+ printk(XENLOG_ERR
+ "SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
+ pxm, pa->acpi_processor_uid);
+ numa_failed();
+ return;
+ }
+
+ acpi_map_node_to_hwid(node, mpidr);
+ node_set(node, processor_nodes_parsed);
+ cpus_in_srat++;
+ set_acpi_numa(1);
+ printk(XENLOG_INFO "SRAT: PXM %d -> MPIDR 0x%lx -> Node %d\n",
+ pxm, mpidr, node);
+}
+
void __init acpi_map_uid_to_mpidr(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_madt_handler, NR_CPUS);
}
+static int __init
+acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ const struct acpi_srat_gicc_affinity *processor_affinity
+ = (struct acpi_srat_gicc_affinity *)header;
+
+ if (!processor_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+ acpi_numa_gicc_affinity_init(processor_affinity);
+
+ return 0;
+}
+
void __init arch_table_parse_srat(void)
{
- return;
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_GICC_AFFINITY,
+ acpi_parse_gicc_affinity, NR_CPUS);
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/xen/drivers/acpi/numa.c b/xen/drivers/acpi/numa.c
index 0adc32c..b48d91d 100644
--- a/xen/drivers/acpi/numa.c
+++ b/xen/drivers/acpi/numa.c
@@ -104,6 +104,21 @@ void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
}
#endif /* ACPI_DEBUG_OUTPUT */
break;
+ case ACPI_SRAT_TYPE_GICC_AFFINITY:
+#ifdef ACPI_DEBUG_OUTPUT
+ {
+ struct acpi_srat_gicc_affinity *p =
+ (struct acpi_srat_gicc_affinity *)header;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "SRAT Processor (acpi id[0x%04x]) in"
+ " proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GICC_ENABLED) ?
+ "enabled" : "disabled");
+ }
+#endif /* ACPI_DEBUG_OUTPUT */
+ break;
default:
printk(KERN_WARNING PREFIX
"Found unsupported SRAT entry (type = %#x)\n",
diff --git a/xen/include/acpi/actbl1.h b/xen/include/acpi/actbl1.h
index e199136..b84bfba 100644
--- a/xen/include/acpi/actbl1.h
+++ b/xen/include/acpi/actbl1.h
@@ -949,7 +949,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
- ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
+ ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
};
/*
@@ -1007,6 +1008,20 @@ struct acpi_srat_x2apic_cpu_affinity {
#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
+/* 3: GICC Affinity (ACPI 5.1) */
+
+struct acpi_srat_gicc_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
+
+/* Flags for struct acpi_srat_gicc_affinity */
+
+#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
+
/* Reset to default packing */
#pragma pack()
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 85fbbe8..1d4dc98 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -13,6 +13,15 @@ extern void dt_numa_process_memory_node(uint32_t nid,paddr_t start,
extern void register_node_distance(uint8_t (fn)(nodeid_t a, nodeid_t b));
extern void init_dt_numa_distance(void);
extern uint8_t __node_distance(nodeid_t a, nodeid_t b);
+#ifdef CONFIG_ACPI_NUMA
+nodeid_t acpi_get_nodeid(uint64_t hwid);
+#else
+static inline nodeid_t acpi_get_nodeid(uint64_t hwid)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI_NUMA */
+
#ifdef CONFIG_NUMA
extern void numa_init(void);
extern int dt_numa_init(void);
diff --git a/xen/include/xen/numa.h b/xen/include/xen/numa.h
index 851f4a7..c3b4adc 100644
--- a/xen/include/xen/numa.h
+++ b/xen/include/xen/numa.h
@@ -24,7 +24,11 @@ extern int compute_memnode_shift(struct node *nodes, int numnodes,
nodeid_t *nodeids, unsigned int *shift);
extern void numa_init_array(void);
extern bool_t srat_disabled(void);
+#ifdef CONFIG_NUMA
extern void numa_set_node(int cpu, nodeid_t node);
+#else
+static inline void numa_set_node(int cpu, nodeid_t node) { }
+#endif
extern void srat_detect_node(int cpu);
extern void setup_node_bootmem(nodeid_t nodeid, paddr_t start, paddr_t end);
extern void init_cpu_to_node(void);
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-03-28 15:53 UTC|newest]
Thread overview: 71+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-03-28 15:53 [RFC PATCH v2 00/25] ARM: Add Xen NUMA support vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 01/25] x86: NUMA: Clean up: Drop trailing spaces vijay.kilari
2017-03-28 16:44 ` Wei Liu
2017-05-31 10:20 ` Jan Beulich
2017-05-31 10:21 ` Jan Beulich
2017-03-28 15:53 ` [RFC PATCH v2 02/25] x86: NUMA: Fix datatypes and attributes vijay.kilari
2017-03-28 16:44 ` Wei Liu
2017-05-31 10:35 ` Jan Beulich
2017-03-28 15:53 ` [RFC PATCH v2 03/25] x86: NUMA: Rename and sanitize some common functions vijay.kilari
2017-06-30 14:05 ` Jan Beulich
2017-07-11 10:16 ` Vijay Kilari
2017-03-28 15:53 ` [RFC PATCH v2 04/25] x86: NUMA: Add accessors for acpi_numa, numa_off and numa_fake variables vijay.kilari
2017-04-20 15:59 ` Julien Grall
2017-04-25 6:54 ` Vijay Kilari
2017-04-25 12:04 ` Julien Grall
2017-04-25 12:20 ` Vijay Kilari
2017-04-25 12:28 ` Julien Grall
2017-04-25 14:54 ` Vijay Kilari
2017-04-25 15:14 ` Julien Grall
2017-04-25 15:43 ` Jan Beulich
2017-05-02 9:47 ` Vijay Kilari
2017-05-02 9:54 ` Jan Beulich
2017-05-08 17:38 ` Julien Grall
2017-06-30 14:07 ` Jan Beulich
2017-03-28 15:53 ` [RFC PATCH v2 05/25] x86: NUMA: Move generic dummy_numa_init to separate function vijay.kilari
2017-04-20 16:12 ` Julien Grall
2017-04-25 6:59 ` Vijay Kilari
2017-06-30 14:08 ` Jan Beulich
2017-03-28 15:53 ` [RFC PATCH v2 06/25] x86: NUMA: Add accessors for nodes[] and node_memblk_range[] structs vijay.kilari
2017-05-08 14:39 ` Julien Grall
2017-05-09 7:02 ` Vijay Kilari
2017-05-09 8:13 ` Julien Grall
2017-03-28 15:53 ` [RFC PATCH v2 07/25] x86: NUMA: Rename some generic functions vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 08/25] x86: NUMA: Sanitize node distance vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 09/25] ARM: NUMA: Add existing ARM numa code under CONFIG_NUMA vijay.kilari
2017-05-08 15:58 ` Julien Grall
2017-05-09 7:14 ` Vijay Kilari
2017-05-09 8:21 ` Julien Grall
2017-03-28 15:53 ` [RFC PATCH v2 10/25] x86: NUMA: Move numa code and make it generic vijay.kilari
2017-05-08 16:41 ` Julien Grall
2017-05-09 7:36 ` Vijay Kilari
2017-05-09 8:23 ` Julien Grall
2017-05-08 16:51 ` Julien Grall
2017-05-09 7:39 ` Vijay Kilari
2017-05-09 8:26 ` Julien Grall
2017-03-28 15:53 ` [RFC PATCH v2 11/25] x86: NUMA: Move common code from srat.c vijay.kilari
2017-05-08 17:06 ` Julien Grall
2017-05-10 9:00 ` Vijay Kilari
2017-03-28 15:53 ` [RFC PATCH v2 12/25] ARM: NUMA: Parse CPU NUMA information vijay.kilari
2017-05-08 17:31 ` Julien Grall
2017-05-10 5:24 ` Vijay Kilari
2017-05-10 8:52 ` Julien Grall
2017-03-28 15:53 ` [RFC PATCH v2 13/25] ARM: NUMA: Parse memory " vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 14/25] ARM: NUMA: Parse NUMA distance information vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 15/25] ARM: NUMA: Add CPU NUMA support vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 16/25] ARM: NUMA: Add memory " vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 17/25] ARM: NUMA: Add fallback on NUMA failure vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 18/25] ARM: NUMA: Do not expose numa info to DOM0 vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 19/25] ACPI: Refactor acpi SRAT and SLIT table handling code vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 20/25] ARM: NUMA: Extract MPIDR from MADT table vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 21/25] ACPI: Move arch specific SRAT parsing vijay.kilari
2017-03-28 15:53 ` vijay.kilari [this message]
2017-03-28 15:53 ` [RFC PATCH v2 23/25] ARM: NUMA: Initialize ACPI NUMA vijay.kilari
2017-03-28 15:53 ` [RFC PATCH v2 24/25] NUMA: Move CONFIG_NUMA to common Kconfig vijay.kilari
2017-05-31 10:04 ` Jan Beulich
2017-05-31 10:18 ` Julien Grall
2017-05-31 10:37 ` Jan Beulich
2017-06-15 7:52 ` Vijay Kilari
2017-06-15 9:00 ` Julien Grall
2017-03-28 15:53 ` [RFC PATCH v2 25/25] NUMA: Enable ACPI_NUMA config vijay.kilari
2017-05-31 10:05 ` Jan Beulich
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1490716413-19796-23-git-send-email-vijay.kilari@gmail.com \
--to=vijay.kilari@gmail.com \
--cc=George.Dunlap@eu.citrix.com \
--cc=Vijaya.Kumar@cavium.com \
--cc=andrew.cooper3@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=sstabellini@kernel.org \
--cc=tim@xen.org \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).