From: vijay.kilari@gmail.com
To: xen-devel@lists.xen.org
Cc: kevin.tian@intel.com, sstabellini@kernel.org,
wei.liu2@citrix.com, George.Dunlap@eu.citrix.com,
andrew.cooper3@citrix.com, dario.faggioli@citrix.com,
ian.jackson@eu.citrix.com, tim@xen.org, julien.grall@arm.com,
jbeulich@suse.com, Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Subject: [RFC PATCH v3 21/24] ARM: NUMA: ACPI: Extract proximity from SRAT table
Date: Tue, 18 Jul 2017 17:11:43 +0530 [thread overview]
Message-ID: <1500378106-2620-22-git-send-email-vijay.kilari@gmail.com> (raw)
In-Reply-To: <1500378106-2620-1-git-send-email-vijay.kilari@gmail.com>
From: Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Register an SRAT entry handler for type
ACPI_SRAT_TYPE_GICC_AFFINITY to parse the SRAT table
and extract the proximity domain for each CPU ID.
Signed-off-by: Vijaya Kumar <Vijaya.Kumar@cavium.com>
---
xen/arch/arm/acpi/boot.c | 2 +
xen/arch/arm/numa/acpi_numa.c | 124 +++++++++++++++++++++++++++++++++++++++++-
xen/drivers/acpi/numa.c | 15 +++++
xen/include/acpi/actbl1.h | 17 +++++-
xen/include/asm-arm/numa.h | 9 +++
5 files changed, 165 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/acpi/boot.c b/xen/arch/arm/acpi/boot.c
index 889208a..4e28b16 100644
--- a/xen/arch/arm/acpi/boot.c
+++ b/xen/arch/arm/acpi/boot.c
@@ -31,6 +31,7 @@
#include <acpi/actables.h>
#include <xen/mm.h>
#include <xen/device_tree.h>
+#include <xen/numa.h>
#include <asm/acpi.h>
#include <asm/smp.h>
@@ -117,6 +118,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
return;
}
+ numa_set_cpu_node(enabled_cpus, acpi_get_nodeid(mpidr));
/* map the logical cpu id to cpu MPIDR */
cpu_logical_map(enabled_cpus) = mpidr;
diff --git a/xen/arch/arm/numa/acpi_numa.c b/xen/arch/arm/numa/acpi_numa.c
index 341e20b7..95617f9 100644
--- a/xen/arch/arm/numa/acpi_numa.c
+++ b/xen/arch/arm/numa/acpi_numa.c
@@ -34,13 +34,63 @@ struct cpuid_to_hwid {
uint64_t hwid;
};
+/* Holds NODE to MPIDR mapping. */
+struct node_to_hwid {
+ nodeid_t nodeid;
+ uint64_t hwid;
+};
+
#define PHYS_CPUID_INVALID 0xff
/* Holds mapping of CPU id to MPIDR read from MADT */
static struct cpuid_to_hwid __read_mostly cpuid_to_hwid_map[NR_CPUS] =
{ [0 ... NR_CPUS - 1] = {PHYS_CPUID_INVALID, MPIDR_INVALID} };
+static struct node_to_hwid __read_mostly node_to_hwid_map[NR_CPUS] =
+ { [0 ... NR_CPUS - 1] = {NUMA_NO_NODE, MPIDR_INVALID} };
+static unsigned int cpus_in_srat;
static unsigned int num_cpuid_to_hwid;
+nodeid_t __init acpi_get_nodeid(uint64_t hwid)
+{
+ unsigned int i;
+
+ for ( i = 0; i < cpus_in_srat; i++ )
+ {
+ if ( node_to_hwid_map[i].hwid == hwid )
+ return node_to_hwid_map[i].nodeid;
+ }
+
+ return NUMA_NO_NODE;
+}
+
+static uint64_t acpi_get_cpu_hwid(int cid)
+{
+ unsigned int i;
+
+ for ( i = 0; i < num_cpuid_to_hwid; i++ )
+ {
+ if ( cpuid_to_hwid_map[i].cpuid == cid )
+ return cpuid_to_hwid_map[i].hwid;
+ }
+
+ return MPIDR_INVALID;
+}
+
+static void __init acpi_map_node_to_hwid(nodeid_t nodeid, uint64_t hwid)
+{
+ if ( nodeid >= MAX_NUMNODES )
+ {
+ printk(XENLOG_WARNING
+ "ACPI: NUMA: nodeid out of range %d with MPIDR 0x%lx\n",
+ nodeid, hwid);
+ numa_failed();
+ return;
+ }
+
+ node_to_hwid_map[cpus_in_srat].nodeid = nodeid;
+ node_to_hwid_map[cpus_in_srat].hwid = hwid;
+}
+
static void __init acpi_map_cpu_to_hwid(uint32_t cpuid, uint64_t mpidr)
{
if ( mpidr == MPIDR_INVALID )
@@ -76,15 +126,87 @@ static int __init acpi_parse_madt_handler(struct acpi_subtable_header *header,
return 0;
}
+/* Callback for Proximity Domain -> ACPI processor UID mapping */
+static void __init
+acpi_numa_gicc_affinity_init(const struct acpi_srat_gicc_affinity *pa)
+{
+ int pxm, node;
+ uint64_t mpidr;
+
+ if ( srat_disabled() )
+ return;
+
+ if ( pa->header.length < sizeof(struct acpi_srat_gicc_affinity) )
+ {
+ printk(XENLOG_WARNING "SRAT: Invalid SRAT header length: %d\n",
+ pa->header.length);
+ numa_failed();
+ return;
+ }
+
+ if ( !(pa->flags & ACPI_SRAT_GICC_ENABLED) )
+ return;
+
+ if ( cpus_in_srat >= NR_CPUS )
+ {
+ printk(XENLOG_ERR
+ "SRAT: cpu_to_node_map[%d] is too small to fit all cpus\n",
+ NR_CPUS);
+ return;
+ }
+
+ pxm = pa->proximity_domain;
+ node = acpi_setup_node(pxm);
+ if ( node == NUMA_NO_NODE )
+ {
+ numa_failed();
+ return;
+ }
+
+ mpidr = acpi_get_cpu_hwid(pa->acpi_processor_uid);
+ if ( mpidr == MPIDR_INVALID )
+ {
+ printk(XENLOG_ERR
+ "SRAT: PXM %d with ACPI ID %d has no valid MPIDR in MADT\n",
+ pxm, pa->acpi_processor_uid);
+ numa_failed();
+ return;
+ }
+
+ acpi_map_node_to_hwid(node, mpidr);
+ node_set(node, processor_nodes_parsed);
+ cpus_in_srat++;
+ acpi_numa = 1;
+ printk(XENLOG_INFO "SRAT: PXM %d -> MPIDR 0x%lx -> Node %d\n",
+ pxm, mpidr, node);
+}
+
void __init acpi_map_uid_to_mpidr(void)
{
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
acpi_parse_madt_handler, NR_CPUS);
}
+static int __init
+acpi_parse_gicc_affinity(struct acpi_subtable_header *header,
+ const unsigned long end)
+{
+ const struct acpi_srat_gicc_affinity *processor_affinity
+ = (struct acpi_srat_gicc_affinity *)header;
+
+ if (!processor_affinity)
+ return -EINVAL;
+
+ acpi_table_print_srat_entry(header);
+ acpi_numa_gicc_affinity_init(processor_affinity);
+
+ return 0;
+}
+
void __init arch_table_parse_srat(void)
{
- return;
+ acpi_table_parse_srat(ACPI_SRAT_TYPE_GICC_AFFINITY,
+ acpi_parse_gicc_affinity, NR_CPUS);
}
void __init acpi_numa_arch_fixup(void) {}
diff --git a/xen/drivers/acpi/numa.c b/xen/drivers/acpi/numa.c
index 0adc32c..b48d91d 100644
--- a/xen/drivers/acpi/numa.c
+++ b/xen/drivers/acpi/numa.c
@@ -104,6 +104,21 @@ void __init acpi_table_print_srat_entry(struct acpi_subtable_header * header)
}
#endif /* ACPI_DEBUG_OUTPUT */
break;
+ case ACPI_SRAT_TYPE_GICC_AFFINITY:
+#ifdef ACPI_DEBUG_OUTPUT
+ {
+ struct acpi_srat_gicc_affinity *p =
+ (struct acpi_srat_gicc_affinity *)header;
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "SRAT Processor (acpi id[0x%04x]) in"
+ " proximity domain %d %s\n",
+ p->acpi_processor_uid,
+ p->proximity_domain,
+ (p->flags & ACPI_SRAT_GICC_ENABLED) ?
+ "enabled" : "disabled");
+ }
+#endif /* ACPI_DEBUG_OUTPUT */
+ break;
default:
printk(KERN_WARNING PREFIX
"Found unsupported SRAT entry (type = %#x)\n",
diff --git a/xen/include/acpi/actbl1.h b/xen/include/acpi/actbl1.h
index e199136..b84bfba 100644
--- a/xen/include/acpi/actbl1.h
+++ b/xen/include/acpi/actbl1.h
@@ -949,7 +949,8 @@ enum acpi_srat_type {
ACPI_SRAT_TYPE_CPU_AFFINITY = 0,
ACPI_SRAT_TYPE_MEMORY_AFFINITY = 1,
ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY = 2,
- ACPI_SRAT_TYPE_RESERVED = 3 /* 3 and greater are reserved */
+ ACPI_SRAT_TYPE_GICC_AFFINITY = 3,
+ ACPI_SRAT_TYPE_RESERVED = 4 /* 4 and greater are reserved */
};
/*
@@ -1007,6 +1008,20 @@ struct acpi_srat_x2apic_cpu_affinity {
#define ACPI_SRAT_CPU_ENABLED (1) /* 00: Use affinity structure */
+/* 3: GICC Affinity (ACPI 5.1) */
+
+struct acpi_srat_gicc_affinity {
+ struct acpi_subtable_header header;
+ u32 proximity_domain;
+ u32 acpi_processor_uid;
+ u32 flags;
+ u32 clock_domain;
+};
+
+/* Flags for struct acpi_srat_gicc_affinity */
+
+#define ACPI_SRAT_GICC_ENABLED (1) /* 00: Use affinity structure */
+
/* Reset to default packing */
#pragma pack()
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index 0d3146c..f0a50bd 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -7,6 +7,15 @@ void dt_numa_process_memory_node(uint32_t nid, paddr_t start, paddr_t size);
void register_node_distance(uint8_t (fn)(nodeid_t a, nodeid_t b));
void init_dt_numa_distance(void);
+#ifdef CONFIG_ACPI_NUMA
+nodeid_t acpi_get_nodeid(uint64_t hwid);
+#else
+static inline nodeid_t acpi_get_nodeid(uint64_t hwid)
+{
+ return 0;
+}
+#endif /* CONFIG_ACPI_NUMA */
+
#ifdef CONFIG_NUMA
void numa_init(void);
int dt_numa_init(void);
--
2.7.4
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-07-18 11:41 UTC|newest]
Thread overview: 109+ messages / expand[flat|nested] mbox.gz Atom feed top
2017-07-18 11:41 [RFC PATCH v3 00/24] ARM: Add Xen NUMA support vijay.kilari
2017-07-18 11:41 ` [RFC PATCH v3 01/24] NUMA: Make number of NUMA nodes configurable vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-18 17:52 ` Julien Grall
2017-07-19 8:17 ` Wei Liu
2017-07-19 15:48 ` Julien Grall
2017-07-28 10:11 ` Jan Beulich
2017-07-18 17:55 ` Julien Grall
2017-07-19 7:00 ` Vijay Kilari
2017-07-19 15:55 ` Julien Grall
2017-07-20 7:30 ` Vijay Kilari
2017-07-20 10:57 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 02/24] x86: NUMA: Clean up: Fix coding styles and drop unused code vijay.kilari
2017-07-19 16:23 ` Julien Grall
2017-07-19 16:27 ` Wei Liu
2017-07-19 16:34 ` Julien Grall
2017-07-20 7:00 ` Vijay Kilari
2017-07-20 11:00 ` Julien Grall
2017-07-20 12:05 ` Vijay Kilari
2017-07-20 12:09 ` Julien Grall
2017-07-20 12:29 ` Vijay Kilari
2017-07-20 12:33 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 03/24] x86: NUMA: Fix datatypes and attributes vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-18 11:41 ` [RFC PATCH v3 04/24] x86: NUMA: Rename and sanitize memnode shift code vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-19 17:12 ` Julien Grall
2017-07-20 6:56 ` Vijay Kilari
2017-07-18 11:41 ` [RFC PATCH v3 05/24] x86: NUMA: Add accessors for nodes[] and node_memblk_range[] structs vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-19 6:40 ` Vijay Kilari
2017-07-19 17:18 ` Julien Grall
2017-07-20 7:41 ` Vijay Kilari
2017-07-20 11:03 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 06/24] x86: NUMA: Rename some generic functions vijay.kilari
2017-07-19 17:23 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 07/24] ARM: NUMA: Add existing ARM numa code under CONFIG_NUMA vijay.kilari
2017-07-18 18:06 ` Julien Grall
2017-07-20 9:31 ` Vijay Kilari
2017-07-20 11:10 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 08/24] NUMA: x86: Move numa code and make it generic vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-18 18:16 ` Julien Grall
2017-07-19 6:47 ` Vijay Kilari
2017-07-19 17:41 ` Julien Grall
2017-07-20 8:55 ` Vijay Kilari
2017-07-20 11:14 ` Julien Grall
2017-07-24 20:28 ` Stefano Stabellini
2017-07-18 11:41 ` [RFC PATCH v3 09/24] NUMA: x86: Move common code from srat.c vijay.kilari
2017-07-20 11:17 ` Julien Grall
2017-07-20 11:43 ` Vijay Kilari
2017-07-24 20:35 ` Stefano Stabellini
2017-07-18 11:41 ` [RFC PATCH v3 10/24] NUMA: Allow numa initialization with DT vijay.kilari
2017-07-19 17:58 ` Julien Grall
2017-07-20 10:28 ` Vijay Kilari
2017-07-20 11:20 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 11/24] ARM: fdt: Export and introduce new fdt functions vijay.kilari
2017-07-18 15:29 ` Wei Liu
2017-07-18 16:29 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 12/24] ARM: NUMA: DT: Parse CPU NUMA information vijay.kilari
2017-07-19 18:26 ` Julien Grall
2017-07-20 9:20 ` Vijay Kilari
2017-07-18 11:41 ` [RFC PATCH v3 13/24] ARM: NUMA: DT: Parse memory " vijay.kilari
2017-07-19 18:39 ` Julien Grall
2017-07-20 10:37 ` Vijay Kilari
2017-07-20 11:24 ` Julien Grall
2017-07-20 11:26 ` Julien Grall
2017-07-21 11:10 ` Vijay Kilari
2017-07-21 12:35 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 14/24] ARM: NUMA: DT: Parse NUMA distance information vijay.kilari
2017-07-20 13:02 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 15/24] ARM: NUMA: DT: Add CPU NUMA support vijay.kilari
2017-07-24 11:24 ` Julien Grall
2017-07-25 6:47 ` Vijay Kilari
2017-07-25 18:38 ` Julien Grall
2017-07-25 18:48 ` Stefano Stabellini
2017-07-25 18:51 ` Julien Grall
2017-07-25 19:06 ` Stefano Stabellini
2017-07-26 17:18 ` Julien Grall
2017-07-26 17:21 ` Stefano Stabellini
2017-07-18 11:41 ` [RFC PATCH v3 16/24] ARM: NUMA: Add memory " vijay.kilari
2017-07-24 12:43 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 17/24] ARM: NUMA: DT: Do not expose numa info to DOM0 vijay.kilari
2017-07-24 20:48 ` Stefano Stabellini
2017-07-26 17:22 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 18/24] ACPI: Refactor acpi SRAT and SLIT table handling code vijay.kilari
2017-07-18 15:36 ` Wei Liu
2017-07-19 6:33 ` Vijay Kilari
2017-07-18 11:41 ` [RFC PATCH v3 19/24] ARM: NUMA: Extract MPIDR from MADT table vijay.kilari
2017-07-24 22:17 ` Stefano Stabellini
2017-07-26 18:12 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 20/24] ACPI: Move arch specific SRAT parsing vijay.kilari
2017-07-24 21:15 ` Stefano Stabellini
2017-07-18 11:41 ` vijay.kilari [this message]
2017-07-24 22:17 ` [RFC PATCH v3 21/24] ARM: NUMA: ACPI: Extract proximity from SRAT table Stefano Stabellini
2017-07-26 18:18 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 22/24] ARM: NUMA: Initialize ACPI NUMA vijay.kilari
2017-07-24 22:11 ` Stefano Stabellini
2017-07-26 18:23 ` Julien Grall
2017-07-18 11:41 ` [RFC PATCH v3 23/24] NUMA: Move CONFIG_NUMA to common Kconfig vijay.kilari
2017-07-18 16:25 ` Julien Grall
2017-07-18 18:00 ` Julien Grall
2017-07-28 10:08 ` Jan Beulich
2017-07-18 11:41 ` [RFC PATCH v3 24/24] NUMA: Enable ACPI_NUMA config vijay.kilari
2017-07-18 16:18 ` [RFC PATCH v3 00/24] ARM: Add Xen NUMA support Julien Grall
2017-07-19 6:31 ` Vijay Kilari
2017-07-19 7:18 ` Julien Grall
[not found] ` <CALicx6svuo3JXik=8bYuciFzWDu6qmwVi1VXdBgjLp_f_YUhqQ@mail.gmail.com>
2017-10-06 17:09 ` vkilari
2017-10-06 17:30 ` Julien Grall
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1500378106-2620-22-git-send-email-vijay.kilari@gmail.com \
--to=vijay.kilari@gmail.com \
--cc=George.Dunlap@eu.citrix.com \
--cc=Vijaya.Kumar@cavium.com \
--cc=andrew.cooper3@citrix.com \
--cc=dario.faggioli@citrix.com \
--cc=ian.jackson@eu.citrix.com \
--cc=jbeulich@suse.com \
--cc=julien.grall@arm.com \
--cc=kevin.tian@intel.com \
--cc=sstabellini@kernel.org \
--cc=tim@xen.org \
--cc=wei.liu2@citrix.com \
--cc=xen-devel@lists.xen.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).