From: Tony Luck <tony.luck@intel.com>
To: Fenghua Yu <fenghua.yu@intel.com>,
Reinette Chatre <reinette.chatre@intel.com>,
Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>,
Peter Newman <peternewman@google.com>,
James Morse <james.morse@arm.com>,
Babu Moger <babu.moger@amd.com>,
Drew Fustini <dfustini@baylibre.com>,
Dave Martin <Dave.Martin@arm.com>
Cc: x86@kernel.org, linux-kernel@vger.kernel.org,
patches@lists.linux.dev, Tony Luck <tony.luck@intel.com>
Subject: [PATCH v18 16/17] x86/resctrl: Sub NUMA Cluster detection and enable
Date: Wed, 15 May 2024 15:23:24 -0700 [thread overview]
Message-ID: <20240515222326.74166-17-tony.luck@intel.com> (raw)
In-Reply-To: <20240515222326.74166-1-tony.luck@intel.com>
There isn't a simple hardware bit that indicates whether a CPU is
running in Sub NUMA Cluster (SNC) mode. Infer the state by comparing
the number of CPUs sharing the L3 cache with CPU0 to the number of
CPUs in the same NUMA node as CPU0.
When SNC mode is detected, reconfigure the RMID counters by updating
the MSR_RMID_SNC_CONFIG MSR on each socket as CPUs are seen.
Clearing bit zero of the MSR divides the RMIDs and renumbers the ones
on the second SNC node to start from zero.
Signed-off-by: Tony Luck <tony.luck@intel.com>
---
arch/x86/include/asm/msr-index.h | 1 +
arch/x86/kernel/cpu/resctrl/core.c | 104 +++++++++++++++++++++++++++++
2 files changed, 105 insertions(+)
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index e022e6eb766c..3cb8dd6311c3 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -1164,6 +1164,7 @@
#define MSR_IA32_QM_CTR 0xc8e
#define MSR_IA32_PQR_ASSOC 0xc8f
#define MSR_IA32_L3_CBM_BASE 0xc90
+#define MSR_RMID_SNC_CONFIG 0xca0
#define MSR_IA32_L2_CBM_BASE 0xd10
#define MSR_IA32_MBA_THRTL_BASE 0xd50
diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c
index dd40c998df72..195f9e29c553 100644
--- a/arch/x86/kernel/cpu/resctrl/core.c
+++ b/arch/x86/kernel/cpu/resctrl/core.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>
+#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>
@@ -753,11 +754,42 @@ static void clear_closid_rmid(int cpu)
RESCTRL_RESERVED_CLOSID);
}
+/*
+ * The power-on reset value of MSR_RMID_SNC_CONFIG is 0x1
+ * which indicates that RMIDs are configured in legacy mode.
+ * This mode is incompatible with Linux resctrl semantics
+ * as RMIDs are partitioned between SNC nodes, which requires
+ * a user to know which RMID is allocated to a task.
+ * Clearing bit 0 reconfigures the RMID counters for use
+ * in Sub NUMA Cluster mode. This mode is better for Linux.
+ * The RMID space is divided between all SNC nodes with the
+ * RMIDs renumbered to start from zero in each node when
+ * counting operations from tasks. Code to read the counters
+ * must adjust RMID counter numbers based on SNC node. See
+ * __rmid_read() for code that does this.
+ */
+static void snc_remap_rmids(int cpu)
+{
+ u64 val;
+
+ /* Only need to enable once per package. */
+ if (cpumask_first(topology_core_cpumask(cpu)) != cpu)
+ return;
+
+ rdmsrl(MSR_RMID_SNC_CONFIG, val);
+ val &= ~BIT_ULL(0);
+ wrmsrl(MSR_RMID_SNC_CONFIG, val);
+}
+
static int resctrl_arch_online_cpu(unsigned int cpu)
{
struct rdt_resource *r;
mutex_lock(&domain_list_lock);
+
+ if (snc_nodes_per_l3_cache > 1)
+ snc_remap_rmids(cpu);
+
for_each_capable_rdt_resource(r)
domain_add_cpu(cpu, r);
mutex_unlock(&domain_list_lock);
@@ -997,11 +1029,83 @@ static __init bool get_rdt_resources(void)
return (rdt_mon_capable || rdt_alloc_capable);
}
+/* CPU models that support MSR_RMID_SNC_CONFIG */
+static const struct x86_cpu_id snc_cpu_ids[] __initconst = {
+ X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
+ X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_GRANITERAPIDS_X, 0),
+ X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, 0),
+ {}
+};
+
+/*
+ * There isn't a simple hardware bit that indicates whether a CPU is running
+ * in Sub NUMA Cluster (SNC) mode. Infer the state by comparing the
+ * number of CPUs sharing the L3 cache with CPU0 to the number of CPUs in
+ * the same NUMA node as CPU0.
+ * It is not possible to accurately determine SNC state if the system is
+ * booted with a maxcpus=N parameter. That distorts the ratio of SNC nodes
+ * to L3 caches. It will be OK if the system is booted with hyperthreading
+ * disabled (since this doesn't affect the ratio).
+ */
+static __init int snc_get_config(void)
+{
+ struct cpu_cacheinfo *ci = get_cpu_cacheinfo(0);
+ const cpumask_t *node0_cpumask;
+ cpumask_t *l3_cpumask = NULL;
+ int i, ret;
+
+ if (!x86_match_cpu(snc_cpu_ids))
+ return 1;
+
+ cpus_read_lock();
+ if (num_online_cpus() != num_present_cpus())
+ pr_warn("Some CPUs offline, SNC detection may be incorrect\n");
+ cpus_read_unlock();
+
+ for (i = 0; i < ci->num_leaves; i++) {
+ if (ci->info_list[i].level == 3) {
+ if (ci->info_list[i].attributes & CACHE_ID) {
+ l3_cpumask = &ci->info_list[i].shared_cpu_map;
+ break;
+ }
+ }
+ }
+ if (!l3_cpumask) {
+ pr_info("can't get CPU0 L3 mask\n");
+ return 1;
+ }
+
+ node0_cpumask = cpumask_of_node(cpu_to_node(0));
+
+ ret = bitmap_weight(cpumask_bits(l3_cpumask), nr_cpu_ids) /
+ bitmap_weight(cpumask_bits(node0_cpumask), nr_cpu_ids);
+
+ /* sanity check: Only valid results are 1, 2, 3, 4 */
+ switch (ret) {
+ case 1:
+ break;
+ case 2 ... 4:
+ pr_info("Sub-NUMA cluster detected with %d nodes per L3 cache\n", ret);
+ rdt_resources_all[RDT_RESOURCE_L3].r_resctrl.mon_scope = RESCTRL_NODE;
+ break;
+ default:
+ pr_warn("Ignore improbable SNC node count %d\n", ret);
+ ret = 1;
+ break;
+ }
+
+ return ret;
+}
+
static __init void rdt_init_res_defs_intel(void)
{
struct rdt_hw_resource *hw_res;
struct rdt_resource *r;
+ snc_nodes_per_l3_cache = snc_get_config();
+
for_each_rdt_resource(r) {
hw_res = resctrl_to_arch_res(r);
--
2.44.0
next prev parent reply other threads:[~2024-05-15 22:23 UTC|newest]
Thread overview: 42+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-05-15 22:23 [PATCH v18 00/17] Add support for Sub-NUMA cluster (SNC) systems Tony Luck
2024-05-15 22:23 ` [PATCH v18 01/17] x86/resctrl: Prepare for new domain scope Tony Luck
2024-05-15 22:23 ` [PATCH v18 02/17] x86/resctrl: Prepare to split rdt_domain structure Tony Luck
2024-05-15 22:23 ` [PATCH v18 03/17] x86/resctrl: Prepare for different scope for control/monitor operations Tony Luck
2024-05-15 22:23 ` [PATCH v18 04/17] x86/resctrl: Split the rdt_domain and rdt_hw_domain structures Tony Luck
2024-05-15 22:23 ` [PATCH v18 05/17] x86/resctrl: Add node-scope to the options for feature scope Tony Luck
2024-05-15 22:23 ` [PATCH v18 06/17] x86/resctrl: Introduce snc_nodes_per_l3_cache Tony Luck
2024-05-22 21:08 ` Reinette Chatre
2024-05-23 19:04 ` Luck, Tony
2024-05-23 20:56 ` Reinette Chatre
2024-05-23 21:25 ` Luck, Tony
2024-05-23 22:31 ` Reinette Chatre
2024-05-23 23:18 ` Luck, Tony
2024-05-23 23:48 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 07/17] x86/resctrl: Prepare for new Sub-NUMA (SNC) cluster monitor files Tony Luck
2024-05-22 21:12 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 08/17] x86/resctrl: Add and initialize display_id field to struct rdt_mon_domain Tony Luck
2024-05-22 21:13 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 09/17] x86/resctrl: Add new fields to struct rmid_read for summation of domains Tony Luck
2024-05-22 21:14 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 10/17] x86/resctrl: Refactor mkdir_mondata_subdir() with a helper function Tony Luck
2024-05-22 21:15 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 11/17] x86/resctrl: Allocate a new bit in union mon_data_bits Tony Luck
2024-05-22 21:16 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 12/17] x86/resctrl: Create Sub-NUMA (SNC) monitor files Tony Luck
2024-05-22 21:19 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 13/17] x86/resctrl: Handle removing directories in Sub-NUMA (SNC) mode Tony Luck
2024-05-22 21:20 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 14/17] x86/resctrl: Sum monitor data acrss Sub-NUMA (SNC) nodes when needed Tony Luck
2024-05-22 21:24 ` Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 15/17] x86/resctrl: Fix RMID reading sanity check for Sub-NUMA (SNC) mode Tony Luck
2024-05-16 8:59 ` Maciej Wieczor-Retman
2024-05-22 21:25 ` Reinette Chatre
2024-05-22 23:47 ` Tony Luck
2024-05-23 17:03 ` Reinette Chatre
2024-05-15 22:23 ` Tony Luck [this message]
2024-05-22 21:26 ` [PATCH v18 16/17] x86/resctrl: Sub NUMA Cluster detection and enable Reinette Chatre
2024-05-15 22:23 ` [PATCH v18 17/17] x86/resctrl: Update documentation with Sub-NUMA cluster changes Tony Luck
2024-05-16 8:57 ` Maciej Wieczor-Retman
2024-05-22 21:27 ` Reinette Chatre
2024-05-16 9:28 ` [PATCH v18 00/17] Add support for Sub-NUMA cluster (SNC) systems Maciej Wieczor-Retman
2024-05-16 15:52 ` Luck, Tony
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20240515222326.74166-17-tony.luck@intel.com \
--to=tony.luck@intel.com \
--cc=Dave.Martin@arm.com \
--cc=babu.moger@amd.com \
--cc=dfustini@baylibre.com \
--cc=fenghua.yu@intel.com \
--cc=james.morse@arm.com \
--cc=linux-kernel@vger.kernel.org \
--cc=maciej.wieczor-retman@intel.com \
--cc=patches@lists.linux.dev \
--cc=peternewman@google.com \
--cc=reinette.chatre@intel.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox