public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: James Morse <james.morse@arm.com>
To: x86@kernel.org, linux-kernel@vger.kernel.org
Cc: Fenghua Yu <fenghua.yu@intel.com>,
	Reinette Chatre <reinette.chatre@intel.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	H Peter Anvin <hpa@zytor.com>, Babu Moger <Babu.Moger@amd.com>,
	James Morse <james.morse@arm.com>,
	shameerali.kolothum.thodi@huawei.com,
	D Scott Phillips OS <scott@os.amperecomputing.com>,
	carl@os.amperecomputing.com, lcherian@marvell.com,
	bobo.shaobowang@huawei.com, tan.shaopeng@fujitsu.com,
	xingxin.hx@openanolis.org, baolin.wang@linux.alibaba.com,
	Jamie Iles <quic_jiles@quicinc.com>,
	Xin Hao <xhao@linux.alibaba.com>,
	peternewman@google.com, dfustini@baylibre.com,
	amitsinght@marvell.com
Subject: [PATCH v6 08/24] x86/resctrl: Track the number of dirty RMID a CLOSID has
Date: Thu, 14 Sep 2023 17:21:22 +0000	[thread overview]
Message-ID: <20230914172138.11977-9-james.morse@arm.com> (raw)
In-Reply-To: <20230914172138.11977-1-james.morse@arm.com>

MPAM's PMG bits extend its PARTID space, meaning the same PMG value can be
used for different control groups.

This means once a CLOSID is allocated, all its monitoring IDs may still be
dirty, and held in limbo.

Keep track of the number of RMIDs held in limbo that each CLOSID has. This
will allow a future helper to find the 'cleanest' CLOSID when allocating.

The array is only needed when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is
defined. This will never be the case on x86.

Reviewed-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Tested-by: Shaopeng Tan <tan.shaopeng@fujitsu.com>
Tested-by: Peter Newman <peternewman@google.com>
Signed-off-by: James Morse <james.morse@arm.com>
---
Changes since v4:
 * Moved closid_num_dirty_rmid[] update under entry->busy check
 * Take the mutex in dom_data_init() as the caller doesn't.

Changes since v5:
 * Added braces after an else.
 * Made closid_num_dirty_rmid an unsigned int.
 * Moved mutex_lock() in dom_data_init() to cover the whole function.
---
 arch/x86/kernel/cpu/resctrl/monitor.c | 66 +++++++++++++++++++++++----
 1 file changed, 56 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index d286aba1ee63..0c783301d106 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -51,6 +51,13 @@ struct rmid_entry {
  */
 static LIST_HEAD(rmid_free_lru);
 
+/**
+ * @closid_num_dirty_rmid    The number of dirty RMID each CLOSID has.
+ *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
+ *     Indexed by CLOSID. Protected by rdtgroup_mutex.
+ */
+static unsigned int *closid_num_dirty_rmid;
+
 /**
  * @rmid_limbo_count     count of currently unused but (potentially)
  *     dirty RMIDs.
@@ -293,6 +300,17 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
 	return 0;
 }
 
+static void limbo_release_entry(struct rmid_entry *entry)
+{
+	lockdep_assert_held(&rdtgroup_mutex);
+
+	rmid_limbo_count--;
+	list_add_tail(&entry->list, &rmid_free_lru);
+
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+		closid_num_dirty_rmid[entry->closid]--;
+}
+
 /*
  * Check the RMIDs that are marked as busy for this domain. If the
  * reported LLC occupancy is below the threshold clear the busy bit and
@@ -329,10 +347,8 @@ void __check_limbo(struct rdt_domain *d, bool force_free)
 
 		if (force_free || !rmid_dirty) {
 			clear_bit(idx, d->rmid_busy_llc);
-			if (!--entry->busy) {
-				rmid_limbo_count--;
-				list_add_tail(&entry->list, &rmid_free_lru);
-			}
+			if (!--entry->busy)
+				limbo_release_entry(entry);
 		}
 		cur_idx = idx + 1;
 	}
@@ -400,6 +416,8 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	u64 val = 0;
 	u32 idx;
 
+	lockdep_assert_held(&rdtgroup_mutex);
+
 	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);
 
 	entry->busy = 0;
@@ -425,10 +443,13 @@ static void add_rmid_to_limbo(struct rmid_entry *entry)
 	}
 	put_cpu();
 
-	if (entry->busy)
+	if (entry->busy) {
 		rmid_limbo_count++;
-	else
+		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
+			closid_num_dirty_rmid[entry->closid]++;
+	} else {
 		list_add_tail(&entry->list, &rmid_free_lru);
+	}
 }
 
 void free_rmid(u32 closid, u32 rmid)
@@ -796,13 +817,30 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
 static int dom_data_init(struct rdt_resource *r)
 {
 	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
+	u32 num_closid = resctrl_arch_get_num_closid(r);
 	struct rmid_entry *entry = NULL;
+	int err = 0, i;
 	u32 idx;
-	int i;
+
+	mutex_lock(&rdtgroup_mutex);
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
+		int *tmp;
+
+		tmp = kcalloc(num_closid, sizeof(int), GFP_KERNEL);
+		if (!tmp) {
+			err = -ENOMEM;
+			goto out_unlock;
+		}
+
+		closid_num_dirty_rmid = tmp;
+	}
 
 	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
-	if (!rmid_ptrs)
-		return -ENOMEM;
+	if (!rmid_ptrs) {
+		kfree(closid_num_dirty_rmid);
+		err = -ENOMEM;
+		goto out_unlock;
+	}
 
 	for (i = 0; i < idx_limit; i++) {
 		entry = &rmid_ptrs[i];
@@ -822,13 +860,21 @@ static int dom_data_init(struct rdt_resource *r)
 	entry = __rmid_entry(idx);
 	list_del(&entry->list);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&rdtgroup_mutex);
+
+	return err;
 }
 
 void resctrl_exit_mon_l3_config(struct rdt_resource *r)
 {
 	mutex_lock(&rdtgroup_mutex);
 
+	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
+		kfree(closid_num_dirty_rmid);
+		closid_num_dirty_rmid = NULL;
+	}
+
 	kfree(rmid_ptrs);
 	rmid_ptrs = NULL;
 
-- 
2.39.2


  parent reply	other threads:[~2023-09-14 17:22 UTC|newest]

Thread overview: 80+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-09-14 17:21 [PATCH v6 00/24] x86/resctrl: monitored closid+rmid together, separate arch/fs locking James Morse
2023-09-14 17:21 ` [PATCH v6 01/24] tick/nohz: Move tick_nohz_full_mask declaration outside the #ifdef James Morse
2023-09-26 14:31   ` Fenghua Yu
2023-10-03 21:05   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 02/24] x86/resctrl: kfree() rmid_ptrs from rdtgroup_exit() James Morse
2023-10-02 17:00   ` Reinette Chatre
2023-10-05 17:05     ` James Morse
2023-10-05 18:04       ` Reinette Chatre
2023-10-25 17:56         ` James Morse
2023-10-04 18:00   ` Moger, Babu
2023-10-05 17:06     ` James Morse
2023-09-14 17:21 ` [PATCH v6 03/24] x86/resctrl: Create helper for RMID allocation and mondata dir creation James Morse
2023-10-03 21:07   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 04/24] x86/resctrl: Move rmid allocation out of mkdir_rdt_prepare() James Morse
2023-10-03 21:07   ` Reinette Chatre
2023-10-04 18:01   ` Moger, Babu
2023-10-05 17:06     ` James Morse
2023-09-14 17:21 ` [PATCH v6 05/24] x86/resctrl: Track the closid with the rmid James Morse
2023-10-03 21:11   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 06/24] x86/resctrl: Access per-rmid structures by index James Morse
2023-10-03 21:12   ` Reinette Chatre
2023-10-24  9:28   ` Maciej Wieczór-Retman
2023-09-14 17:21 ` [PATCH v6 07/24] x86/resctrl: Allow RMID allocation to be scoped by CLOSID James Morse
2023-10-03 21:12   ` Reinette Chatre
2023-09-14 17:21 ` James Morse [this message]
2023-10-03 21:13   ` [PATCH v6 08/24] x86/resctrl: Track the number of dirty RMID a CLOSID has Reinette Chatre
2023-10-05 17:07     ` James Morse
2023-09-14 17:21 ` [PATCH v6 09/24] x86/resctrl: Use set_bit()/clear_bit() instead of open coding James Morse
2023-09-17 21:00   ` David Laight
2023-09-29 16:13     ` James Morse
2023-10-03 21:14   ` Reinette Chatre
2023-10-04 20:38   ` Moger, Babu
2023-10-05 17:07     ` James Morse
2023-09-14 17:21 ` [PATCH v6 10/24] x86/resctrl: Allocate the cleanest CLOSID by searching closid_num_dirty_rmid James Morse
2023-10-03 21:14   ` Reinette Chatre
2023-10-05 20:13   ` Moger, Babu
2023-10-25 17:56     ` James Morse
2023-10-05 20:26   ` Moger, Babu
2023-10-25 17:56     ` James Morse
2023-10-24 12:06   ` Maciej Wieczór-Retman
2023-09-14 17:21 ` [PATCH v6 11/24] x86/resctrl: Move CLOSID/RMID matching and setting to use helpers James Morse
2023-10-03 21:15   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 12/24] x86/resctrl: Add cpumask_any_housekeeping() for limbo/overflow James Morse
2023-10-03 21:15   ` Reinette Chatre
2023-10-05 17:07     ` James Morse
2023-09-14 17:21 ` [PATCH v6 13/24] x86/resctrl: Queue mon_event_read() instead of sending an IPI James Morse
2023-10-03 21:17   ` Reinette Chatre
2023-10-25 17:56     ` James Morse
2023-09-14 17:21 ` [PATCH v6 14/24] x86/resctrl: Allow resctrl_arch_rmid_read() to sleep James Morse
2023-10-03 21:18   ` Reinette Chatre
2023-10-25 17:57     ` James Morse
2023-10-05 21:33   ` Moger, Babu
2023-09-14 17:21 ` [PATCH v6 15/24] x86/resctrl: Allow arch to allocate memory needed in resctrl_arch_rmid_read() James Morse
2023-10-03 21:18   ` Reinette Chatre
2023-10-05 21:46   ` Moger, Babu
2023-10-25 17:58     ` James Morse
2023-09-14 17:21 ` [PATCH v6 16/24] x86/resctrl: Make resctrl_mounted checks explicit James Morse
2023-10-03 21:19   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 17/24] x86/resctrl: Move alloc/mon static keys into helpers James Morse
2023-10-03 21:19   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 18/24] x86/resctrl: Make rdt_enable_key the arch's decision to switch James Morse
2023-10-03 21:19   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 19/24] x86/resctrl: Add helpers for system wide mon/alloc capable James Morse
2023-10-03 21:19   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 20/24] x86/resctrl: Add CPU online callback for resctrl work James Morse
2023-10-03 21:20   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 21/24] x86/resctrl: Allow overflow/limbo handlers to be scheduled on any-but cpu James Morse
2023-10-03 21:22   ` Reinette Chatre
2023-10-25 17:57     ` James Morse
2023-10-27 21:20       ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 22/24] x86/resctrl: Add cpu offline callback for resctrl work James Morse
2023-10-03 21:23   ` Reinette Chatre
2023-10-25 17:57     ` James Morse
2023-09-14 17:21 ` [PATCH v6 23/24] x86/resctrl: Move domain helper migration into resctrl_offline_cpu() James Morse
2023-10-03 21:23   ` Reinette Chatre
2023-09-14 17:21 ` [PATCH v6 24/24] x86/resctrl: Separate arch and fs resctrl locks James Morse
2023-10-03 21:28   ` Reinette Chatre
2023-10-25 17:55     ` James Morse
2023-09-27  7:38 ` [PATCH v6 00/24] x86/resctrl: monitored closid+rmid together, separate arch/fs locking Shaopeng Tan (Fujitsu)
2023-09-29 16:13   ` James Morse

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230914172138.11977-9-james.morse@arm.com \
    --to=james.morse@arm.com \
    --cc=Babu.Moger@amd.com \
    --cc=amitsinght@marvell.com \
    --cc=baolin.wang@linux.alibaba.com \
    --cc=bobo.shaobowang@huawei.com \
    --cc=bp@alien8.de \
    --cc=carl@os.amperecomputing.com \
    --cc=dfustini@baylibre.com \
    --cc=fenghua.yu@intel.com \
    --cc=hpa@zytor.com \
    --cc=lcherian@marvell.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=peternewman@google.com \
    --cc=quic_jiles@quicinc.com \
    --cc=reinette.chatre@intel.com \
    --cc=scott@os.amperecomputing.com \
    --cc=shameerali.kolothum.thodi@huawei.com \
    --cc=tan.shaopeng@fujitsu.com \
    --cc=tglx@linutronix.de \
    --cc=x86@kernel.org \
    --cc=xhao@linux.alibaba.com \
    --cc=xingxin.hx@openanolis.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox