From: David Carrillo-Cisneros <davidcc@google.com>
To: linux-kernel@vger.kernel.org
Cc: "x86@kernel.org" <x86@kernel.org>, Ingo Molnar <mingo@redhat.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Andi Kleen <ak@linux.intel.com>, Kan Liang <kan.liang@intel.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Vegard Nossum <vegard.nossum@gmail.com>,
	Marcelo Tosatti <mtosatti@redhat.com>,
	Nilay Vaish <nilayvaish@gmail.com>, Borislav Petkov <bp@suse.de>,
	Vikas Shivappa <vikas.shivappa@linux.intel.com>,
	Ravi V Shankar <ravi.v.shankar@intel.com>,
	Fenghua Yu <fenghua.yu@intel.com>, Paul Turner <pjt@google.com>,
	Stephane Eranian <eranian@google.com>,
	David Carrillo-Cisneros <davidcc@google.com>
Subject: [PATCH v3 11/46] perf/x86/intel/cmt: add cmt_user_flags (uflags) to monr
Date: Sat, 29 Oct 2016 17:38:08 -0700
Message-ID: <1477787923-61185-12-git-send-email-davidcc@google.com>
In-Reply-To: <1477787923-61185-1-git-send-email-davidcc@google.com>

uflags allow users to signal special behavior for a pmonr. This patch
series introduces two uflags that provide new behavior and are relevant
to users:
  1) CMT_UF_NOLAZY_RMID: signals that rmids must be reserved immediately.
  2) CMT_UF_NOSTEAL_RMID: signals that rmids cannot be stolen.

A monr maintains one cmt_user_flags field at "monr level" and a set of
"package level" ones, one per possible hardware package.

The effective uflags of a pmonr are the OR of its monr's monr level uflags
and the package level uflags of the pmonr's pkgd.
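
For example (a worked illustration only: CMT_UF_NOLAZY_RMID and the package
index used here are hypothetical, the flag is only defined later in this
series):

  /*
   * Illustration, not part of the patch: with
   *	monr->uflags        == CMT_UF_HAS_USER
   *	monr->pkg_uflags[2] == CMT_UF_NOLAZY_RMID
   * the pmonr in package 2 gets the effective uflags
   *	CMT_UF_HAS_USER | CMT_UF_NOLAZY_RMID
   * while the pmonrs in all other packages get only CMT_UF_HAS_USER.
   */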

A user passes uflags for all pmonrs in an event's monr by setting them in
the perf_event_attr::config1 field, as sketched below. In future patches in
this series, users will also be able to specify per-package uflags through
attributes in the perf cgroup fs.
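
As a rough sketch of that user-facing side (illustrative only: the helper
name cmt_open_event and the pmu_type/cmt_config parameters are hypothetical;
the intel_cmt PMU type and its event encoding must be read from sysfs at
runtime):

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <unistd.h>

  /*
   * Sketch, not part of the patch: pmu_type comes from
   * /sys/bus/event_source/devices/intel_cmt/type, cmt_config is whatever
   * event encoding the driver exposes there, and uflags is a mask of
   * CMT_UF_* bits passed through config1.
   */
  static int cmt_open_event(__u32 pmu_type, __u64 cmt_config,
			    __u64 uflags, pid_t pid)
  {
	struct perf_event_attr attr = {
		.type    = pmu_type,
		.size    = sizeof(attr),
		.config  = cmt_config,
		.config1 = uflags,	/* e.g. CMT_UF_NOLAZY_RMID, once defined */
	};

	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
  }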

This patch only introduces the infrastructure to maintain uflags and the
first uflag, CMT_UF_HAS_USER, which marks monrs and pmonrs as in use by a
cgroup or event. This flag is special because it is always taken as set for
a perf event, regardless of the value in event->attr.config1.

Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
 arch/x86/events/intel/cmt.c | 166 ++++++++++++++++++++++++++++++++++++++++++--
 arch/x86/events/intel/cmt.h |  18 +++++
 2 files changed, 180 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 06e6325..07560e5 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -29,6 +29,13 @@ static unsigned int __min_max_rmid;	/* minimum max_rmid across all pkgs. */
 /* Root for system-wide hierarchy of MONitored Resources (monr). */
 static struct monr *monr_hrchy_root;
 
+/* Flags for root monr and all its pmonrs while being monitored. */
+static enum cmt_user_flags root_monr_uflags = CMT_UF_HAS_USER;
+
+/* Auxiliary flags. */
+static enum cmt_user_flags *pkg_uflags_zeroes;
+static size_t pkg_uflags_size;
+
 /* Array of packages (array of pkgds). It's protected by RCU or cmt_mutex. */
 static struct pkg_data **cmt_pkgs_data;
 
@@ -128,10 +135,19 @@ static struct pmonr *pmonr_alloc(struct pkg_data *pkgd)
 	return pmonr;
 }
 
+static inline bool monr_is_root(struct monr *monr)
+{
+	return monr_hrchy_root == monr;
+}
+
 static void monr_dealloc(struct monr *monr)
 {
 	u16 p, nr_pkgs = topology_max_packages();
 
+	if (WARN_ON_ONCE(monr->nr_has_user) ||
+	    WARN_ON_ONCE(monr->mon_events))
+		return;
+
 	for (p = 0; p < nr_pkgs; p++) {
 		/* out of monr_hrchy, so no need for rcu or lock protection. */
 		if (!monr->pmonrs[p])
@@ -150,7 +166,8 @@ static struct monr *monr_alloc(void)
 
 	lockdep_assert_held(&cmt_mutex);
 
-	monr = kzalloc(sizeof(*monr), GFP_KERNEL);
+	/* Extra space for pkg_uflags. */
+	monr = kzalloc(sizeof(*monr) + pkg_uflags_size, GFP_KERNEL);
 	if (!monr)
 		return ERR_PTR(-ENOMEM);
 
@@ -183,14 +200,118 @@ static struct monr *monr_alloc(void)
 	return monr;
 }
 
+static enum cmt_user_flags pmonr_uflags(struct pmonr *pmonr)
+{
+	struct monr *monr = pmonr->monr;
+
+	return monr->uflags | monr->pkg_uflags[pmonr->pkgd->pkgid];
+}
+
+static int __pmonr_apply_uflags(struct pmonr *pmonr,
+		enum cmt_user_flags pmonr_uflags)
+{
+	if (monr_is_root(pmonr->monr) && (~pmonr_uflags & root_monr_uflags))
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool pkg_uflags_has_user(enum cmt_user_flags *uflags)
+{
+	int p, nr_pkgs = topology_max_packages();
+
+	for (p = 0; p < nr_pkgs; p++)
+		if (uflags[p] & CMT_UF_HAS_USER)
+			return true;
+	return false;
+}
+
+static bool monr_has_user(struct monr *monr)
+{
+	return monr->uflags & CMT_UF_HAS_USER ||
+	       pkg_uflags_has_user(monr->pkg_uflags);
+}
+
+static int __monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
+{
+	enum cmt_user_flags pmonr_uflags;
+	struct pkg_data *pkgd = NULL;
+	struct pmonr *pmonr;
+	int p, err;
+
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+		p = pkgd->pkgid;
+		pmonr_uflags = monr->uflags |
+				(puflags ? puflags[p] : monr->pkg_uflags[p]);
+		pmonr = pkgd_pmonr(pkgd, monr);
+		err = __pmonr_apply_uflags(pmonr, pmonr_uflags);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Apply puflags to all packages, or roll back and fail. */
+static int monr_apply_uflags(struct monr *monr, enum cmt_user_flags *puflags)
+{
+	int p, err;
+	unsigned long flags;
+
+	monr_hrchy_assert_held_mutexes();
+	monr_hrchy_acquire_locks(&flags);
+
+	err = __monr_apply_uflags(monr, puflags);
+	if (err)
+		goto exit;
+
+	/* Nothing to update in pkg_uflags if no puflags were passed. */
+	if (!puflags)
+		goto exit;
+
+	/*
+	 * Now that we've succeeded in applying puflags to online packages,
+	 * store the new puflags for all packages, even those not online.
+	 * It's up to CPU hotplug to apply the pkg_uflags in oncoming packages.
+	 */
+	for (p = 0; p < topology_max_packages(); p++)
+		monr->pkg_uflags[p] = puflags[p];
+
+exit:
+	monr_hrchy_release_locks(&flags);
+
+	return err;
+}
+
 static inline struct monr *monr_from_event(struct perf_event *event)
 {
 	return (struct monr *) READ_ONCE(event->hw.cmt_monr);
 }
 
+static enum cmt_user_flags uflags_from_event(struct perf_event *event)
+{
+	return event->attr.config1 | CMT_UF_HAS_USER;
+}
+
+/* Return true if monr uflags changed, false otherwise. */
+static bool monr_account_uflags(struct monr *monr,
+				enum cmt_user_flags uflags, bool account)
+{
+	enum cmt_user_flags old_flags = monr->uflags;
+
+	if (uflags & CMT_UF_HAS_USER)
+		monr->nr_has_user += account ? 1 : -1;
+
+	monr->uflags = (monr->nr_has_user ? CMT_UF_HAS_USER : 0);
+
+	return old_flags != monr->uflags;
+}
+
 static struct monr *monr_remove_event(struct perf_event *event)
 {
 	struct monr *monr = monr_from_event(event);
+	enum cmt_user_flags uflags = uflags_from_event(event);
+	int err;
 
 	lockdep_assert_held(&cmt_mutex);
 	monr_hrchy_assert_held_mutexes();
@@ -207,11 +328,23 @@ static struct monr *monr_remove_event(struct perf_event *event)
 
 	WRITE_ONCE(event->hw.cmt_monr, NULL);
 
+	if (monr_account_uflags(monr, uflags, false)) {
+		/*
+		 * Undo flags; this cannot fail since flags require rmids
+		 * and fewer flags mean fewer rmids required.
+		 */
+		err = monr_apply_uflags(monr, NULL);
+		WARN_ON_ONCE(err);
+	}
+
 	return monr;
 }
 
 static int monr_append_event(struct monr *monr, struct perf_event *event)
 {
+	enum cmt_user_flags uflags = uflags_from_event(event);
+	int err;
+
 	lockdep_assert_held(&cmt_mutex);
 	monr_hrchy_assert_held_mutexes();
 
@@ -225,7 +358,14 @@ static int monr_append_event(struct monr *monr, struct perf_event *event)
 
 	WRITE_ONCE(event->hw.cmt_monr, monr);
 
-	return 0;
+	if (!monr_account_uflags(monr, uflags, true))
+		return 0;
+
+	err = monr_apply_uflags(monr, NULL);
+	if (err)
+		monr_remove_event(event);
+
+	return err;
 }
 
 static void monr_hrchy_insert_leaf(struct monr *monr, struct monr *parent)
@@ -465,7 +605,8 @@ static void intel_cmt_event_destroy(struct perf_event *event)
 
 	/* monr is dettached from event. */
 	monr = monr_remove_event(event);
-	monr_destroy(monr);
+	if (!monr_has_user(monr))
+		monr_destroy(monr);
 
 	monr_hrchy_release_mutexes();
 	mutex_unlock(&cmt_mutex);
@@ -625,6 +766,7 @@ static int init_pkg_data(int cpu)
 	struct monr *pos = NULL;
 	struct pkg_data *pkgd;
 	struct pmonr *pmonr;
+	unsigned long flags;
 	int err = 0;
 	u16 pkgid = topology_logical_package_id(cpu);
 
@@ -650,6 +792,10 @@ static int init_pkg_data(int cpu)
 		 * not set in cmt_pkgs_data yet.
 		 */
 		RCU_INIT_POINTER(pos->pmonrs[pkgid], pmonr);
+
+		raw_spin_lock_irqsave(&pkgd->lock, flags);
+		err = __pmonr_apply_uflags(pmonr, pmonr_uflags(pmonr));
+		raw_spin_unlock_irqrestore(&pkgd->lock, flags);
 	}
 
 	if (err) {
@@ -739,6 +885,9 @@ static void cmt_dealloc(void)
 
 	kfree(cmt_pkgs_data);
 	cmt_pkgs_data = NULL;
+
+	kfree(pkg_uflags_zeroes);
+	pkg_uflags_zeroes = NULL;
 }
 
 static void cmt_stop(void)
@@ -749,6 +898,11 @@ static void cmt_stop(void)
 
 static int __init cmt_alloc(void)
 {
+	pkg_uflags_size = sizeof(*pkg_uflags_zeroes) * topology_max_packages();
+	pkg_uflags_zeroes = kzalloc(pkg_uflags_size, GFP_KERNEL);
+	if (!pkg_uflags_zeroes)
+		return -ENOMEM;
+
 	cmt_l3_scale = boot_cpu_data.x86_cache_occ_scale;
 	if (cmt_l3_scale == 0)
 		cmt_l3_scale = 1;
@@ -771,7 +925,11 @@ static int __init cmt_alloc(void)
 static int __init cmt_start(void)
 {
 	char *str, scale[20];
-	int err;
+	int err, p;
+
+	monr_account_uflags(monr_hrchy_root, root_monr_uflags, true);
+	for (p = 0; p < topology_max_packages(); p++)
+		monr_hrchy_root->pkg_uflags[p] = root_monr_uflags;
 
 	/* will be modified by init_pkg_data() in intel_cmt_prep_up(). */
 	__min_max_rmid = UINT_MAX;
diff --git a/arch/x86/events/intel/cmt.h b/arch/x86/events/intel/cmt.h
index 7f3a7b8..66b078a 100644
--- a/arch/x86/events/intel/cmt.h
+++ b/arch/x86/events/intel/cmt.h
@@ -76,6 +76,16 @@ struct pkg_data {
 };
 
 /**
+ * enum cmt_user_flags - user set flags for monr and pmonrs.
+ */
+enum cmt_user_flags {
+	/* If has_user is not set, the other flags are meaningless. */
+	CMT_UF_HAS_USER		= BIT(0), /* has cgroup or event users */
+	CMT_UF_MAX		= BIT(1) - 1,
+	CMT_UF_ERROR		= CMT_UF_MAX + 1,
+};
+
+/**
  * struct monr - MONitored Resource.
  * @mon_events:		The head of event's group that use this monr, if any.
  * @entry:		List entry into cmt_event_monrs.
@@ -83,6 +93,10 @@ struct pkg_data {
  * @parent:		Parent in monr hierarchy.
  * @children:		List of children in monr hierarchy.
  * @parent_entry:	Entry in parent's children list.
+ * @nr_has_user:	nr of events in mon_events that set CMT_UF_HAS_USER.
+ * @uflags:		monr level cmt_user_flags, OR'ed with pkg_uflags.
+ * @pkg_uflags:		package level cmt_user_flags; each entry is used as
+ *			pmonr uflags if that package is online.
  *
  * An monr is assigned to every CMT event and/or monitored cgroups when
  * monitoring is activated and that instance's address do not change during
@@ -98,4 +112,8 @@ struct monr {
 	struct monr			*parent;
 	struct list_head		children;
 	struct list_head		parent_entry;
+
+	int				nr_has_user;
+	enum cmt_user_flags		uflags;
+	enum cmt_user_flags		pkg_uflags[];
 };
-- 
2.8.0.rc3.226.g39d4020
