From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
To: tony.luck@intel.com, ravi.v.shankar@intel.com,
	fenghua.yu@intel.com, vikas.shivappa@intel.com,
	vikas.shivappa@linux.intel.com
Cc: x86@kernel.org, linux-kernel@vger.kernel.org, hpa@zytor.com,
	tglx@linutronix.de, mingo@kernel.org, peterz@infradead.org,
	h.peter.anvin@intel.com
Subject: [PATCH 2/4] perf/x86/mbm: Store bytes counted for mbm during recycle
Date: Fri, 22 Apr 2016 17:27:19 -0700	[thread overview]
Message-ID: <1461371241-4258-3-git-send-email-vikas.shivappa@linux.intel.com> (raw)
In-Reply-To: <1461371241-4258-1-git-send-email-vikas.shivappa@linux.intel.com>

For MBM, since we report the total bytes for the duration that perf
counts, we need to preserve the total bytes counted every time we lose
an RMID. Introduce an rc_count (recycle count) per event to keep this
history count (all bytes counted before the current RMID).

If we do not keep this count separately, we may end up reporting a
count that is less than the previous count during 'perf stat -I'
(interval mode), which leads to negative numbers being reported by
perf. This happens, say, when we counted a larger amount with RMID1
and then a smaller amount with RMID2, and the user checks the count in
interval mode after RMID1 and then again after RMID2.
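
To make the failure mode concrete, here is a minimal userspace sketch
(plain C, not kernel code; every name below is invented for this
illustration and does not exist in the driver):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* Bytes observed while the event held each RMID (made up). */
          uint64_t rmid1_bytes = 1000;  /* counted with RMID1, then recycled */
          uint64_t rmid2_bytes = 200;   /* counted so far with RMID2 */
          uint64_t rc_count = 0;        /* history of recycled-RMID bytes */

          /*
           * Without a history count, interval 2 reports 200 after
           * interval 1 reported 1000, so perf computes a negative delta.
           */
          printf("naive delta: %lld\n",
                 (long long)rmid2_bytes - (long long)rmid1_bytes);

          /*
           * With rc_count, RMID1's bytes are banked at recycle time and
           * added to every later read, so the reported total never goes
           * backwards.
           */
          rc_count += rmid1_bytes;
          printf("fixed total: %llu\n",
                 (unsigned long long)(rc_count + rmid2_bytes));
          return 0;
  }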

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
 arch/x86/events/intel/cqm.c | 40 +++++++++++++++++++++++++++++++++++++---
 include/linux/perf_event.h  |  1 +
 2 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 8dfba39..e679c39 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -479,6 +479,16 @@ static void cqm_mask_call(struct rmid_read *rr)
 		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
 }
 
+static inline void mbm_set_rccount(
+			  struct perf_event *event, struct rmid_read *rr)
+{
+	u64 tmpval;
+
+	tmpval = local64_read(&event->hw.rc_count) + atomic64_read(&rr->value);
+	local64_set(&event->hw.rc_count, tmpval);
+	local64_set(&event->count, tmpval);
+}
+
 /*
  * Exchange the RMID of a group of events.
  */
@@ -493,12 +503,19 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 
 	/*
 	 * If our RMID is being deallocated, perform a read now.
+	 * For MBM, we need to store the bytes that were counted so far
+	 * separately.
 	 */
 	if (__rmid_valid(old_rmid) && !__rmid_valid(rmid)) {
 
 		rr = __init_rr(old_rmid, group->attr.config, 0);
 		cqm_mask_call(&rr);
-		local64_set(&group->count, atomic64_read(&rr.value));
+
+		if (is_mbm_event(group->attr.config))
+			mbm_set_rccount(group, &rr);
+		else
+			local64_set(&group->count, atomic64_read(&rr.value));
+
 		list_for_each_entry(event, head, hw.cqm_group_entry) {
 			if (event->hw.is_group_event) {
 
@@ -506,6 +523,9 @@ static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 				rr = __init_rr(old_rmid, evttype, 0);
 
 				cqm_mask_call(&rr);
+				if (is_mbm_event(event->attr.config))
+					mbm_set_rccount(event, &rr);
+				else
 					local64_set(&event->count,
 						    atomic64_read(&rr.value));
 			}
@@ -1194,6 +1214,7 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 {
 	unsigned long flags;
 	struct rmid_read rr = __init_rr(-1, event->attr.config, 0);
+	u64 tmpval;
 
 	/*
 	 * We only need to worry about task events. System-wide events
@@ -1235,6 +1256,11 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	 * busying performing the IPI calls. It's therefore necessary to
 	 * check @event's RMID afterwards, and if it has changed,
 	 * discard the result of the read.
+	 *
+	 * For MBM events, we are reading the total bytes and not
+	 * a snapshot. Hence, if the RMID was recycled during the run,
+	 * we add in rc_count, which keeps the historical count of the
+	 * old RMIDs that were used.
 	 */
 	rr.rmid = ACCESS_ONCE(event->hw.cqm_rmid);
 
@@ -1244,8 +1270,16 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	cqm_mask_call(&rr);
 
 	raw_spin_lock_irqsave(&cache_lock, flags);
-	if (event->hw.cqm_rmid == rr.rmid)
-		local64_set(&event->count, atomic64_read(&rr.value));
+	if (event->hw.cqm_rmid == rr.rmid) {
+		if (is_mbm_event(event->attr.config)) {
+			tmpval = atomic64_read(&rr.value) +
+				local64_read(&event->hw.rc_count);
+
+			local64_set(&event->count, tmpval);
+		} else {
+			local64_set(&event->count, atomic64_read(&rr.value));
+		}
+	}
 	raw_spin_unlock_irqrestore(&cache_lock, flags);
 out:
 	return __perf_event_count(event);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f291275..ec7772a 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -122,6 +122,7 @@ struct hw_perf_event {
 			int			cqm_state;
 			u32			cqm_rmid;
 			int			is_group_event;
+			local64_t		rc_count;
 			struct list_head	cqm_events_entry;
 			struct list_head	cqm_groups_entry;
 			struct list_head	cqm_group_entry;
-- 
1.9.1


Thread overview: 21+ messages
2016-04-23  0:27 [PATCH V1 0/4] Urgent fixes for Intel CQM/MBM counting Vikas Shivappa
2016-04-23  0:27 ` [PATCH 1/4] perf/x86/cqm,mbm: Store cqm,mbm count for all events when RMID is recycled Vikas Shivappa
2016-04-25  9:20   ` Peter Zijlstra
2016-04-25 16:26     ` Vikas Shivappa
2016-04-23  0:27 ` Vikas Shivappa [this message]
2016-04-25  9:13   ` [PATCH 2/4] perf/x86/mbm: Store bytes counted for mbm during recycle Peter Zijlstra
2016-04-25 18:04     ` Vikas Shivappa
2016-04-25 20:02       ` Peter Zijlstra
2016-04-25 21:12         ` Vikas Shivappa
2016-05-03  7:46           ` Peter Zijlstra
2016-05-04  0:00             ` Vikas Shivappa
2016-04-23  0:27 ` [PATCH 3/4] perf/x86/mbm: Fix mbm counting when RMIDs are reused Vikas Shivappa
2016-04-25  9:16   ` Peter Zijlstra
2016-04-25 16:44     ` Vikas Shivappa
2016-04-25 20:05       ` Peter Zijlstra
2016-04-25 21:43         ` Vikas Shivappa
2016-04-25 21:49           ` Vikas Shivappa
2016-04-23  0:27 ` [PATCH 4/4] perf/x86/cqm: Support cqm/mbm only for perf events Vikas Shivappa
2016-04-25  9:18   ` Peter Zijlstra
2016-04-25 16:23     ` Luck, Tony
2016-04-25 20:08       ` Peter Zijlstra
