From: David Carrillo-Cisneros <davidcc@google.com>
To: linux-kernel@vger.kernel.org
Cc: "x86@kernel.org" <x86@kernel.org>, Ingo Molnar <mingo@redhat.com>,
Thomas Gleixner <tglx@linutronix.de>,
Andi Kleen <ak@linux.intel.com>, Kan Liang <kan.liang@intel.com>,
Peter Zijlstra <peterz@infradead.org>,
Vegard Nossum <vegard.nossum@gmail.com>,
Marcelo Tosatti <mtosatti@redhat.com>,
Nilay Vaish <nilayvaish@gmail.com>, Borislav Petkov <bp@suse.de>,
Vikas Shivappa <vikas.shivappa@linux.intel.com>,
Ravi V Shankar <ravi.v.shankar@intel.com>,
Fenghua Yu <fenghua.yu@intel.com>, Paul Turner <pjt@google.com>,
Stephane Eranian <eranian@google.com>,
David Carrillo-Cisneros <davidcc@google.com>
Subject: [PATCH v3 44/46] perf/x86/intel/cmt: add debugfs intel_cmt directory
Date: Sat, 29 Oct 2016 17:38:41 -0700 [thread overview]
Message-ID: <1477787923-61185-45-git-send-email-davidcc@google.com> (raw)
In-Reply-To: <1477787923-61185-1-git-send-email-davidcc@google.com>
Add a debugfs directory to intel_cmt to help with maintenance and
debugging. It exposes the following human-readable snapshots of the
internals of the CMT driver:
- hrchy: a per-monr view of the monr hierarchy.
- pkgs: a per-package view of all online struct pkg_data.
- rmids: a per-package view of the occupancy and state of all rmids.
Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
---
arch/x86/events/intel/cmt.c | 385 ++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 385 insertions(+)
diff --git a/arch/x86/events/intel/cmt.c b/arch/x86/events/intel/cmt.c
index 8cbcbc6..554ebd2 100644
--- a/arch/x86/events/intel/cmt.c
+++ b/arch/x86/events/intel/cmt.c
@@ -2372,6 +2372,376 @@ static ssize_t max_recycle_threshold_store(struct device *dev,
static DEVICE_ATTR_RW(max_recycle_threshold);
+
+#define INTEL_CMT_DEBUGFS
+
+#ifdef INTEL_CMT_DEBUGFS
+
+#include <linux/debugfs.h>
+
+/*
+ * Print indented by 4 spaces per nesting level. Deliberately non-hygienic:
+ * relies on a 'struct seq_file *s' and an 'int pad' being in scope at the
+ * expansion site.
+ */
+#define DBG_PRINTF(format__, ...) \
+ seq_printf(s, "%*s" format__, 4 * pad, "", ##__VA_ARGS__)
+
+/*
+ * Dump one pmonr: identity, packed rmids, state, lender and dependents.
+ * Caller must keep the pmonr stable (package lock held or hierarchy
+ * traversal serialized by cmt_mutex).
+ */
+static void cmt_dbg_show__pmonr(struct seq_file *s,
+				struct pmonr *pmonr,
+				int pad)
+{
+	struct pmonr *pos;
+	union pmonr_rmids rmids;
+	/* Indexed by enum pmonr state; must match the state enum order. */
+	static const char * const state_strs[] = {"OFF", "UNUSED", "ACTIVE",
+						  "DEP_IDLE", "DEP_DIRTY"};
+
+	DBG_PRINTF("pmonr: (%p, pkgid: %d, monr: %p)\n",
+		   pmonr, pmonr->pkgd->pkgid, pmonr->monr);
+	pad++;
+
+	rmids.value = atomic64_read(&pmonr->atomic_rmids);
+	DBG_PRINTF("atomic_rmids: (%d,%d), state: %s, lender: %p\n",
+		   rmids.sched_rmid, rmids.read_rmid,
+		   state_strs[pmonr->state], pmonr->lender);
+
+	/* Only Active pmonrs have dependents. */
+	if (pmonr->state == PMONR_ACTIVE) {
+		DBG_PRINTF("pmonr_deps_head:");
+		list_for_each_entry(pos, &pmonr->pmonr_deps_head,
+				    pmonr_deps_entry) {
+			seq_printf(s, "%p,", pos);
+		}
+		seq_puts(s, "\n");
+	}
+
+	/*
+	 * atomic64_read() returns a 64-bit value (long long in the kernel);
+	 * %lu would be a format mismatch, so cast and use %llu.
+	 */
+	DBG_PRINTF("last_enter_active: %llu\n",
+		   (u64)atomic64_read(&pmonr->last_enter_active));
+}
+
+/*
+ * Dump one monr and, per online package, its pmonrs. Caller must hold
+ * cmt_mutex so the monr hierarchy cannot change underneath us.
+ */
+static void cmt_dbg_show__monr(struct seq_file *s, struct monr *monr, int pad)
+{
+	struct pkg_data *pkgd = NULL;
+	struct monr *pos;
+	struct pmonr *pmonr;
+	int p;
+
+	DBG_PRINTF("\nmonr: %p, parent: %p\n", monr, monr->parent);
+
+	pad++;
+
+	DBG_PRINTF("children: [");
+	list_for_each_entry(pos, &monr->children, parent_entry)
+		seq_printf(s, "%p, ", pos);
+	seq_puts(s, "]\n");
+
+	/*
+	 * Terminate this line with '\n'; otherwise the next DBG_PRINTF
+	 * would inject its indentation padding mid-line.
+	 */
+	DBG_PRINTF("mon_cgrp: (%s, %p)\n",
+		   monr->mon_cgrp ? monr->mon_cgrp->css.cgroup->kn->name : "NA",
+		   monr->mon_cgrp);
+	DBG_PRINTF("mon_events: %p\n", monr->mon_events);
+
+	DBG_PRINTF("pmonrs:\n");
+	rcu_read_lock();
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+		pmonr = pkgd_pmonr(pkgd, monr);
+		cmt_dbg_show__pmonr(s, pmonr, pad + 1);
+	}
+	rcu_read_unlock();
+
+	/* atomic64_read() yields a 64-bit value: use a 64-bit format. */
+	DBG_PRINTF("last_rmid_recoup: %llu, nr_dep_pmonrs: %d, flags: %x\n",
+		   (u64)atomic64_read(&monr->last_rmid_recoup),
+		   atomic_read(&monr->nr_dep_pmonrs), monr->flags);
+	/* Second half continues the same output line: plain seq_printf. */
+	DBG_PRINTF("nr_has_user: %d, nr_nosteal_rmid: %d, ",
+		   monr->nr_has_user, monr->nr_nosteal_rmid);
+	seq_printf(s, "nr_nolazy_rmid: %d, uflags: %x\n",
+		   monr->nr_nolazy_rmid, monr->uflags);
+	DBG_PRINTF("pkg_uflags: [");
+	for (p = 0; p < topology_max_packages(); p++)
+		seq_printf(s, "%x;", monr->pkg_uflags[p]);
+	seq_puts(s, "]\n");
+}
+
+
+/*
+ * Dump one package's pkg_data: rmid pools, the per-state pmonr lists and,
+ * optionally (csd_full), the full call_single_data of each per-rmid ccsd.
+ * Takes pkgd->lock, so the snapshot of one package is self-consistent.
+ */
+static void cmt_dbg_show__pkgd(struct seq_file *s,
+ struct pkg_data *pkgd, int pad, bool csd_full)
+{
+ struct pmonr *pmonr;
+ unsigned long flags;
+ int r;
+
+ raw_spin_lock_irqsave(&pkgd->lock, flags);
+
+ DBG_PRINTF("\npkgd: %p, pkgid: %d, max_rmid: %d\n",
+ pkgd, pkgd->pkgid, pkgd->max_rmid);
+ pad++;
+
+ /* %*pbl prints a bitmap as a ranged list, e.g. "0-3,7". */
+ DBG_PRINTF("free_rmids: [%*pbl]\n",
+ CMT_MAX_NR_RMIDS, pkgd->free_rmids);
+ DBG_PRINTF("dirty_rmids: [%*pbl]\n",
+ CMT_MAX_NR_RMIDS, pkgd->dirty_rmids);
+
+ DBG_PRINTF("active_pmonrs:\n");
+ list_for_each_entry(pmonr, &pkgd->active_pmonrs, rot_entry)
+ cmt_dbg_show__pmonr(s, pmonr, pad + 1);
+
+ DBG_PRINTF("dep_idle_pmonrs:\n");
+ list_for_each_entry(pmonr, &pkgd->dep_idle_pmonrs, rot_entry)
+ cmt_dbg_show__pmonr(s, pmonr, pad + 1);
+
+ DBG_PRINTF("dep_dirty_pmonrs:\n");
+ list_for_each_entry(pmonr, &pkgd->dep_dirty_pmonrs, rot_entry)
+ cmt_dbg_show__pmonr(s, pmonr, pad + 1);
+ /*
+ * Only print the pmonr pointers, since these pmonrs were already shown
+ * in full in the Dep_Idle or Dep_Dirty lists above.
+ */
+ DBG_PRINTF("dep_pmonrs: [");
+ list_for_each_entry(pmonr, &pkgd->dep_pmonrs, pkgd_deps_entry)
+ seq_printf(s, "%p,", pmonr);
+ seq_puts(s, "]\n");
+
+ DBG_PRINTF("nr_dirty_rmids: %d, nr_dep_pmonrs: %d\n",
+ pkgd->nr_dirty_rmids, pkgd->nr_dep_pmonrs);
+
+ DBG_PRINTF("work_cpu: %d\n", pkgd->work_cpu);
+
+ /* One ccsd tuple per rmid, four tuples per output line. */
+ DBG_PRINTF("ccsds (");
+ if (csd_full)
+ seq_puts(s, "flags, info, func, ");
+ seq_puts(s, "on_read, value, ret, rmid): [");
+
+ pad += 1;
+ for (r = 0; r <= pkgd->max_rmid; r++) {
+ struct cmt_csd *ccsd = &pkgd->ccsds[r];
+
+ if (r % 4 == 0) {
+ seq_puts(s, "\n");
+ DBG_PRINTF("(");
+ } else {
+ seq_puts(s, "(");
+ }
+
+ if (csd_full) {
+ seq_printf(s, "%d, %p, %p, ", ccsd->csd.flags,
+ ccsd->csd.info, ccsd->csd.func);
+ }
+ seq_printf(s, "%d, %llu, %d, %d",
+ atomic_read(&ccsd->on_read), ccsd->value,
+ ccsd->ret, ccsd->rmid);
+ seq_puts(s, "),\t");
+ }
+ seq_puts(s, "]");
+ pad -= 1;
+
+ raw_spin_unlock_irqrestore(&pkgd->lock, flags);
+}
+
+/*
+ * seq_file show callback for the "pkgs" file: dump every online package.
+ * NOTE(review): cmt_pkgs_data_next_rcu() is called under cmt_mutex rather
+ * than rcu_read_lock() -- presumably cmt_mutex also pins the pkgd list
+ * against hotplug; verify against the pkgd add/remove paths.
+ */
+static int cmt_dbg_pkgs_show(struct seq_file *s, void *unused)
+{
+ struct pkg_data *pkgd = NULL;
+ int pad = 0;
+
+ mutex_lock(&cmt_mutex);
+
+ while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+ cmt_dbg_show__pkgd(s, pkgd, pad, false);
+ seq_puts(s, "\n");
+ }
+
+ mutex_unlock(&cmt_mutex);
+
+ return 0;
+}
+
+/*
+ * seq_file show callback for the "hrchy" file: pre-order walk of the monr
+ * hierarchy starting at monr_hrchy_root, serialized by cmt_mutex.
+ */
+static int cmt_dbg_hrchy_show(struct seq_file *s, void *unused)
+{
+ struct monr *pos = NULL;
+ int pad = 0;
+
+ mutex_lock(&cmt_mutex);
+ while ((pos = monr_next_descendant_pre(pos, monr_hrchy_root)))
+ cmt_dbg_show__monr(s, pos, pad);
+ mutex_unlock(&cmt_mutex);
+
+ return 0;
+}
+
+/* Must run on a CPU in pkgd's pkg. */
+static int cmt_dbg_rmids__rmids(struct seq_file *s, struct pkg_data *pkgd,
+ unsigned long *rmids, int pad)
+{
+ unsigned long zero_val[CMT_MAX_NR_RMIDS_LONGS];
+ int err, r, nr_printed = 0;
+ u64 val;
+
+ bitmap_copy(zero_val, rmids, CMT_MAX_NR_RMIDS);
+
+ DBG_PRINTF("non-zero value (rmid, scaled llc_occupancy): [");
+ pad++;
+ for_each_set_bit(r, rmids, CMT_MAX_NR_RMIDS) {
+ err = cmt_rmid_read(r, &val);
+ if (!err && !val)
+ continue;
+
+ if (nr_printed % 4 == 0) {
+ seq_puts(s, "\n");
+ DBG_PRINTF("(");
+ } else {
+ seq_puts(s, "(");
+ }
+ nr_printed++;
+
+ if (err) {
+ seq_printf(s, "%d, error: %d),\t", r, err);
+ __clear_bit(r, zero_val);
+ continue;
+ }
+ seq_printf(s, "%d, %llu), ", r, val * cmt_l3_scale);
+ __clear_bit(r, zero_val);
+ }
+ seq_puts(s, "]\n");
+ pad--;
+
+ DBG_PRINTF("zero value: [%*pbl]\n", CMT_MAX_NR_RMIDS, zero_val);
+
+ return 0;
+}
+
+/*
+ * Print, for one package, the occupancy of the free, dirty and in-use
+ * rmids. Runs on a CPU in pkgd's package (see cmt_dbg_rmids_pkgd()).
+ */
+static int __cmt_dbg_rmids__pkgd(struct seq_file *s,
+				 struct pkg_data *pkgd, int pad)
+{
+	unsigned long rmids_in_pmonr[CMT_MAX_NR_RMIDS_LONGS];
+	int r;
+
+	raw_spin_lock(&pkgd->lock);
+
+	/*
+	 * rmids in use by a pmonr are those neither free nor dirty. Compute
+	 * the set while holding pkgd->lock so it is consistent with the
+	 * free and dirty bitmaps printed below (reading them unlocked could
+	 * race with rmid state transitions).
+	 */
+	memset(rmids_in_pmonr, 0, CMT_MAX_NR_RMIDS_BYTES);
+	bitmap_fill(rmids_in_pmonr, pkgd->max_rmid + 1);
+
+	for_each_set_bit(r, pkgd->free_rmids, CMT_MAX_NR_RMIDS)
+		__clear_bit(r, rmids_in_pmonr);
+
+	for_each_set_bit(r, pkgd->dirty_rmids, CMT_MAX_NR_RMIDS)
+		__clear_bit(r, rmids_in_pmonr);
+
+	DBG_PRINTF("free_rmids:\n");
+	cmt_dbg_rmids__rmids(s, pkgd, pkgd->free_rmids, pad + 1);
+
+	DBG_PRINTF("dirty_rmids:\n");
+	cmt_dbg_rmids__rmids(s, pkgd, pkgd->dirty_rmids, pad + 1);
+
+	DBG_PRINTF("rmids_in_pmonr:\n");
+	cmt_dbg_rmids__rmids(s, pkgd, rmids_in_pmonr, pad + 1);
+
+	raw_spin_unlock(&pkgd->lock);
+
+	return 0;
+}
+
+/* Argument bundle for running __cmt_dbg_rmids__pkgd() on a remote CPU. */
+struct dbg_smp_data {
+	struct seq_file *s;
+	struct pkg_data *pkgd;
+	int pad;
+};
+
+/*
+ * smp_call_function_single() callback; executes on a CPU in d->pkgd's
+ * package so the rmid occupancy MSR reads target the right package.
+ * static: only used within this file (avoids a missing-prototype warning).
+ */
+static void cmt_dbg_rmids_pkgd(void *data)
+{
+	struct dbg_smp_data *d = data;	/* no cast needed from void * in C */
+
+	__cmt_dbg_rmids__pkgd(d->s, d->pkgd, d->pad);
+}
+
+/*
+ * seq_file show callback for the "rmids" file: for each online package,
+ * read rmid occupancy from a CPU in that package.
+ */
+static int cmt_dbg_rmids_show(struct seq_file *s, void *unused)
+{
+	struct dbg_smp_data d;
+	struct pkg_data *pkgd = NULL;
+	int pad = 0, err;
+
+	mutex_lock(&cmt_mutex);
+
+	while ((pkgd = cmt_pkgs_data_next_rcu(pkgd))) {
+		DBG_PRINTF("pkgid: %d\n", pkgd->pkgid);
+		d.s = s;
+		d.pkgd = pkgd;
+		d.pad = pad + 1;
+		/* Occupancy MSRs are per-package: read them remotely. */
+		err = smp_call_function_single(pkgd->work_cpu,
+					       cmt_dbg_rmids_pkgd, &d, true);
+		/* Don't silently drop the error, show it in the output. */
+		if (err)
+			seq_printf(s, "error reading pkgid %d rmids: %d",
+				   pkgd->pkgid, err);
+		seq_puts(s, "\n");
+	}
+
+	mutex_unlock(&cmt_mutex);
+
+	return 0;
+}
+
+/*
+ * Generate the single_open() boilerplate for one debugfs file: an open
+ * handler wrapping cmt_dbg_<name>_show() and its file_operations,
+ * cmt_dbg_<name>_ops.
+ */
+#define CMT_DBGS_FILE(name__) \
+static int cmt_dbg_ ## name__ ## _open(struct inode *inode, struct file *file)\
+{\
+ return single_open(file, cmt_dbg_ ## name__ ## _show,\
+ inode->i_private);\
+} \
+static const struct file_operations cmt_dbg_ ## name__ ## _ops = {\
+ .open = cmt_dbg_ ## name__ ## _open,\
+ .read = seq_read,\
+ .llseek = seq_lseek,\
+ .release = single_release,\
+}
+
+CMT_DBGS_FILE(hrchy);
+CMT_DBGS_FILE(pkgs);
+CMT_DBGS_FILE(rmids);
+
+/* Only referenced from this file. */
+static struct dentry *cmt_dbgfs_root;
+
+/*
+ * Create the intel_cmt debugfs directory with its three read-only files.
+ * debugfs_create_dir()/debugfs_create_file() return NULL on failure and
+ * an ERR_PTR only when debugfs is not configured, so both cases must be
+ * checked. On partial failure the already-created dentries are removed
+ * so nothing leaks.
+ */
+static int start_debugfs(void)
+{
+	struct dentry *root, *file;
+
+	root = debugfs_create_dir("intel_cmt", NULL);
+	if (IS_ERR_OR_NULL(root))
+		return root ? PTR_ERR(root) : -ENOMEM;
+
+	file = debugfs_create_file("pkgs", 0444,
+				   root, NULL, &cmt_dbg_pkgs_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto err_remove;
+
+	file = debugfs_create_file("hrchy", 0444,
+				   root, NULL, &cmt_dbg_hrchy_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto err_remove;
+
+	file = debugfs_create_file("rmids", 0444,
+				   root, NULL, &cmt_dbg_rmids_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto err_remove;
+
+	cmt_dbgfs_root = root;
+
+	return 0;
+
+err_remove:
+	debugfs_remove_recursive(root);
+	return file ? PTR_ERR(file) : -ENOMEM;
+}
+
+/* Tear down the intel_cmt debugfs hierarchy, if it was ever created. */
+static void stop_debugfs(void)
+{
+	if (cmt_dbgfs_root) {
+		debugfs_remove_recursive(cmt_dbgfs_root);
+		cmt_dbgfs_root = NULL;
+	}
+}
+
+#else  /* !INTEL_CMT_DEBUGFS */
+
+/*
+ * Stubs when debugfs support is compiled out. start_debugfs() is non-void
+ * and its return value is checked by the caller, so it must return a
+ * value -- falling off the end of a non-void function whose result is
+ * used is undefined behavior (and a build error with kernel warnings
+ * treated as errors). Always report success.
+ */
+static int start_debugfs(void)
+{
+	return 0;
+}
+
+static void stop_debugfs(void)
+{
+}
+
+#endif
+
static struct attribute *intel_cmt_attrs[] = {
&dev_attr_max_recycle_threshold.attr,
NULL,
@@ -2774,6 +3144,15 @@ static void cmt_stop(void)
mutex_unlock(&cmt_mutex);
}
+/*
+ * Full driver teardown, in reverse order of initialization: debugfs
+ * first, then the PQR static branch, the PMU registration, the CMT
+ * machinery, and finally the allocations.
+ */
+static void intel_cmt_terminate(void)
+{
+ stop_debugfs();
+ static_branch_dec(&pqr_common_enable_key);
+ perf_pmu_unregister(&intel_cmt_pmu);
+ cmt_stop();
+ cmt_dealloc();
+}
+
static int __init cmt_alloc(void)
{
pkg_uflags_size = sizeof(*pkg_uflags_zeroes) * topology_max_packages();
@@ -2904,6 +3283,12 @@ static int __init intel_cmt_init(void)
static_branch_inc(&pqr_common_enable_key);
+ err = start_debugfs();
+ if (err) {
+ intel_cmt_terminate();
+ goto err_exit;
+ }
+
return err;
err_stop:
--
2.8.0.rc3.226.g39d4020
next prev parent reply other threads:[~2016-10-30 0:40 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-10-30 0:37 [PATCH v3 00/46] Cache Monitoring Technology (aka CQM) David Carrillo-Cisneros
2016-10-30 0:37 ` [PATCH v3 01/46] perf/x86/intel/cqm: remove previous version of CQM and MBM David Carrillo-Cisneros
2016-10-30 0:37 ` [PATCH v3 02/46] perf/x86/intel: rename CQM cpufeatures to CMT David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 03/46] x86/intel: add CONFIG_INTEL_RDT_M configuration flag David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 04/46] perf/x86/intel/cmt: add device initialization and CPU hotplug support David Carrillo-Cisneros
2016-11-10 15:19 ` Thomas Gleixner
2016-10-30 0:38 ` [PATCH v3 05/46] perf/x86/intel/cmt: add per-package locks David Carrillo-Cisneros
2016-11-10 21:23 ` Thomas Gleixner
2016-11-11 2:22 ` David Carrillo-Cisneros
2016-11-11 7:21 ` Peter Zijlstra
2016-11-11 7:32 ` Ingo Molnar
2016-11-11 9:41 ` Thomas Gleixner
2016-11-11 17:21 ` David Carrillo-Cisneros
2016-11-13 10:58 ` Thomas Gleixner
2016-11-15 4:53 ` David Carrillo-Cisneros
2016-11-16 19:00 ` Thomas Gleixner
2016-10-30 0:38 ` [PATCH v3 06/46] perf/x86/intel/cmt: add intel_cmt pmu David Carrillo-Cisneros
2016-11-10 21:27 ` Thomas Gleixner
2016-10-30 0:38 ` [PATCH v3 07/46] perf/core: add RDT Monitoring attributes to struct hw_perf_event David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 08/46] perf/x86/intel/cmt: add MONitored Resource (monr) initialization David Carrillo-Cisneros
2016-11-10 23:09 ` Thomas Gleixner
2016-10-30 0:38 ` [PATCH v3 09/46] perf/x86/intel/cmt: add basic monr hierarchy David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 10/46] perf/x86/intel/cmt: add Package MONitored Resource (pmonr) initialization David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 11/46] perf/x86/intel/cmt: add cmt_user_flags (uflags) to monr David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 12/46] perf/x86/intel/cmt: add per-package rmid pools David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 13/46] perf/x86/intel/cmt: add pmonr's Off and Unused states David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 14/46] perf/x86/intel/cmt: add Active and Dep_{Idle, Dirty} states David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 15/46] perf/x86/intel: encapsulate rmid and closid updates in pqr cache David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 16/46] perf/x86/intel/cmt: set sched rmid and complete pmu start/stop/add/del David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 17/46] perf/x86/intel/cmt: add uflag CMT_UF_NOLAZY_RMID David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 18/46] perf/core: add arch_info field to struct perf_cgroup David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 19/46] perf/x86/intel/cmt: add support for cgroup events David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 20/46] perf/core: add pmu::event_terminate David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 21/46] perf/x86/intel/cmt: use newly introduced event_terminate David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 22/46] perf/x86/intel/cmt: sync cgroups and intel_cmt device start/stop David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 23/46] perf/core: hooks to add architecture specific features in perf_cgroup David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 24/46] perf/x86/intel/cmt: add perf_cgroup_arch_css_{online,offline} David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 25/46] perf/x86/intel/cmt: add monr->flags and CMT_MONR_ZOMBIE David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 26/46] sched: introduce the finish_arch_pre_lock_switch() scheduler hook David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 27/46] perf/x86/intel: add pqr cache flags and intel_pqr_ctx_switch David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 28/46] perf,perf/x86,perf/powerpc,perf/arm,perf/*: add int error return to pmu::read David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 29/46] perf/x86/intel/cmt: add error handling to intel_cmt_event_read David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 30/46] perf/x86/intel/cmt: add asynchronous read for task events David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 31/46] perf/x86/intel/cmt: add subtree read for cgroup events David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 32/46] perf/core: Add PERF_EV_CAP_READ_ANY_{CPU_,}PKG flags David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 33/46] perf/x86/intel/cmt: use PERF_EV_CAP_READ_{,CPU_}PKG flags in Intel cmt David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 34/46] perf/core: introduce PERF_EV_CAP_CGROUP_NO_RECURSION David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 35/46] perf/x86/intel/cmt: use PERF_EV_CAP_CGROUP_NO_RECURSION in intel_cmt David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 36/46] perf/core: add perf_event cgroup hooks for subsystem attributes David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 37/46] perf/x86/intel/cmt: add cont_monitoring to perf cgroup David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 38/46] perf/x86/intel/cmt: introduce read SLOs for rotation David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 39/46] perf/x86/intel/cmt: add max_recycle_threshold sysfs attribute David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 40/46] perf/x86/intel/cmt: add rotation scheduled work David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 41/46] perf/x86/intel/cmt: add rotation minimum progress SLO David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 42/46] perf/x86/intel/cmt: add rmid stealing David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 43/46] perf/x86/intel/cmt: add CMT_UF_NOSTEAL_RMID flag David Carrillo-Cisneros
2016-10-30 0:38 ` David Carrillo-Cisneros [this message]
2016-10-30 0:38 ` [PATCH v3 45/46] perf/stat: fix bug in handling events in error state David Carrillo-Cisneros
2016-10-30 0:38 ` [PATCH v3 46/46] perf/stat: revamp read error handling, snapshot and per_pkg events David Carrillo-Cisneros
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1477787923-61185-45-git-send-email-davidcc@google.com \
--to=davidcc@google.com \
--cc=ak@linux.intel.com \
--cc=bp@suse.de \
--cc=eranian@google.com \
--cc=fenghua.yu@intel.com \
--cc=kan.liang@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@redhat.com \
--cc=mtosatti@redhat.com \
--cc=nilayvaish@gmail.com \
--cc=peterz@infradead.org \
--cc=pjt@google.com \
--cc=ravi.v.shankar@intel.com \
--cc=tglx@linutronix.de \
--cc=vegard.nossum@gmail.com \
--cc=vikas.shivappa@linux.intel.com \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).