From: Thomas Gleixner <tglx@linutronix.de>
To: LKML <linux-kernel@vger.kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@kernel.org>, Borislav Petkov <bp@alien8.de>,
Stephane Eranian <eranian@google.com>,
Harish Chegondi <harish.chegondi@intel.com>,
Kan Liang <kan.liang@intel.com>,
Andi Kleen <andi.kleen@intel.com>
Subject: [patch 04/11] x86/perf/intel_uncore: Cleanup hardware on exit
Date: Wed, 17 Feb 2016 13:47:33 -0000 [thread overview]
Message-ID: <20160217133932.018288722@linutronix.de> (raw)
In-Reply-To: 20160217132903.767990400@linutronix.de
[-- Attachment #1: x86-perf-intel_uncore--Cleanup-hardware-on-exit.patch --]
[-- Type: text/plain, Size: 10907 bytes --]
When tearing down the boxes nothing undoes the hardware state which was setup
by box->init_box(). Add a box->exit_box() callback and implement it for the
uncores which have an init_box() callback.
This misses the cleanup in the error exit paths, but I cannot be bothered to
implement it before cleaning up the rest of the driver, which makes that task
way simpler.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
arch/x86/kernel/cpu/perf_event_intel_uncore.c | 6 +-
arch/x86/kernel/cpu/perf_event_intel_uncore.h | 9 +++
arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c | 6 ++
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c | 13 ++++
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 57 +++++++++++++++++++-
5 files changed, 88 insertions(+), 3 deletions(-)
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -927,6 +927,7 @@ static int uncore_pci_probe(struct pci_d
raw_spin_lock(&uncore_box_lock);
list_del(&box->list);
raw_spin_unlock(&uncore_box_lock);
+ uncore_box_exit(box);
kfree(box);
}
return ret;
@@ -972,6 +973,7 @@ static void uncore_pci_remove(struct pci
}
WARN_ON_ONCE(atomic_read(&box->refcnt) != 1);
+ uncore_box_exit(box);
kfree(box);
if (last_box)
@@ -1079,8 +1081,10 @@ static void uncore_cpu_dying(int cpu)
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
*per_cpu_ptr(pmu->box, cpu) = NULL;
- if (box && atomic_dec_and_test(&box->refcnt))
+ if (box && atomic_dec_and_test(&box->refcnt)) {
list_add(&box->list, &boxes_to_free);
+ uncore_box_exit(box);
+ }
}
}
}
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -61,6 +61,7 @@ struct intel_uncore_type {
struct intel_uncore_ops {
void (*init_box)(struct intel_uncore_box *);
+ void (*exit_box)(struct intel_uncore_box *);
void (*disable_box)(struct intel_uncore_box *);
void (*enable_box)(struct intel_uncore_box *);
void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
@@ -306,6 +307,14 @@ static inline void uncore_box_init(struc
}
}
+static inline void uncore_box_exit(struct intel_uncore_box *box)
+{
+ if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->exit_box)
+ box->pmu->type->ops->exit_box(box);
+ }
+}
+
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
return (box->phys_id < 0);
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_nhmex.c
@@ -201,6 +201,11 @@ static void nhmex_uncore_msr_init_box(st
wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, NHMEX_U_PMON_GLOBAL_EN_ALL);
}
+static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ wrmsrl(NHMEX_U_MSR_PMON_GLOBAL_CTL, 0);
+}
+
static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
{
unsigned msr = uncore_msr_box_ctl(box);
@@ -250,6 +255,7 @@ static void nhmex_uncore_msr_enable_even
#define NHMEX_UNCORE_OPS_COMMON_INIT() \
.init_box = nhmex_uncore_msr_init_box, \
+ .exit_box = nhmex_uncore_msr_exit_box, \
.disable_box = nhmex_uncore_msr_disable_box, \
.enable_box = nhmex_uncore_msr_enable_box, \
.disable_event = nhmex_uncore_msr_disable_event, \
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -95,6 +95,12 @@ static void snb_uncore_msr_init_box(stru
}
}
+static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ if (box->pmu->pmu_idx == 0)
+ wrmsrl(SNB_UNC_PERF_GLOBAL_CTL, 0);
+}
+
static struct uncore_event_desc snb_uncore_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"),
{ /* end: all zeroes */ },
@@ -116,6 +122,7 @@ static struct attribute_group snb_uncore
static struct intel_uncore_ops snb_uncore_msr_ops = {
.init_box = snb_uncore_msr_init_box,
+ .exit_box = snb_uncore_msr_exit_box,
.disable_event = snb_uncore_msr_disable_event,
.enable_event = snb_uncore_msr_enable_event,
.read_counter = uncore_msr_read_counter,
@@ -231,6 +238,11 @@ static void snb_uncore_imc_init_box(stru
box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
}
+static void snb_uncore_imc_exit_box(struct intel_uncore_box *box)
+{
+ iounmap(box->io_addr);
+}
+
static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
{}
@@ -458,6 +470,7 @@ static struct pmu snb_uncore_imc_pmu = {
static struct intel_uncore_ops snb_uncore_imc_ops = {
.init_box = snb_uncore_imc_init_box,
+ .exit_box = snb_uncore_imc_exit_box,
.enable_box = snb_uncore_imc_enable_box,
.disable_box = snb_uncore_imc_disable_box,
.disable_event = snb_uncore_imc_disable_event,
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -387,6 +387,14 @@ static void snbep_uncore_pci_init_box(st
pci_write_config_dword(pdev, box_ctl, SNBEP_PMON_BOX_CTL_INT);
}
+static void snbep_uncore_pci_exit_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ int box_ctl = uncore_pci_box_ctl(box);
+
+ pci_write_config_dword(pdev, box_ctl, 0);
+}
+
static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
{
u64 config;
@@ -440,6 +448,14 @@ static void snbep_uncore_msr_init_box(st
wrmsrl(msr, SNBEP_PMON_BOX_CTL_INT);
}
+static void snbep_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+
+ if (msr)
+ wrmsrl(msr, 0);
+}
+
static struct attribute *snbep_uncore_formats_attr[] = {
&format_attr_event.attr,
&format_attr_umask.attr,
@@ -567,7 +583,8 @@ static struct attribute_group snbep_unco
#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
- .init_box = snbep_uncore_msr_init_box \
+ .init_box = snbep_uncore_msr_init_box, \
+ .exit_box = snbep_uncore_msr_exit_box \
static struct intel_uncore_ops snbep_uncore_msr_ops = {
SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
@@ -575,6 +592,7 @@ static struct intel_uncore_ops snbep_unc
#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
.init_box = snbep_uncore_pci_init_box, \
+ .exit_box = snbep_uncore_pci_exit_box, \
.disable_box = snbep_uncore_pci_disable_box, \
.enable_box = snbep_uncore_pci_enable_box, \
.disable_event = snbep_uncore_pci_disable_event, \
@@ -1236,10 +1254,19 @@ int snbep_uncore_pci_init(void)
static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
{
unsigned msr = uncore_msr_box_ctl(box);
+
if (msr)
wrmsrl(msr, IVBEP_PMON_BOX_CTL_INT);
}
+static void ivbep_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+
+ if (msr)
+ wrmsrl(msr, 0);
+}
+
static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
{
struct pci_dev *pdev = box->pci_dev;
@@ -1247,8 +1274,16 @@ static void ivbep_uncore_pci_init_box(st
pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, IVBEP_PMON_BOX_CTL_INT);
}
+static void ivbep_uncore_pci_exit_box(struct intel_uncore_box *box)
+{
+ struct pci_dev *pdev = box->pci_dev;
+
+ pci_write_config_dword(pdev, SNBEP_PCI_PMON_BOX_CTL, 0);
+}
+
#define IVBEP_UNCORE_MSR_OPS_COMMON_INIT() \
.init_box = ivbep_uncore_msr_init_box, \
+ .exit_box = ivbep_uncore_msr_exit_box, \
.disable_box = snbep_uncore_msr_disable_box, \
.enable_box = snbep_uncore_msr_enable_box, \
.disable_event = snbep_uncore_msr_disable_event, \
@@ -1261,6 +1296,7 @@ static struct intel_uncore_ops ivbep_unc
static struct intel_uncore_ops ivbep_uncore_pci_ops = {
.init_box = ivbep_uncore_pci_init_box,
+ .exit_box = ivbep_uncore_pci_exit_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = snbep_uncore_pci_enable_box,
.disable_event = snbep_uncore_pci_disable_event,
@@ -1497,6 +1533,7 @@ static void ivbep_cbox_enable_event(stru
static struct intel_uncore_ops ivbep_uncore_cbox_ops = {
.init_box = ivbep_uncore_msr_init_box,
+ .exit_box = ivbep_uncore_msr_exit_box,
.disable_box = snbep_uncore_msr_disable_box,
.enable_box = snbep_uncore_msr_enable_box,
.disable_event = snbep_uncore_msr_disable_event,
@@ -1613,6 +1650,7 @@ static u64 ivbep_uncore_irp_read_counter
static struct intel_uncore_ops ivbep_uncore_irp_ops = {
.init_box = ivbep_uncore_pci_init_box,
+ .exit_box = ivbep_uncore_pci_exit_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = snbep_uncore_pci_enable_box,
.disable_event = ivbep_uncore_irp_disable_event,
@@ -1633,6 +1671,7 @@ static struct intel_uncore_type ivbep_un
static struct intel_uncore_ops ivbep_uncore_qpi_ops = {
.init_box = ivbep_uncore_pci_init_box,
+ .exit_box = ivbep_uncore_pci_exit_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = snbep_uncore_pci_enable_box,
.disable_event = snbep_uncore_pci_disable_event,
@@ -1914,6 +1953,7 @@ static void hswep_cbox_enable_event(stru
static struct intel_uncore_ops knl_uncore_cha_ops = {
.init_box = snbep_uncore_msr_init_box,
+ .exit_box = snbep_uncore_msr_exit_box,
.disable_box = snbep_uncore_msr_disable_box,
.enable_box = snbep_uncore_msr_enable_box,
.disable_event = snbep_uncore_msr_disable_event,
@@ -2008,6 +2048,7 @@ static void knl_uncore_imc_enable_event(
static struct intel_uncore_ops knl_uncore_imc_ops = {
.init_box = snbep_uncore_pci_init_box,
+ .exit_box = snbep_uncore_pci_exit_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = knl_uncore_imc_enable_box,
.read_counter = snbep_uncore_pci_read_counter,
@@ -2397,6 +2438,7 @@ static void hswep_cbox_enable_event(stru
static struct intel_uncore_ops hswep_uncore_cbox_ops = {
.init_box = snbep_uncore_msr_init_box,
+ .exit_box = snbep_uncore_msr_exit_box,
.disable_box = snbep_uncore_msr_disable_box,
.enable_box = snbep_uncore_msr_enable_box,
.disable_event = snbep_uncore_msr_disable_event,
@@ -2442,9 +2484,19 @@ static void hswep_uncore_sbox_msr_init_b
}
}
+static void hswep_uncore_sbox_msr_exit_box(struct intel_uncore_box *box)
+{
+ unsigned msr = uncore_msr_box_ctl(box);
+
+ /* CHECKME: Does this need the bit dance like init() ? */
+ if (msr)
+ wrmsrl(msr, 0);
+}
+
static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
__SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
- .init_box = hswep_uncore_sbox_msr_init_box
+ .init_box = hswep_uncore_sbox_msr_init_box,
+ .exit_box = hswep_uncore_sbox_msr_exit_box
};
static struct attribute *hswep_uncore_sbox_formats_attr[] = {
@@ -2584,6 +2636,7 @@ static u64 hswep_uncore_irp_read_counter
static struct intel_uncore_ops hswep_uncore_irp_ops = {
.init_box = snbep_uncore_pci_init_box,
+ .exit_box = snbep_uncore_pci_exit_box,
.disable_box = snbep_uncore_pci_disable_box,
.enable_box = snbep_uncore_pci_enable_box,
.disable_event = ivbep_uncore_irp_disable_event,
next prev parent reply other threads:[~2016-02-17 13:52 UTC|newest]
Thread overview: 30+ messages / expand[flat|nested] mbox.gz Atom feed top
2016-02-17 13:47 [patch 00/11] x86/perf/intel_uncore: Cleanup and enhancements Thomas Gleixner
2016-02-17 13:47 ` [patch 01/11] x86/perf/intel_uncore: Remove pointless mask check Thomas Gleixner
2016-02-17 13:47 ` [patch 02/11] x86/perf/intel_uncore: Simplify error rollback Thomas Gleixner
2016-02-17 13:47 ` Thomas Gleixner [this message]
2016-02-17 15:49 ` [patch 04/11] x86/perf/intel_uncore: Cleanup hardware on exit Liang, Kan
2016-02-17 18:16 ` Thomas Gleixner
2016-02-17 21:57 ` Liang, Kan
2016-02-17 22:00 ` Thomas Gleixner
2016-02-17 13:47 ` [patch 03/11] x86/perf/intel_uncore: Fix error handling Thomas Gleixner
2016-02-17 13:47 ` [patch 05/11] x86/perf/intel_uncore: Make code readable Thomas Gleixner
2016-02-17 13:47 ` [patch 07/11] x86/perf/uncore: Track packages not per cpu data Thomas Gleixner
2016-02-17 21:19 ` Stephane Eranian
2016-02-17 21:24 ` Andi Kleen
2016-02-17 21:56 ` Thomas Gleixner
2016-02-17 22:16 ` Andi Kleen
2016-02-17 22:31 ` Thomas Gleixner
2016-02-18 7:50 ` Ingo Molnar
2016-02-18 8:13 ` Peter Zijlstra
2016-02-18 9:35 ` Stephane Eranian
2016-02-18 9:51 ` Peter Zijlstra
2016-02-18 10:25 ` Thomas Gleixner
2016-02-18 10:22 ` Thomas Gleixner
2016-02-18 10:54 ` Thomas Gleixner
2016-02-19 8:39 ` Thomas Gleixner
2016-02-17 21:25 ` Thomas Gleixner
2016-02-17 13:47 ` [patch 06/11] x86/topology: Provide helper to retrieve number of cpu packages Thomas Gleixner
2016-02-17 13:47 ` [patch 08/11] x86/perf/intel_uncore: Clear all hardware state on exit Thomas Gleixner
2016-02-17 13:47 ` [patch 09/11] x86/perf/intel_uncore: Make PCI and MSR uncore independent Thomas Gleixner
2016-02-17 13:47 ` [patch 10/11] cpumask: Export cpumask_any_but Thomas Gleixner
2016-02-17 13:47 ` [patch 11/11] x86/perf/intel_uncore: Make it modular Thomas Gleixner
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20160217133932.018288722@linutronix.de \
--to=tglx@linutronix.de \
--cc=andi.kleen@intel.com \
--cc=bp@alien8.de \
--cc=eranian@google.com \
--cc=harish.chegondi@intel.com \
--cc=kan.liang@intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=mingo@kernel.org \
--cc=peterz@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).