public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Jacob Shin <jacob.shin@amd.com>
To: Ingo Molnar <mingo@redhat.com>,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>, <x86@kernel.org>,
	Stephane Eranian <eranian@google.com>,
	Jiri Olsa <jolsa@redhat.com>, <linux-kernel@vger.kernel.org>,
	Jacob Shin <jacob.shin@amd.com>
Subject: [PATCH RESEND 1/3] perf, amd: Further generalize NB event constraints handling logic
Date: Tue, 9 Apr 2013 10:23:52 -0500	[thread overview]
Message-ID: <1365521034-4496-2-git-send-email-jacob.shin@amd.com> (raw)
In-Reply-To: <1365521034-4496-1-git-send-email-jacob.shin@amd.com>

In preparation for enabling AMD L2I performance counters, we will
further generalize the NB event constraints handling logic to allow
any type of shared counters. This is just a code rework; there are
no functional changes.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
---
 arch/x86/kernel/cpu/perf_event.h     |    6 +--
 arch/x86/kernel/cpu/perf_event_amd.c |   70 +++++++++++++++++-----------------
 2 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index ba9aadf..f092dfe 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -68,8 +68,8 @@ struct event_constraint {
 #define PERF_X86_EVENT_PEBS_LDLAT	0x1 /* ld+ldlat data address sampling */
 #define PERF_X86_EVENT_PEBS_ST		0x2 /* st data address sampling */
 
-struct amd_nb {
-	int nb_id;  /* NorthBridge id */
+struct amd_shared_regs {
+	int id;
 	int refcnt; /* reference count */
 	struct perf_event *owners[X86_PMC_IDX_MAX];
 	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
@@ -170,7 +170,7 @@ struct cpu_hw_events {
 	/*
 	 * AMD specific bits
 	 */
-	struct amd_nb			*amd_nb;
+	struct amd_shared_regs		*amd_nb;
 	/* Inverted mask of bits to clear in the perf_ctr ctrl registers */
 	u64				perf_ctr_virt_mask;
 
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42..23964a6 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -292,9 +292,9 @@ static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
 
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
-	struct amd_nb *nb = cpuc->amd_nb;
+	struct amd_shared_regs *nb = cpuc->amd_nb;
 
-	return nb && nb->nb_id != -1;
+	return nb && nb->id != -1;
 }
 
 static int amd_pmu_hw_config(struct perf_event *event)
@@ -321,10 +321,9 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	return amd_core_hw_config(event);
 }
 
-static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
-					   struct perf_event *event)
+static void amd_put_shared_event_constraints(struct amd_shared_regs *regs,
+					     struct perf_event *event)
 {
-	struct amd_nb *nb = cpuc->amd_nb;
 	int i;
 
 	/*
@@ -336,7 +335,7 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 	 * when we come here
 	 */
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (cmpxchg(nb->owners + i, event, NULL) == event)
+		if (cmpxchg(regs->owners + i, event, NULL) == event)
 			break;
 	}
 }
@@ -386,16 +385,17 @@ static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
   *
   * Given that resources are allocated (cmpxchg), they must be
   * eventually freed for others to use. This is accomplished by
-  * calling __amd_put_nb_event_constraints()
+  * calling amd_put_shared_event_constraints()
   *
   * Non NB events are not impacted by this restriction.
   */
 static struct event_constraint *
-__amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
-			       struct event_constraint *c)
+amd_get_shared_event_constraints(struct cpu_hw_events *cpuc,
+				 struct amd_shared_regs *regs,
+				 struct perf_event *event,
+				 struct event_constraint *c)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	struct amd_nb *nb = cpuc->amd_nb;
 	struct perf_event *old;
 	int idx, new = -1;
 
@@ -418,8 +418,8 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	for_each_set_bit(idx, c->idxmsk, x86_pmu.num_counters) {
 		if (new == -1 || hwc->idx == idx)
 			/* assign free slot, prefer hwc->idx */
-			old = cmpxchg(nb->owners + idx, NULL, event);
-		else if (nb->owners[idx] == event)
+			old = cmpxchg(regs->owners + idx, NULL, event);
+		else if (regs->owners[idx] == event)
 			/* event already present */
 			old = event;
 		else
@@ -430,7 +430,7 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 
 		/* reassign to this slot */
 		if (new != -1)
-			cmpxchg(nb->owners + new, event, NULL);
+			cmpxchg(regs->owners + new, event, NULL);
 		new = idx;
 
 		/* already present, reuse */
@@ -444,29 +444,29 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	if (amd_is_perfctr_nb_event(hwc))
 		amd_nb_interrupt_hw_config(hwc);
 
-	return &nb->event_constraints[new];
+	return &regs->event_constraints[new];
 }
 
-static struct amd_nb *amd_alloc_nb(int cpu)
+static struct amd_shared_regs *amd_alloc_shared_regs(int cpu)
 {
-	struct amd_nb *nb;
+	struct amd_shared_regs *regs;
 	int i;
 
-	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
-			  cpu_to_node(cpu));
-	if (!nb)
+	regs = kmalloc_node(sizeof(struct amd_shared_regs),
+			    GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
+	if (!regs)
 		return NULL;
 
-	nb->nb_id = -1;
+	regs->id = -1;
 
 	/*
-	 * initialize all possible NB constraints
+	 * initialize all possible constraints
 	 */
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		__set_bit(i, nb->event_constraints[i].idxmsk);
-		nb->event_constraints[i].weight = 1;
+		__set_bit(i, regs->event_constraints[i].idxmsk);
+		regs->event_constraints[i].weight = 1;
 	}
-	return nb;
+	return regs;
 }
 
 static int amd_pmu_cpu_prepare(int cpu)
@@ -478,7 +478,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 	if (boot_cpu_data.x86_max_cores < 2)
 		return NOTIFY_OK;
 
-	cpuc->amd_nb = amd_alloc_nb(cpu);
+	cpuc->amd_nb = amd_alloc_shared_regs(cpu);
 	if (!cpuc->amd_nb)
 		return NOTIFY_BAD;
 
@@ -488,7 +488,7 @@ static int amd_pmu_cpu_prepare(int cpu)
 static void amd_pmu_cpu_starting(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	struct amd_nb *nb;
+	struct amd_shared_regs *nb;
 	int i, nb_id;
 
 	cpuc->perf_ctr_virt_mask = AMD64_EVENTSEL_HOSTONLY;
@@ -504,14 +504,14 @@ static void amd_pmu_cpu_starting(int cpu)
 		if (WARN_ON_ONCE(!nb))
 			continue;
 
-		if (nb->nb_id == nb_id) {
+		if (nb->id == nb_id) {
 			cpuc->kfree_on_online = cpuc->amd_nb;
 			cpuc->amd_nb = nb;
 			break;
 		}
 	}
 
-	cpuc->amd_nb->nb_id = nb_id;
+	cpuc->amd_nb->id = nb_id;
 	cpuc->amd_nb->refcnt++;
 }
 
@@ -525,9 +525,9 @@ static void amd_pmu_cpu_dead(int cpu)
 	cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	if (cpuhw->amd_nb) {
-		struct amd_nb *nb = cpuhw->amd_nb;
+		struct amd_shared_regs *nb = cpuhw->amd_nb;
 
-		if (nb->nb_id == -1 || --nb->refcnt == 0)
+		if (nb->id == -1 || --nb->refcnt == 0)
 			kfree(nb);
 
 		cpuhw->amd_nb = NULL;
@@ -543,15 +543,15 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
 		return &unconstrained;
 
-	return __amd_get_nb_event_constraints(cpuc, event,
-					      amd_nb_event_constraint);
+	return amd_get_shared_event_constraints(cpuc, cpuc->amd_nb, event,
+						amd_nb_event_constraint);
 }
 
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
 				      struct perf_event *event)
 {
 	if (amd_has_nb(cpuc) && amd_is_nb_event(&event->hw))
-		__amd_put_nb_event_constraints(cpuc, event);
+		amd_put_shared_event_constraints(cpuc->amd_nb, event);
 }
 
 PMU_FORMAT_ATTR(event,	"config:0-7,32-35");
@@ -711,8 +711,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
 			return &amd_f15_PMC20;
 		}
 	case AMD_EVENT_NB:
-		return __amd_get_nb_event_constraints(cpuc, event,
-						      amd_nb_event_constraint);
+		return amd_get_shared_event_constraints(cpuc, cpuc->amd_nb,
+				event, amd_nb_event_constraint);
 	default:
 		return &emptyconstraint;
 	}
-- 
1.7.9.5



  reply	other threads:[~2013-04-09 15:24 UTC|newest]

Thread overview: 16+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2013-04-09 15:23 [PATCH RESEND 0/3] perf, amd: Support for Family 16h L2I Performance Counters Jacob Shin
2013-04-09 15:23 ` Jacob Shin [this message]
2013-04-09 15:23 ` [PATCH RESEND 2/3] perf, x86: Allow for multiple kfree_on_online pointers Jacob Shin
2013-04-09 15:23 ` [PATCH RESEND 3/3] perf, amd: Enable L2I performance counters on AMD Family 16h Jacob Shin
2013-04-10  9:41 ` [PATCH RESEND 0/3] perf, amd: Support for Family 16h L2I Performance Counters Peter Zijlstra
2013-04-10  9:48   ` Peter Zijlstra
2013-04-10 11:38   ` Stephane Eranian
2013-04-10 11:49     ` Peter Zijlstra
2013-04-10 11:52       ` Stephane Eranian
2013-04-10 11:55       ` Peter Zijlstra
2013-04-10 11:56         ` Ingo Molnar
2013-04-10 12:12           ` Stephane Eranian
2013-04-10 12:28             ` Ingo Molnar
2013-04-10 12:29               ` Stephane Eranian
2013-04-10 15:03                 ` Jacob Shin
2013-04-10 12:29             ` Borislav Petkov

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1365521034-4496-2-git-send-email-jacob.shin@amd.com \
    --to=jacob.shin@amd.com \
    --cc=a.p.zijlstra@chello.nl \
    --cc=acme@ghostprotocols.net \
    --cc=eranian@google.com \
    --cc=hpa@zytor.com \
    --cc=jolsa@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mingo@redhat.com \
    --cc=tglx@linutronix.de \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox