From: Oleg Nesterov <oleg@redhat.com>
To: Ingo Molnar <mingo@elte.hu>, Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org,
	Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Paul Mackerras <paulus@samba.org>,
	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>,
	trinity@vger.kernel.org, Jiri Olsa <jolsa@redhat.com>
Subject: [PATCH 2/2] hw_breakpoint: Introduce "struct bp_cpuinfo"
Date: Sun, 2 Jun 2013 21:50:57 +0200
Message-ID: <20130602195057.GC3277@redhat.com>
In-Reply-To: <20130602194912.GA3277@redhat.com>

This patch simply moves all of the per-cpu variables into a single
new per-cpu "struct bp_cpuinfo".

To me this looks more logical and clean, and it can also simplify
further changes. In particular, I do not think this memory should be
per-cpu at all: it is never used "locally". After this change it is
trivial to turn it into, say, bootmem[nr_cpu_ids]; see the sketch
after the "---" marker below.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
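(Not part of this patch -- just to illustrate the point above, a rough
sketch of the flat-table alternative. The names bp_cpuinfo and
alloc_bp_cpuinfo here are hypothetical, and kcalloc() stands in for
the bootmem allocator for brevity.)

	static struct bp_cpuinfo *bp_cpuinfo;	/* nr_cpu_ids * TYPE_MAX entries */

	static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
	{
		/* one bp_cpuinfo per (cpu, type) pair, laid out cpu-major */
		return bp_cpuinfo + cpu * TYPE_MAX + type;
	}

	static int __init alloc_bp_cpuinfo(void)
	{
		/* zero-initialized, like the per-cpu data it would replace */
		bp_cpuinfo = kcalloc(nr_cpu_ids * TYPE_MAX,
				     sizeof(*bp_cpuinfo), GFP_KERNEL);
		return bp_cpuinfo ? 0 : -ENOMEM;
	}
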
 kernel/events/hw_breakpoint.c |   69 +++++++++++++++++++++-------------------
 1 files changed, 36 insertions(+), 33 deletions(-)
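
(Reviewer note: get_bp_info() in the patch relies on pointer
arithmetic on the per-cpu array being done before per_cpu_ptr()
applies the per-cpu offset; as far as I can see,

	per_cpu_ptr(bp_cpuinfo + type, cpu)

is just another way of writing

	&per_cpu(bp_cpuinfo[type], cpu)

i.e. the address of cpu's copy of the type'th element.)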

diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 17d8093..42c47a8 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -46,23 +46,26 @@
 #include <linux/smp.h>
 
 #include <linux/hw_breakpoint.h>
-
-
 /*
  * Constraints data
  */
+struct bp_cpuinfo {
+	/* Number of pinned cpu breakpoints in a cpu */
+	unsigned int	cpu_pinned;
+	/* tsk_pinned[n] is the number of tasks having n+1 breakpoints */
+	unsigned int	*tsk_pinned;
+	/* Number of non-pinned cpu/task breakpoints in a cpu */
+	unsigned int	flexible; /* XXX: placeholder, see fetch_this_slot() */
+};
 
-/* Number of pinned cpu breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);
-
-/* Number of pinned task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);
-
-/* Number of non-pinned cpu/task breakpoints in a cpu */
-static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
-
+static DEFINE_PER_CPU(struct bp_cpuinfo, bp_cpuinfo[TYPE_MAX]);
 static int nr_slots[TYPE_MAX];
 
+static struct bp_cpuinfo *get_bp_info(int cpu, enum bp_type_idx type)
+{
+	return per_cpu_ptr(bp_cpuinfo + type, cpu);
+}
+
 /* Keep track of the breakpoints attached to tasks */
 static LIST_HEAD(bp_task_head);
 
 
@@ -96,8 +99,8 @@ static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
  */
 static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 {
+	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
 	int i;
-	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 
 	for (i = nr_slots[type] - 1; i >= 0; i--) {
 		if (tsk_pinned[i] > 0)
@@ -146,8 +149,10 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 	int cpu;
 
 	for_each_cpu(cpu, cpumask) {
-		unsigned int nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
+		struct bp_cpuinfo *info = get_bp_info(cpu, type);
+		int nr;
 
+		nr = info->cpu_pinned;
 		if (!bp->hw.bp_target)
 			nr += max_task_bp_pinned(cpu, type);
 		else
@@ -156,8 +161,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (nr > slots->pinned)
 			slots->pinned = nr;
 
-		nr = per_cpu(nr_bp_flexible[type], cpu);
-
+		nr = info->flexible;
 		if (nr > slots->flexible)
 			slots->flexible = nr;
 	}
@@ -180,8 +184,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 static void toggle_bp_task_slot(struct perf_event *bp, int cpu,
 				enum bp_type_idx type, int weight)
 {
-	/* tsk_pinned[n-1] is the number of tasks having n>0 breakpoints */
-	unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
+	unsigned int *tsk_pinned = get_bp_info(cpu, type)->tsk_pinned;
 	int old_idx, new_idx;
 
 	old_idx = task_bp_pinned(cpu, bp, type) - 1;
@@ -208,7 +211,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 
 	/* Pinned counter cpu profiling */
 	if (!bp->hw.bp_target) {
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+		get_bp_info(bp->cpu, type)->cpu_pinned += weight;
 		return;
 	}
 
@@ -240,8 +243,8 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
  *
  *   - If attached to a single cpu, check:
  *
- *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
- *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
+ *       (per_cpu(info->flexible, cpu) || (per_cpu(info->cpu_pinned, cpu)
+ *           + max(per_cpu(info->tsk_pinned, cpu)))) < HBP_NUM
  *
  *       -> If there are already non-pinned counters in this cpu, it means
  *          there is already a free slot for them.
@@ -251,8 +254,8 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
  *
  *   - If attached to every cpus, check:
  *
- *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
- *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
+ *       (per_cpu(info->flexible, *) || (max(per_cpu(info->cpu_pinned, *))
+ *           + max(per_cpu(info->tsk_pinned, *)))) < HBP_NUM
  *
  *       -> This is roughly the same, except we check the number of per cpu
  *          bp for every cpu and we keep the max one. Same for the per tasks
@@ -263,16 +266,16 @@ __weak void arch_unregister_hw_breakpoint(struct perf_event *bp)
  *
  *   - If attached to a single cpu, check:
  *
- *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
- *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
+ *       ((per_cpu(info->flexible, cpu) > 1) + per_cpu(info->cpu_pinned, cpu)
+ *            + max(per_cpu(info->tsk_pinned, cpu))) < HBP_NUM
  *
- *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
+ *       -> Same checks as before. But now the info->flexible, if any, must keep
  *          one register at least (or they will never be fed).
  *
  *   - If attached to every cpus, check:
  *
- *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
- *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
+ *       ((per_cpu(info->flexible, *) > 1) + max(per_cpu(info->cpu_pinned, *))
+ *            + max(per_cpu(info->tsk_pinned, *))) < HBP_NUM
  */
 static int __reserve_bp_slot(struct perf_event *bp)
 {
@@ -617,7 +620,6 @@ static struct pmu perf_breakpoint = {
 
 int __init init_hw_breakpoint(void)
 {
-	unsigned int **task_bp_pinned;
 	int cpu, err_cpu;
 	int i;
 
@@ -626,10 +628,11 @@ int __init init_hw_breakpoint(void)
 
 	for_each_possible_cpu(cpu) {
 		for (i = 0; i < TYPE_MAX; i++) {
-			task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
-			*task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
-						  GFP_KERNEL);
-			if (!*task_bp_pinned)
+			struct bp_cpuinfo *info = get_bp_info(cpu, i);
+
+			info->tsk_pinned = kcalloc(nr_slots[i], sizeof(int),
+							GFP_KERNEL);
+			if (!info->tsk_pinned)
 				goto err_alloc;
 		}
 	}
@@ -643,7 +646,7 @@ int __init init_hw_breakpoint(void)
  err_alloc:
 	for_each_possible_cpu(err_cpu) {
 		for (i = 0; i < TYPE_MAX; i++)
-			kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
+			kfree(get_bp_info(err_cpu, i)->tsk_pinned);
 		if (err_cpu == cpu)
 			break;
 	}
-- 
1.5.5.1


Thread overview: 28+ messages
2013-05-20 16:19 WARN_ONCE in arch/x86/kernel/hw_breakpoint.c Vince Weaver
2013-05-28 17:00 ` Oleg Nesterov
2013-05-28 17:28   ` Oleg Nesterov
2013-05-28 18:47     ` Oleg Nesterov
2013-05-29 16:32       ` [MAYBEPATCH] : " Oleg Nesterov
2013-06-01 18:20 ` [PATCH 0/2]: " Oleg Nesterov
2013-06-01 18:21   ` [PATCH 1/2] hw_breakpoint: Fix cpu check in task_bp_pinned(cpu) Oleg Nesterov
2013-06-13 14:20     ` Frederic Weisbecker
2013-06-01 18:21   ` [PATCH 2/2] hw_breakpoint: Use cpu_possible_mask in {reserve,release}_bp_slot() Oleg Nesterov
2013-06-15 12:46     ` Frederic Weisbecker
2013-06-01 19:45 ` [PATCH 0/3] hw_breakpoint: cleanups Oleg Nesterov
2013-06-01 19:45   ` [PATCH 1/3] hw_breakpoint: Simplify list/idx mess in toggle_bp_slot() paths Oleg Nesterov
2013-06-15 12:59     ` Frederic Weisbecker
2013-06-01 19:46   ` [PATCH 2/3] hw_breakpoint: Simplify the "weight" usage " Oleg Nesterov
2013-06-15 13:14     ` Frederic Weisbecker
2013-06-01 19:46   ` [PATCH 3/3] hw_breakpoint: Introduce cpumask_of_bp() Oleg Nesterov
2013-06-15 13:29     ` Frederic Weisbecker
2013-06-13 14:01   ` [PATCH 0/3] hw_breakpoint: cleanups Frederic Weisbecker
2013-06-13 15:15     ` Oleg Nesterov
2013-06-13 15:24       ` Frederic Weisbecker
2013-06-02 19:49 ` [PATCH 0/2] hw_breakpoint: more cleanups Oleg Nesterov
2013-06-02 19:50   ` [PATCH 1/2] hw_breakpoint: Simplify *register_wide_hw_breakpoint() Oleg Nesterov
2013-06-18  0:12     ` Frederic Weisbecker
2013-06-02 19:50   ` Oleg Nesterov [this message]
2013-06-18 12:37     ` [PATCH 2/2] hw_breakpoint: Introduce "struct bp_cpuinfo" Frederic Weisbecker
2013-06-18 14:42       ` Oleg Nesterov
2013-06-18 17:01         ` Frederic Weisbecker
2013-06-19 15:54           ` Oleg Nesterov
