* [GIT PULL] Lockup detector updates
@ 2010-05-19 10:37 Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 1/2] lockup_detector: Convert per_cpu to __get_cpu_var for readability Frederic Weisbecker
` (2 more replies)
0 siblings, 3 replies; 4+ messages in thread
From: Frederic Weisbecker @ 2010-05-19 10:37 UTC (permalink / raw)
To: Ingo Molnar
Cc: LKML, Frederic Weisbecker, Don Zickus, Peter Zijlstra,
Cyrill Gorcunov
Ingo,
Please pull the perf/nmi branch that can be found at:
git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing.git
perf/nmi
Thanks,
Frederic
---
Don Zickus (1):
lockup_detector: Convert per_cpu to __get_cpu_var for readability
Frederic Weisbecker (1):
lockup_detector: Don't enable the lockup detector by default
kernel/watchdog.c | 35 +++++++++++++++++------------------
lib/Kconfig.debug | 1 -
2 files changed, 17 insertions(+), 19 deletions(-)
^ permalink raw reply [flat|nested] 4+ messages in thread
* [PATCH 1/2] lockup_detector: Convert per_cpu to __get_cpu_var for readability
2010-05-19 10:37 [GIT PULL] Lockup detector updates Frederic Weisbecker
@ 2010-05-19 10:37 ` Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 2/2] lockup_detector: Don't enable the lockup detector by default Frederic Weisbecker
2010-05-19 11:10 ` [GIT PULL] Lockup detector updates Ingo Molnar
2 siblings, 0 replies; 4+ messages in thread
From: Frederic Weisbecker @ 2010-05-19 10:37 UTC (permalink / raw)
To: Ingo Molnar
Cc: LKML, Don Zickus, Ingo Molnar, Peter Zijlstra, Cyrill Gorcunov,
Frederic Weisbecker
From: Don Zickus <dzickus@redhat.com>
Just a bunch of conversions as suggested by Frederic W.
__get_cpu_var() provides preemption disabled checks.
Plus it gives more readability as it makes it obvious
we are dealing locally now with these vars.
Signed-off-by: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
LKML-Reference: <1274133966-18415-2-git-send-email-dzickus@redhat.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---
kernel/watchdog.c | 35 +++++++++++++++++------------------
1 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e53622c..91b0b26 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -115,7 +115,7 @@ static unsigned long get_sample_period(void)
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
- int this_cpu = raw_smp_processor_id();
+ int this_cpu = smp_processor_id();
__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}
@@ -157,21 +157,21 @@ void touch_softlockup_watchdog_sync(void)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
-static int is_hardlockup(int cpu)
+static int is_hardlockup(void)
{
- unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+ unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
- if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+ if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
return 1;
- per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+ __get_cpu_var(hrtimer_interrupts_saved) = hrint;
return 0;
}
#endif
-static int is_softlockup(unsigned long touch_ts, int cpu)
+static int is_softlockup(unsigned long touch_ts)
{
- unsigned long now = get_timestamp(cpu);
+ unsigned long now = get_timestamp(smp_processor_id());
/* Warn about unreasonable delays: */
if (time_after(now, touch_ts + softlockup_thresh))
@@ -206,8 +206,6 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- int this_cpu = smp_processor_id();
-
if (__get_cpu_var(watchdog_nmi_touch) == true) {
__get_cpu_var(watchdog_nmi_touch) = false;
return;
@@ -219,7 +217,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
* fired multiple times before we overflow'd. If it hasn't
* then this is a good indication the cpu is stuck
*/
- if (is_hardlockup(this_cpu)) {
+ if (is_hardlockup()) {
+ int this_cpu = smp_processor_id();
+
/* only print hardlockups once */
if (__get_cpu_var(hard_watchdog_warn) == true)
return;
@@ -247,7 +247,6 @@ static inline void watchdog_interrupt_count(void) { return; }
/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
- int this_cpu = smp_processor_id();
unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
struct pt_regs *regs = get_irq_regs();
int duration;
@@ -262,12 +261,12 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
if (touch_ts == 0) {
- if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
+ if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
/*
* If the time stamp was touched atomically
* make sure the scheduler tick is up to date.
*/
- per_cpu(softlockup_touch_sync, this_cpu) = false;
+ __get_cpu_var(softlockup_touch_sync) = false;
sched_clock_tick();
}
__touch_watchdog();
@@ -280,14 +279,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
* indicate it is getting cpu time. If it hasn't then
* this is a good indication some task is hogging the cpu
*/
- duration = is_softlockup(touch_ts, this_cpu);
+ duration = is_softlockup(touch_ts);
if (unlikely(duration)) {
/* only warn once */
if (__get_cpu_var(soft_watchdog_warn) == true)
return HRTIMER_RESTART;
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
- this_cpu, duration,
+ smp_processor_id(), duration,
current->comm, task_pid_nr(current));
print_modules();
print_irqtrace_events(current);
@@ -309,10 +308,10 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/*
* The watchdog thread - touches the timestamp.
*/
-static int watchdog(void *__bind_cpu)
+static int watchdog(void *unused)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);
+ struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
sched_setscheduler(current, SCHED_FIFO, &param);
@@ -328,7 +327,7 @@ static int watchdog(void *__bind_cpu)
/*
* Run briefly once per second to reset the softlockup timestamp.
* If this gets delayed for more than 60 seconds then the
- * debug-printout triggers in softlockup_tick().
+ * debug-printout triggers in watchdog_timer_fn().
*/
while (!kthread_should_stop()) {
__touch_watchdog();
--
1.6.2.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH 2/2] lockup_detector: Don't enable the lockup detector by default
2010-05-19 10:37 [GIT PULL] Lockup detector updates Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 1/2] lockup_detector: Convert per_cpu to __get_cpu_var for readability Frederic Weisbecker
@ 2010-05-19 10:37 ` Frederic Weisbecker
2010-05-19 11:10 ` [GIT PULL] Lockup detector updates Ingo Molnar
2 siblings, 0 replies; 4+ messages in thread
From: Frederic Weisbecker @ 2010-05-19 10:37 UTC (permalink / raw)
To: Ingo Molnar
Cc: LKML, Frederic Weisbecker, Don Zickus, Ingo Molnar,
Peter Zijlstra, Cyrill Gorcunov
The lockup detector is a new feature that now involves the
nmi watchdog. Drop the default y and let the user choose.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
---
lib/Kconfig.debug | 1 -
1 files changed, 0 insertions(+), 1 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index e65e47d..63968a9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -155,7 +155,6 @@ config DEBUG_SHIRQ
config LOCKUP_DETECTOR
bool "Detect Hard and Soft Lockups"
depends on DEBUG_KERNEL && !S390
- default y
help
Say Y here to enable the kernel to act as a watchdog to detect
hard and soft lockups.
--
1.6.2.3
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [GIT PULL] Lockup detector updates
2010-05-19 10:37 [GIT PULL] Lockup detector updates Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 1/2] lockup_detector: Convert per_cpu to __get_cpu_var for readability Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 2/2] lockup_detector: Don't enable the lockup detector by default Frederic Weisbecker
@ 2010-05-19 11:10 ` Ingo Molnar
2 siblings, 0 replies; 4+ messages in thread
From: Ingo Molnar @ 2010-05-19 11:10 UTC (permalink / raw)
To: Frederic Weisbecker; +Cc: LKML, Don Zickus, Peter Zijlstra, Cyrill Gorcunov
* Frederic Weisbecker <fweisbec@gmail.com> wrote:
> Ingo,
>
> Please pull the perf/nmi branch that can be found at:
>
> git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing.git
> perf/nmi
>
> Thanks,
> Frederic
> ---
>
> Don Zickus (1):
> lockup_detector: Convert per_cpu to __get_cpu_var for readability
>
> Frederic Weisbecker (1):
> lockup_detector: Don't enable the lockup detector by default
>
>
> kernel/watchdog.c | 35 +++++++++++++++++------------------
> lib/Kconfig.debug | 1 -
> 2 files changed, 17 insertions(+), 19 deletions(-)
Pulled, thanks!
Ingo
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2010-05-19 11:10 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-05-19 10:37 [GIT PULL] Lockup detector updates Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 1/2] lockup_detector: Convert per_cpu to __get_cpu_var for readability Frederic Weisbecker
2010-05-19 10:37 ` [PATCH 2/2] lockup_detector: Don't enable the lockup detector by default Frederic Weisbecker
2010-05-19 11:10 ` [GIT PULL] Lockup detector updates Ingo Molnar
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox