* [PATCH] tty, add kref to sysrq handlers
@ 2012-05-14 14:41 Prarit Bhargava
From: Prarit Bhargava @ 2012-05-14 14:41 UTC (permalink / raw)
To: linux-kernel
Cc: Prarit Bhargava, gregkh, John Stultz, Thomas Gleixner, lwoodman,
jbaron, dzickus, alan
On a large system with a large number of tasks, the output of
echo t > /proc/sysrq-trigger
can take a long time. If this time is greater than the period of the
current clocksource, the clocksource watchdog will mark the clocksource
as unstable and the kernel will fail over to another clocksource.
The problem with sysrq is that __handle_sysrq() takes a spin_lock with
interrupts disabled and keeps interrupts disabled for the duration of the
handler. If this happens during sysrq-t on a large system with a large
number of tasks, the result is a "brown-out" of the system.
The spin_lock in question, sysrq_key_table_lock, is in place to prevent
the removal of a sysrq handler while it is being executed in
__handle_sysrq().
A kref is added to each sysrq handler and is incremented and decremented
in __handle_sysrq(). This approach, while more complicated than a single
lock, minimizes the time that sysrq_key_table_lock is held and results
in a functional sysrq-t.
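In outline, the reworked flow looks roughly like the sketch below. This is
a simplified illustration of the locking pattern rather than the patch
itself: the names (sysrq_key_table_lock, __sysrq_get_key_op, sysrq_release)
follow the diff further down, the console loglevel and help-text handling
are omitted, and kref_put() is called here without the table lock held
because sysrq_release() takes that lock itself.

/* Sketch only: look up the handler and take a reference under the
 * table lock, run the handler with the lock dropped, then put the
 * reference. */
static void handle_sysrq_sketch(int key)
{
	struct sysrq_key_op *op_p;
	unsigned long flags;

	/* Hold the lock only for the lookup and the kref_get(). */
	spin_lock_irqsave(&sysrq_key_table_lock, flags);
	op_p = __sysrq_get_key_op(key);
	if (op_p)
		kref_get(&op_p->kref);
	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);

	if (!op_p)
		return;

	/* The handler runs without sysrq_key_table_lock held, so a
	 * long-running handler such as sysrq-t no longer keeps
	 * interrupts disabled for its whole duration. */
	op_p->handler(key);

	/* Drop the reference; if an unregister raced with the handler,
	 * sysrq_release() clears the table entry once the last user
	 * is done. */
	kref_put(&op_p->kref, sysrq_release);
}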
I've tested both options and I no longer see the clocksource watchdog
marking the TSC clocksource as unstable.
Signed-off-by: Prarit Bhargava <prarit@redhat.com>
Cc: gregkh@linuxfoundation.org
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwoodman@redhat.com
Cc: jbaron@redhat.com
Cc: dzickus@redhat.com
Cc: alan@lxorguk.ukuu.org.uk
---
drivers/tty/sysrq.c | 42 +++++++++++++++++++++++++++++++++++++++---
include/linux/sysrq.h | 2 ++
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 0572889..38c6ae6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -458,6 +458,20 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_ftrace_dump_op, /* z */
};
+void sysrq_release(struct kref *kref)
+{
+ struct sysrq_key_op *release_op;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ release_op = container_of(kref, struct sysrq_key_op, kref);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i] == release_op)
+ sysrq_key_table[i] = NULL;
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+}
+
/* key2index calculation, -1 on invalid index */
static int sysrq_key_table_key2index(int key)
{
@@ -502,7 +516,6 @@ void __handle_sysrq(int key, bool check_mask)
int i;
unsigned long flags;
- spin_lock_irqsave(&sysrq_key_table_lock, flags);
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -513,7 +526,12 @@ void __handle_sysrq(int key, bool check_mask)
console_loglevel = 7;
printk(KERN_INFO "SysRq : ");
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
op_p = __sysrq_get_key_op(key);
+ if (op_p)
+ kref_get(&op_p->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
if (op_p) {
/*
* Should we check for enabled operations (/proc/sysrq-trigger
@@ -526,9 +544,14 @@ void __handle_sysrq(int key, bool check_mask)
} else {
printk("This sysrq operation is disabled.\n");
}
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ kref_put(&op_p->kref, sysrq_release);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
} else {
printk("HELP : ");
/* Only print the help msg once per handler */
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
int j;
@@ -541,10 +564,10 @@ void __handle_sysrq(int key, bool check_mask)
printk("%s ", sysrq_key_table[i]->help_msg);
}
}
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
printk("\n");
console_loglevel = orig_log_level;
}
- spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
void handle_sysrq(int key)
@@ -837,7 +860,12 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
spin_lock_irqsave(&sysrq_key_table_lock, flags);
if (__sysrq_get_key_op(key) == remove_op_p) {
- __sysrq_put_key_op(key, insert_op_p);
+ if (!remove_op_p) { /* register */
+ __sysrq_put_key_op(key, insert_op_p);
+ kref_init(&insert_op_p->kref);
+ }
+ if (!insert_op_p) /* unregister */
+ kref_put(&remove_op_p->kref, sysrq_release);
retval = 0;
} else {
retval = -1;
@@ -898,6 +926,14 @@ static inline void sysrq_init_procfs(void)
static int __init sysrq_init(void)
{
+ int i;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i])
+ kref_init(&sysrq_key_table[i]->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
sysrq_init_procfs();
if (sysrq_on())
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933..d458f39 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/kref.h>
/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
#define SYSRQ_DEFAULT_ENABLE 1
@@ -36,6 +37,7 @@ struct sysrq_key_op {
char *help_msg;
char *action_msg;
int enable_mask;
+ struct kref kref;
};
#ifdef CONFIG_MAGIC_SYSRQ
--
1.7.1
* [PATCH] tty, add kref to sysrq handlers
@ 2012-06-12 15:22 Prarit Bhargava
2012-07-19 14:06 ` Don Zickus
From: Prarit Bhargava @ 2012-06-12 15:22 UTC (permalink / raw)
To: linux-kernel
Cc: Prarit Bhargava, gregkh, John Stultz, Thomas Gleixner, lwoodman,
jbaron, dzickus
[This seems to have been missed ... sending again]
On a large system with a large number of tasks, the output of
echo t > /proc/sysrq-trigger
can take a long time. If this time is greater than the period of the
current clocksource, the clocksource watchdog will mark the clocksource
as unstable and the kernel will fail over to another clocksource.
The problem with sysrq is that __handle_sysrq() takes a spin_lock with
interrupts disabled and keeps interrupts disabled for the duration of the
handler. If this happens during sysrq-t on a large system with a large
number of tasks, the result is a "brown-out" of the system.
The spin_lock in question, sysrq_key_table_lock, is in place to prevent
the removal of a sysrq handler while it is being executed in
__handle_sysrq().
A kref is added to each sysrq handler and is incremented and decremented
in __handle_sysrq(). This approach, while more complicated than a single
lock, minimizes the time that sysrq_key_table_lock is held and results
in a functional sysrq-t.
I've tested both options and I no longer see the clocksource watchdog
marking the TSC clocksource as unstable.
Cc: gregkh@linuxfoundation.org
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwoodman@redhat.com
Cc: jbaron@redhat.com
Cc: dzickus@redhat.com
---
drivers/tty/sysrq.c | 42 +++++++++++++++++++++++++++++++++++++++---
include/linux/sysrq.h | 2 ++
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 0572889..38c6ae6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -458,6 +458,20 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_ftrace_dump_op, /* z */
};
+void sysrq_release(struct kref *kref)
+{
+ struct sysrq_key_op *release_op;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ release_op = container_of(kref, struct sysrq_key_op, kref);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i] == release_op)
+ sysrq_key_table[i] = NULL;
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+}
+
/* key2index calculation, -1 on invalid index */
static int sysrq_key_table_key2index(int key)
{
@@ -502,7 +516,6 @@ void __handle_sysrq(int key, bool check_mask)
int i;
unsigned long flags;
- spin_lock_irqsave(&sysrq_key_table_lock, flags);
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -513,7 +526,12 @@ void __handle_sysrq(int key, bool check_mask)
console_loglevel = 7;
printk(KERN_INFO "SysRq : ");
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
op_p = __sysrq_get_key_op(key);
+ if (op_p)
+ kref_get(&op_p->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
if (op_p) {
/*
* Should we check for enabled operations (/proc/sysrq-trigger
@@ -526,9 +544,14 @@ void __handle_sysrq(int key, bool check_mask)
} else {
printk("This sysrq operation is disabled.\n");
}
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ kref_put(&op_p->kref, sysrq_release);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
} else {
printk("HELP : ");
/* Only print the help msg once per handler */
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
int j;
@@ -541,10 +564,10 @@ void __handle_sysrq(int key, bool check_mask)
printk("%s ", sysrq_key_table[i]->help_msg);
}
}
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
printk("\n");
console_loglevel = orig_log_level;
}
- spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
void handle_sysrq(int key)
@@ -837,7 +860,12 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
spin_lock_irqsave(&sysrq_key_table_lock, flags);
if (__sysrq_get_key_op(key) == remove_op_p) {
- __sysrq_put_key_op(key, insert_op_p);
+ if (!remove_op_p) { /* register */
+ __sysrq_put_key_op(key, insert_op_p);
+ kref_init(&insert_op_p->kref);
+ }
+ if (!insert_op_p) /* unregister */
+ kref_put(&remove_op_p->kref, sysrq_release);
retval = 0;
} else {
retval = -1;
@@ -898,6 +926,14 @@ static inline void sysrq_init_procfs(void)
static int __init sysrq_init(void)
{
+ int i;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i])
+ kref_init(&sysrq_key_table[i]->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
sysrq_init_procfs();
if (sysrq_on())
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933..d458f39 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/kref.h>
/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
#define SYSRQ_DEFAULT_ENABLE 1
@@ -36,6 +37,7 @@ struct sysrq_key_op {
char *help_msg;
char *action_msg;
int enable_mask;
+ struct kref kref;
};
#ifdef CONFIG_MAGIC_SYSRQ
--
1.7.1
* Re: [PATCH] tty, add kref to sysrq handlers
2012-06-12 15:22 [PATCH] tty, add kref to sysrq handlers Prarit Bhargava
@ 2012-07-19 14:06 ` Don Zickus
From: Don Zickus @ 2012-07-19 14:06 UTC (permalink / raw)
To: Prarit Bhargava
Cc: linux-kernel, gregkh, John Stultz, Thomas Gleixner, lwoodman,
jbaron
On Tue, Jun 12, 2012 at 11:22:02AM -0400, Prarit Bhargava wrote:
> [This seems to have been missed ... sending again]
>
> On a large system with a large number of tasks, the output of
>
> echo t > /proc/sysrq-trigger
>
> can take a long period of time. If this period is greater than the period
> of the current clocksource, the clocksource watchdog will mark the
> clocksource as unstable and fail the clocksource over.
A lot of our customers are running into this issue (especially on the big
boxes). This patch solves their problem nicely.
The approach is a little heavier than the current one, but it allows
a useful feature like sysrq-t to work reliably.
Acked-by: Don Zickus <dzickus@redhat.com>
* [PATCH] tty, add kref to sysrq handlers
@ 2012-07-27 12:19 Prarit Bhargava
From: Prarit Bhargava @ 2012-07-27 12:19 UTC (permalink / raw)
To: linux-kernel
Cc: Prarit Bhargava, gregkh, John Stultz, Thomas Gleixner, lwoodman,
jbaron, alan
3rd try on this one ...
----8<-----
On a large system with a large number of tasks, the output of
echo t > /proc/sysrq-trigger
can take a long time. If this time is greater than the period of the
current clocksource, the clocksource watchdog will mark the clocksource
as unstable and the kernel will fail over to another clocksource.
The problem with sysrq is that __handle_sysrq() takes a spin_lock with
interrupts disabled and keeps interrupts disabled for the duration of the
handler. If this happens during sysrq-t on a large system with a large
number of tasks, the result is a "brown-out" of the system.
The spin_lock in question, sysrq_key_table_lock, is in place to prevent
the removal of a sysrq handler while it is being executed in
__handle_sysrq().
A kref is added to each sysrq handler and is incremented and decremented
in __handle_sysrq(). This approach, while more complicated than a single
lock, minimizes the time that sysrq_key_table_lock is held and results
in a functional sysrq-t.
I've tested both options and I no longer see the clocksource watchdog
marking the TSC clocksource as unstable.
Acked-by: Don Zickus <dzickus@redhat.com>
Cc: gregkh@linuxfoundation.org
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwoodman@redhat.com
Cc: jbaron@redhat.com
Cc: alan@linux.intel.com
---
drivers/tty/sysrq.c | 42 +++++++++++++++++++++++++++++++++++++++---
include/linux/sysrq.h | 2 ++
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 05728894..38c6ae6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -458,6 +458,20 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_ftrace_dump_op, /* z */
};
+void sysrq_release(struct kref *kref)
+{
+ struct sysrq_key_op *release_op;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ release_op = container_of(kref, struct sysrq_key_op, kref);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i] == release_op)
+ sysrq_key_table[i] = NULL;
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+}
+
/* key2index calculation, -1 on invalid index */
static int sysrq_key_table_key2index(int key)
{
@@ -502,7 +516,6 @@ void __handle_sysrq(int key, bool check_mask)
int i;
unsigned long flags;
- spin_lock_irqsave(&sysrq_key_table_lock, flags);
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -513,7 +526,12 @@ void __handle_sysrq(int key, bool check_mask)
console_loglevel = 7;
printk(KERN_INFO "SysRq : ");
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
op_p = __sysrq_get_key_op(key);
+ if (op_p)
+ kref_get(&op_p->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
if (op_p) {
/*
* Should we check for enabled operations (/proc/sysrq-trigger
@@ -526,9 +544,14 @@ void __handle_sysrq(int key, bool check_mask)
} else {
printk("This sysrq operation is disabled.\n");
}
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ kref_put(&op_p->kref, sysrq_release);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
} else {
printk("HELP : ");
/* Only print the help msg once per handler */
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
int j;
@@ -541,10 +564,10 @@ void __handle_sysrq(int key, bool check_mask)
printk("%s ", sysrq_key_table[i]->help_msg);
}
}
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
printk("\n");
console_loglevel = orig_log_level;
}
- spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
void handle_sysrq(int key)
@@ -837,7 +860,12 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
spin_lock_irqsave(&sysrq_key_table_lock, flags);
if (__sysrq_get_key_op(key) == remove_op_p) {
- __sysrq_put_key_op(key, insert_op_p);
+ if (!remove_op_p) { /* register */
+ __sysrq_put_key_op(key, insert_op_p);
+ kref_init(&insert_op_p->kref);
+ }
+ if (!insert_op_p) /* unregister */
+ kref_put(&remove_op_p->kref, sysrq_release);
retval = 0;
} else {
retval = -1;
@@ -898,6 +926,14 @@ static inline void sysrq_init_procfs(void)
static int __init sysrq_init(void)
{
+ int i;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i])
+ kref_init(&sysrq_key_table[i]->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
sysrq_init_procfs();
if (sysrq_on())
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933..d458f39 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/kref.h>
/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
#define SYSRQ_DEFAULT_ENABLE 1
@@ -36,6 +37,7 @@ struct sysrq_key_op {
char *help_msg;
char *action_msg;
int enable_mask;
+ struct kref kref;
};
#ifdef CONFIG_MAGIC_SYSRQ
--
1.7.9.3
* [PATCH] tty, add kref to sysrq handlers
@ 2012-07-27 13:04 Prarit Bhargava
From: Prarit Bhargava @ 2012-07-27 13:04 UTC (permalink / raw)
To: linux-kernel
Cc: Prarit Bhargava, gregkh, John Stultz, Thomas Gleixner, lwoodman,
jbaron, alan
On a large system with a large number of tasks, the output of
echo t > /proc/sysrq-trigger
can take a long time. If this time is greater than the period of the
current clocksource, the clocksource watchdog will mark the clocksource
as unstable and the kernel will fail over to another clocksource.
The problem with sysrq is that __handle_sysrq() takes a spin_lock with
interrupts disabled and keeps interrupts disabled for the duration of the
handler. If this happens during sysrq-t on a large system with a large
number of tasks, the result is a "brown-out" of the system.
The spin_lock in question, sysrq_key_table_lock, is in place to prevent
the removal of a sysrq handler while it is being executed in
__handle_sysrq().
A kref is added to each sysrq handler and is incremented and decremented
in __handle_sysrq(). This approach, while more complicated than a single
lock, minimizes the time that sysrq_key_table_lock is held and results
in a functional sysrq-t.
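On the registration side, the idea is sketched below (again a simplified
illustration, not the exact __sysrq_swap_key_ops() hunk in the diff): a
handler's kref is initialized when it is installed and put when it is
unregistered, and in this sketch the final kref_put() is done outside the
table lock because sysrq_release() acquires sysrq_key_table_lock itself.

/* Sketch only: register initializes the kref, unregister drops the
 * table's reference.  sysrq_release() clears the table slot once the
 * last user (including any in-flight __handle_sysrq()) is gone. */
static int sysrq_swap_key_ops_sketch(int key, struct sysrq_key_op *insert_op_p,
				     struct sysrq_key_op *remove_op_p)
{
	unsigned long flags;
	int retval = -1;

	spin_lock_irqsave(&sysrq_key_table_lock, flags);
	if (__sysrq_get_key_op(key) == remove_op_p) {
		if (!remove_op_p) {			/* register */
			__sysrq_put_key_op(key, insert_op_p);
			kref_init(&insert_op_p->kref);
		}
		retval = 0;
	}
	spin_unlock_irqrestore(&sysrq_key_table_lock, flags);

	/* Unregister: drop the reference outside the lock. */
	if (!retval && !insert_op_p && remove_op_p)
		kref_put(&remove_op_p->kref, sysrq_release);

	return retval;
}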
I've tested both options and I no longer see the clocksource watchdog
marking the TSC clocksource as unstable.
Signed-off-by: Prarit Bhargava <prarit@redhat.com>
Acked-by: Don Zickus <dzickus@redhat.com>
Cc: gregkh@linuxfoundation.org
Cc: John Stultz <johnstul@us.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: lwoodman@redhat.com
Cc: jbaron@redhat.com
Cc: alan@linux.intel.com
---
drivers/tty/sysrq.c | 42 +++++++++++++++++++++++++++++++++++++++---
include/linux/sysrq.h | 2 ++
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 05728894..38c6ae6 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -458,6 +458,20 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
&sysrq_ftrace_dump_op, /* z */
};
+void sysrq_release(struct kref *kref)
+{
+ struct sysrq_key_op *release_op;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ release_op = container_of(kref, struct sysrq_key_op, kref);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i] == release_op)
+ sysrq_key_table[i] = NULL;
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+}
+
/* key2index calculation, -1 on invalid index */
static int sysrq_key_table_key2index(int key)
{
@@ -502,7 +516,6 @@ void __handle_sysrq(int key, bool check_mask)
int i;
unsigned long flags;
- spin_lock_irqsave(&sysrq_key_table_lock, flags);
/*
* Raise the apparent loglevel to maximum so that the sysrq header
* is shown to provide the user with positive feedback. We do not
@@ -513,7 +526,12 @@ void __handle_sysrq(int key, bool check_mask)
console_loglevel = 7;
printk(KERN_INFO "SysRq : ");
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
op_p = __sysrq_get_key_op(key);
+ if (op_p)
+ kref_get(&op_p->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
if (op_p) {
/*
* Should we check for enabled operations (/proc/sysrq-trigger
@@ -526,9 +544,14 @@ void __handle_sysrq(int key, bool check_mask)
} else {
printk("This sysrq operation is disabled.\n");
}
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ kref_put(&op_p->kref, sysrq_release);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
} else {
printk("HELP : ");
/* Only print the help msg once per handler */
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++) {
if (sysrq_key_table[i]) {
int j;
@@ -541,10 +564,10 @@ void __handle_sysrq(int key, bool check_mask)
printk("%s ", sysrq_key_table[i]->help_msg);
}
}
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
printk("\n");
console_loglevel = orig_log_level;
}
- spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
}
void handle_sysrq(int key)
@@ -837,7 +860,12 @@ static int __sysrq_swap_key_ops(int key, struct sysrq_key_op *insert_op_p,
spin_lock_irqsave(&sysrq_key_table_lock, flags);
if (__sysrq_get_key_op(key) == remove_op_p) {
- __sysrq_put_key_op(key, insert_op_p);
+ if (!remove_op_p) { /* register */
+ __sysrq_put_key_op(key, insert_op_p);
+ kref_init(&insert_op_p->kref);
+ }
+ if (!insert_op_p) /* unregister */
+ kref_put(&remove_op_p->kref, sysrq_release);
retval = 0;
} else {
retval = -1;
@@ -898,6 +926,14 @@ static inline void sysrq_init_procfs(void)
static int __init sysrq_init(void)
{
+ int i;
+
+ spin_lock_irqsave(&sysrq_key_table_lock, flags);
+ for (i = 0; i < ARRAY_SIZE(sysrq_key_table); i++)
+ if (sysrq_key_table[i])
+ kref_init(&sysrq_key_table[i]->kref);
+ spin_unlock_irqrestore(&sysrq_key_table_lock, flags);
+
sysrq_init_procfs();
if (sysrq_on())
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 7faf933..d458f39 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/kref.h>
/* Enable/disable SYSRQ support by default (0==no, 1==yes). */
#define SYSRQ_DEFAULT_ENABLE 1
@@ -36,6 +37,7 @@ struct sysrq_key_op {
char *help_msg;
char *action_msg;
int enable_mask;
+ struct kref kref;
};
#ifdef CONFIG_MAGIC_SYSRQ
--
1.7.9.3