* [PATCH] tty: Use raw spin lock to protect the TTY read section
From: Ivo Sieben @ 2012-09-20 14:09 UTC
To: Alan Cox, Greg KH, linux-serial, RT; +Cc: Ivo Sieben
The "normal" spin lock that guards the N_TTY line discipline read section
is replaced by a raw spin lock.
On a PREEMPT_RT system this prevents unwanted scheduling overhead when data is
read at the same time as data is being received: while the threaded RX IRQ
handler is busy, a TTY read call is performed by a task whose RT priority is
higher than that of the threaded IRQ handler. The read call tries to take the
read section spin lock (held by the threaded IRQ handler), blocks, and causes
a context switch to and from the threaded IRQ handler until the spin lock is
released.
On a 240 MHz AT91SAM9261 processor setup this avoids about 100 us of scheduling
overhead on the TTY read call.
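To illustrate the pattern (a minimal sketch with made-up names, not the actual
n_tty code): on PREEMPT_RT a contended spinlock_t is backed by a sleeping
rt_mutex, so the caller schedules away, while a raw_spinlock_t keeps the
classic busy-wait behaviour, which is acceptable for short read-buffer updates:

#include <linux/spinlock.h>

/*
 * Sketch of a producer/consumer ring buffer guarded by a raw spin lock,
 * mirroring the pattern this patch applies to the N_TTY read buffer.
 * All names here are illustrative only.
 */
struct rx_ring {
	raw_spinlock_t lock;	/* stays a busy-wait lock on PREEMPT_RT */
	unsigned char buf[256];
	int head, tail, cnt;
};

/* Producer side: called from the threaded RX IRQ handler. */
static void rx_ring_put(struct rx_ring *r, unsigned char c)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&r->lock, flags);
	if (r->cnt < 256) {
		r->buf[r->head] = c;
		r->head = (r->head + 1) & 255;
		r->cnt++;
	}
	raw_spin_unlock_irqrestore(&r->lock, flags);
}

/*
 * Consumer side: called from the higher-priority reader. The critical
 * section is short, so briefly spinning here is cheaper than the
 * sleep/wakeup round trip a PREEMPT_RT spinlock_t would trigger.
 */
static int rx_ring_get(struct rx_ring *r)
{
	unsigned long flags;
	int c = -1;

	raw_spin_lock_irqsave(&r->lock, flags);
	if (r->cnt) {
		c = r->buf[r->tail];
		r->tail = (r->tail + 1) & 255;
		r->cnt--;
	}
	raw_spin_unlock_irqrestore(&r->lock, flags);
	return c;
}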
Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com>
---
drivers/tty/n_tty.c | 44 ++++++++++++++++++++++----------------------
drivers/tty/tty_io.c | 2 +-
include/linux/tty.h | 2 +-
3 files changed, 24 insertions(+), 24 deletions(-)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index ee1c268..743ef45 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -138,9 +138,9 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty)
* The problem of stomping on the buffers ends here.
* Why didn't anyone see this one coming? --AJK
*/
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
put_tty_queue_nolock(c, tty);
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
}
/**
@@ -173,9 +173,9 @@ static void reset_buffer_flags(struct tty_struct *tty)
{
unsigned long flags;
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_head = tty->read_tail = tty->read_cnt = 0;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
mutex_lock(&tty->echo_lock);
tty->echo_pos = tty->echo_cnt = tty->echo_overrun = 0;
@@ -230,7 +230,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
ssize_t n = 0;
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
if (!tty->icanon) {
n = tty->read_cnt;
} else if (tty->canon_data) {
@@ -238,7 +238,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
tty->canon_head - tty->read_tail :
tty->canon_head + (N_TTY_BUF_SIZE - tty->read_tail);
}
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
return n;
}
@@ -868,19 +868,19 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_cnt -= ((tty->read_head - tty->canon_head) &
(N_TTY_BUF_SIZE - 1));
tty->read_head = tty->canon_head;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_cnt -= ((tty->read_head - tty->canon_head) &
(N_TTY_BUF_SIZE - 1));
tty->read_head = tty->canon_head;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
finish_erasing(tty);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -914,10 +914,10 @@ static void eraser(unsigned char c, struct tty_struct *tty)
break;
}
cnt = (tty->read_head - head) & (N_TTY_BUF_SIZE-1);
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_head = head;
tty->read_cnt -= cnt;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!tty->erasing) {
@@ -1290,12 +1290,12 @@ send_signal:
put_tty_queue(c, tty);
handle_newline:
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
set_bit(tty->read_head, tty->read_flags);
put_tty_queue_nolock(c, tty);
tty->canon_head = tty->read_head;
tty->canon_data++;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1371,7 +1371,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
return;
if (tty->real_raw) {
- spin_lock_irqsave(&tty->read_lock, cpuflags);
+ raw_spin_lock_irqsave(&tty->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - tty->read_cnt,
N_TTY_BUF_SIZE - tty->read_head);
i = min(count, i);
@@ -1387,7 +1387,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
- spin_unlock_irqrestore(&tty->read_lock, cpuflags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else {
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
@@ -1633,23 +1633,23 @@ static int copy_from_read_buf(struct tty_struct *tty,
bool is_eof;
retval = 0;
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
n = min(tty->read_cnt, N_TTY_BUF_SIZE - tty->read_tail);
n = min(*nr, n);
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
if (n) {
retval = copy_to_user(*b, &tty->read_buf[tty->read_tail], n);
n -= retval;
is_eof = n == 1 &&
tty->read_buf[tty->read_tail] == EOF_CHAR(tty);
tty_audit_add_data(tty, &tty->read_buf[tty->read_tail], n);
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = (tty->read_tail + n) & (N_TTY_BUF_SIZE-1);
tty->read_cnt -= n;
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && tty->icanon && is_eof && !tty->read_cnt)
n = 0;
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
*b += n;
*nr -= n;
}
@@ -1838,7 +1838,7 @@ do_it_again:
eol = test_and_clear_bit(tty->read_tail,
tty->read_flags);
c = tty->read_buf[tty->read_tail];
- spin_lock_irqsave(&tty->read_lock, flags);
+ raw_spin_lock_irqsave(&tty->read_lock, flags);
tty->read_tail = ((tty->read_tail+1) &
(N_TTY_BUF_SIZE-1));
tty->read_cnt--;
@@ -1850,7 +1850,7 @@ do_it_again:
if (--tty->canon_data < 0)
tty->canon_data = 0;
}
- spin_unlock_irqrestore(&tty->read_lock, flags);
+ raw_spin_unlock_irqrestore(&tty->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) {
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index b425c79..a3ef00b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2943,7 +2943,7 @@ void initialize_tty_struct(struct tty_struct *tty,
mutex_init(&tty->atomic_write_lock);
mutex_init(&tty->output_lock);
mutex_init(&tty->echo_lock);
- spin_lock_init(&tty->read_lock);
+ raw_spin_lock_init(&tty->read_lock);
spin_lock_init(&tty->ctrl_lock);
INIT_LIST_HEAD(&tty->tty_files);
INIT_WORK(&tty->SAK_work, do_SAK_work);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e728ecc..21bceef 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -327,7 +327,7 @@ struct tty_struct {
struct mutex echo_lock;
unsigned char *write_buf;
int write_cnt;
- spinlock_t read_lock;
+ raw_spinlock_t read_lock;
/* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
--
1.7.9.5
* [PATCH-v2] tty: Use raw spin lock to protect the TTY read section
From: Ivo Sieben @ 2013-01-24 10:34 UTC
To: linux-serial, RT, Greg KH; +Cc: Ivo Sieben
The "normal" spin lock that guards the N_TTY line discipline read section
is replaced by a raw spin lock.
On a PREEMPT_RT system this prevents unwanted scheduling overhead when data is
read at the same time as data is being received: while the threaded RX IRQ
handler is busy, a TTY read call is performed by a task whose RT priority is
higher than that of the threaded IRQ handler. The read call tries to take the
read section spin lock (held by the threaded IRQ handler), blocks, and causes
a context switch to and from the threaded IRQ handler until the spin lock is
released.
On a 240 MHz AT91SAM9261 processor setup this avoids about 100 us of scheduling
overhead on the TTY read call.
Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com
---
v2:
Rebased this patch onto the latest Linux tree.
I tested this on a 3.0.43-rt65 kernel, with the following Kernel Hacking options
enabled:
- CONFIG_DEBUG_RT_MUTEXES
- CONFIG_DEBUG_SPINLOCK
- CONFIG_DEBUG_MUTEXES
- CONFIG_DEBUG_LOCK_ALLOC
- CONFIG_PROVE_LOCKING
- CONFIG_DEBUG_SPINLOCK_SLEEP
drivers/tty/n_tty.c | 56 +++++++++++++++++++++++++--------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 19083ef..0950728 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -100,7 +100,7 @@ struct n_tty_data {
struct mutex atomic_read_lock;
struct mutex output_lock;
struct mutex echo_lock;
- spinlock_t read_lock;
+ raw_spinlock_t read_lock;
};
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
@@ -182,9 +182,9 @@ static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
* The problem of stomping on the buffers ends here.
* Why didn't anyone see this one coming? --AJK
*/
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
put_tty_queue_nolock(c, ldata);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
}
/**
@@ -218,9 +218,9 @@ static void reset_buffer_flags(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
unsigned long flags;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = ldata->read_tail = ldata->read_cnt = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
mutex_lock(&ldata->echo_lock);
ldata->echo_pos = ldata->echo_cnt = ldata->echo_overrun = 0;
@@ -276,7 +276,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
ssize_t n = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
if (!ldata->icanon) {
n = ldata->read_cnt;
} else if (ldata->canon_data) {
@@ -284,7 +284,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
ldata->canon_head - ldata->read_tail :
ldata->canon_head + (N_TTY_BUF_SIZE - ldata->read_tail);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return n;
}
@@ -915,19 +915,19 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -961,10 +961,10 @@ static void eraser(unsigned char c, struct tty_struct *tty)
break;
}
cnt = (ldata->read_head - head) & (N_TTY_BUF_SIZE-1);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = head;
ldata->read_cnt -= cnt;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
@@ -1344,12 +1344,12 @@ send_signal:
put_tty_queue(c, ldata);
handle_newline:
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
set_bit(ldata->read_head, ldata->read_flags);
put_tty_queue_nolock(c, ldata);
ldata->canon_head = ldata->read_head;
ldata->canon_data++;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1423,7 +1423,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
unsigned long cpuflags;
if (ldata->real_raw) {
- spin_lock_irqsave(&ldata->read_lock, cpuflags);
+ raw_spin_lock_irqsave(&ldata->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
N_TTY_BUF_SIZE - ldata->read_head);
i = min(count, i);
@@ -1439,7 +1439,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
memcpy(ldata->read_buf + ldata->read_head, cp, i);
ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt += i;
- spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
} else {
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
@@ -1635,7 +1635,7 @@ static int n_tty_open(struct tty_struct *tty)
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
mutex_init(&ldata->echo_lock);
- spin_lock_init(&ldata->read_lock);
+ raw_spin_lock_init(&ldata->read_lock);
/* These are ugly. Currently a malloc failure here can panic */
ldata->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
@@ -1703,10 +1703,10 @@ static int copy_from_read_buf(struct tty_struct *tty,
bool is_eof;
retval = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
n = min(ldata->read_cnt, N_TTY_BUF_SIZE - ldata->read_tail);
n = min(*nr, n);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (n) {
retval = copy_to_user(*b, &ldata->read_buf[ldata->read_tail], n);
n -= retval;
@@ -1714,13 +1714,13 @@ static int copy_from_read_buf(struct tty_struct *tty,
ldata->read_buf[ldata->read_tail] == EOF_CHAR(tty);
tty_audit_add_data(tty, &ldata->read_buf[ldata->read_tail], n,
ldata->icanon);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_tail = (ldata->read_tail + n) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt -= n;
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof && !ldata->read_cnt)
n = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
*b += n;
*nr -= n;
}
@@ -1900,7 +1900,7 @@ do_it_again:
if (ldata->icanon && !L_EXTPROC(tty)) {
/* N.B. avoid overrun if nr == 0 */
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
while (nr && ldata->read_cnt) {
int eol;
@@ -1918,25 +1918,25 @@ do_it_again:
if (--ldata->canon_data < 0)
ldata->canon_data = 0;
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
nr--;
}
if (eol) {
tty_audit_push(tty);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (retval)
break;
} else {
--
1.7.9.5
* Re: [PATCH-v2] tty: Use raw spin lock to protect the TTY read section
From: Greg KH @ 2013-01-25 16:13 UTC
To: Ivo Sieben; +Cc: linux-serial, RT
On Thu, Jan 24, 2013 at 11:34:59AM +0100, Ivo Sieben wrote:
> The "normal" spin lock that guards the N_TTY line discipline read section
> is replaced by a raw spin lock.
>
> On a PREEMPT_RT system this prevents unwanted scheduling overhead when data is
> read at the same time as data is being received: while the threaded RX IRQ
> handler is busy, a TTY read call is performed by a task whose RT priority is
> higher than that of the threaded IRQ handler. The read call tries to take the
> read section spin lock (held by the threaded IRQ handler), blocks, and causes
> a context switch to and from the threaded IRQ handler until the spin lock is
> released.
>
> On a 240 MHz AT91SAM9261 processor setup this avoids about 100 us of scheduling
> overhead on the TTY read call.
Out of how many us total?
And this really makes a difference? I'd like to hear the rt developers'
opinion of this.
> Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com
Forgot the trailing '>' :)
thanks,
greg k-h
* Re: [PATCH-v2] tty: Use raw spin lock to protect the TTY read section
From: Ivo Sieben @ 2013-01-28 12:15 UTC
To: Greg KH; +Cc: linux-serial, RT
Hi,
2013/1/25 Greg KH <gregkh@linuxfoundation.org>:
> On Thu, Jan 24, 2013 at 11:34:59AM +0100, Ivo Sieben wrote:
>>
>> On a 240 MHz AT91SAM9261 processor setup this avoids about 100 us of scheduling
>> overhead on the TTY read call.
>
> Out of how many us total?
>
I tested with a TTY read call that, before all optimizations, had an average
latency of 76 us and a maximum of 305 us. After the optimizations the average
dropped to 59 us and the maximum to 135 us.
Of course that gain of 170 us is not entirely due to this patch; the other
TTY patches that I've posted also contributed.
I will remove the remark about the exact numbers from the commit message...
saying that it prevents unnecessary scheduling overhead is, I think,
sufficient to describe what this patch does.
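(For reference, the numbers above were gathered with a harness along these
lines - a simplified sketch, not the exact test code; it assumes the sender
keeps data pending so read() mostly measures locking/scheduling overhead,
and the process is expected to run at RT priority, e.g. via chrt:)

#include <fcntl.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/ttyS0", O_RDONLY | O_NOCTTY);
	unsigned char buf[64];
	struct timespec t0, t1;
	long sum_us = 0, max_us = 0;
	int i, n_samples = 10000;

	if (fd < 0)
		return 1;
	for (i = 0; i < n_samples; i++) {
		clock_gettime(CLOCK_MONOTONIC, &t0);
		ssize_t n = read(fd, buf, sizeof(buf)); /* blocks until RX data */
		clock_gettime(CLOCK_MONOTONIC, &t1);
		if (n <= 0)
			break;
		long us = (t1.tv_sec - t0.tv_sec) * 1000000L +
			  (t1.tv_nsec - t0.tv_nsec) / 1000L;
		sum_us += us;
		if (us > max_us)
			max_us = us;
	}
	printf("avg %ld us, max %ld us\n", i ? sum_us / i : 0, max_us);
	close(fd);
	return 0;
}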
> And this really makes a difference? I'd like to hear the rt developers'
> opinion of this.
>
> greg k-h
I will repost this patch and ask Thomas Gleixner and Steven Rostedt to
take a look at it.
Regards,
Ivo Sieben
* [PATCH-v3] tty: Use raw spin lock to protect the TTY read section
From: Ivo Sieben @ 2013-01-28 12:32 UTC
To: linux-serial, RT, Thomas Gleixner, Steven Rostedt, Greg KH; +Cc: Ivo Sieben
The "normal" spin lock that guards the N_TTY line discipline read section
is replaced by a raw spin lock.
On a PREEMPT_RT system this prevents unwanted scheduling overhead when data is
read at the same time as data is being received: while the threaded RX IRQ
handler is busy, a TTY read call is performed by a task whose RT priority is
higher than that of the threaded IRQ handler. The read call tries to take the
read section spin lock (held by the threaded IRQ handler), blocks, and causes
a context switch to and from the threaded IRQ handler until the spin lock is
released.
Signed-off-by: Ivo Sieben <meltedpianoman@gmail.com>
---
v3:
- Removed the detailed timing numbers from the commit message
@Thomas Gleixner & Steven Rostedt:
Greg Kroah-Hartman responded to this patch: "And this really makes a
difference? I'd like to hear the rt developers' opinion of this."
Would you care to take a look at it?
I tested this on a 3.0.43-rt65 kernel, with the following Kernel Hacking options
enabled:
- CONFIG_DEBUG_RT_MUTEXES
- CONFIG_DEBUG_SPINLOCK
- CONFIG_DEBUG_MUTEXES
- CONFIG_DEBUG_LOCK_ALLOC
- CONFIG_PROVE_LOCKING
- CONFIG_DEBUG_SPINLOCK_SLEEP
drivers/tty/n_tty.c | 56 +++++++++++++++++++++++++--------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 19083ef..0950728 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -100,7 +100,7 @@ struct n_tty_data {
struct mutex atomic_read_lock;
struct mutex output_lock;
struct mutex echo_lock;
- spinlock_t read_lock;
+ raw_spinlock_t read_lock;
};
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
@@ -182,9 +182,9 @@ static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
* The problem of stomping on the buffers ends here.
* Why didn't anyone see this one coming? --AJK
*/
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
put_tty_queue_nolock(c, ldata);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
}
/**
@@ -218,9 +218,9 @@ static void reset_buffer_flags(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
unsigned long flags;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = ldata->read_tail = ldata->read_cnt = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
mutex_lock(&ldata->echo_lock);
ldata->echo_pos = ldata->echo_cnt = ldata->echo_overrun = 0;
@@ -276,7 +276,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
unsigned long flags;
ssize_t n = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
if (!ldata->icanon) {
n = ldata->read_cnt;
} else if (ldata->canon_data) {
@@ -284,7 +284,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
ldata->canon_head - ldata->read_tail :
ldata->canon_head + (N_TTY_BUF_SIZE - ldata->read_tail);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return n;
}
@@ -915,19 +915,19 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
(N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -961,10 +961,10 @@ static void eraser(unsigned char c, struct tty_struct *tty)
break;
}
cnt = (ldata->read_head - head) & (N_TTY_BUF_SIZE-1);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_head = head;
ldata->read_cnt -= cnt;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
@@ -1344,12 +1344,12 @@ send_signal:
put_tty_queue(c, ldata);
handle_newline:
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
set_bit(ldata->read_head, ldata->read_flags);
put_tty_queue_nolock(c, ldata);
ldata->canon_head = ldata->read_head;
ldata->canon_data++;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
@@ -1423,7 +1423,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
unsigned long cpuflags;
if (ldata->real_raw) {
- spin_lock_irqsave(&ldata->read_lock, cpuflags);
+ raw_spin_lock_irqsave(&ldata->read_lock, cpuflags);
i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
N_TTY_BUF_SIZE - ldata->read_head);
i = min(count, i);
@@ -1439,7 +1439,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
memcpy(ldata->read_buf + ldata->read_head, cp, i);
ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt += i;
- spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
} else {
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
@@ -1635,7 +1635,7 @@ static int n_tty_open(struct tty_struct *tty)
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
mutex_init(&ldata->echo_lock);
- spin_lock_init(&ldata->read_lock);
+ raw_spin_lock_init(&ldata->read_lock);
/* These are ugly. Currently a malloc failure here can panic */
ldata->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
@@ -1703,10 +1703,10 @@ static int copy_from_read_buf(struct tty_struct *tty,
bool is_eof;
retval = 0;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
n = min(ldata->read_cnt, N_TTY_BUF_SIZE - ldata->read_tail);
n = min(*nr, n);
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (n) {
retval = copy_to_user(*b, &ldata->read_buf[ldata->read_tail], n);
n -= retval;
@@ -1714,13 +1714,13 @@ static int copy_from_read_buf(struct tty_struct *tty,
ldata->read_buf[ldata->read_tail] == EOF_CHAR(tty);
tty_audit_add_data(tty, &ldata->read_buf[ldata->read_tail], n,
ldata->icanon);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
ldata->read_tail = (ldata->read_tail + n) & (N_TTY_BUF_SIZE-1);
ldata->read_cnt -= n;
/* Turn single EOF into zero-length read */
if (L_EXTPROC(tty) && ldata->icanon && is_eof && !ldata->read_cnt)
n = 0;
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
*b += n;
*nr -= n;
}
@@ -1900,7 +1900,7 @@ do_it_again:
if (ldata->icanon && !L_EXTPROC(tty)) {
/* N.B. avoid overrun if nr == 0 */
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
while (nr && ldata->read_cnt) {
int eol;
@@ -1918,25 +1918,25 @@ do_it_again:
if (--ldata->canon_data < 0)
ldata->canon_data = 0;
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (!eol || (c != __DISABLED_CHAR)) {
if (tty_put_user(tty, c, b++)) {
retval = -EFAULT;
b--;
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
nr--;
}
if (eol) {
tty_audit_push(tty);
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
break;
}
- spin_lock_irqsave(&ldata->read_lock, flags);
+ raw_spin_lock_irqsave(&ldata->read_lock, flags);
}
- spin_unlock_irqrestore(&ldata->read_lock, flags);
+ raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (retval)
break;
} else {
--
1.7.9.5
* Re: [PATCH-v3] tty: Use raw spin lock to protect the TTY read section
From: Thomas Gleixner @ 2013-02-04 15:33 UTC
To: Ivo Sieben; +Cc: linux-serial, RT, Steven Rostedt, Greg KH
On Mon, 28 Jan 2013, Ivo Sieben wrote:
> @Thomas Gleixner & Steven Rostedt:
> Greg Kroah-Hartman responded to this patch: "And this really makes a
> difference? I'd like to hear the rt developers' opinion of this."
In general the overhead of sleeping spinlocks can be quite visible, and
AFAICT the n_tty read_lock only protects very short code paths which
manipulate the read buffer and related data. So from an RT perspective
this lock can be a real spinlock even on RT.
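Schematically (an illustrative summary, not actual kernel source):

	spinlock_t     slock;	/* on PREEMPT_RT: backed by a sleeping
				   rt_mutex, contention schedules away
				   to the lock owner */
	raw_spinlock_t rlock;	/* always a true busy-wait lock with
				   interrupts off, acceptable only for
				   short critical sections such as the
				   read buffer updates above */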
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Thanks,
tglx