linux-rt-users.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Mike Galbraith <umgwanakikbuti@gmail.com>
To: Jan Kiszka <jan.kiszka@siemens.com>
Cc: Steven Rostedt <rostedt@goodmis.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	RT <linux-rt-users@vger.kernel.org>,
	Linux Kernel Mailing List <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH RT 3.18] irq_work: Provide a soft-irq based queue
Date: Thu, 23 Apr 2015 08:58:33 +0200	[thread overview]
Message-ID: <1429772313.3419.38.camel@gmail.com> (raw)
In-Reply-To: <5538915C.8010904@siemens.com>

[-- Attachment #1: Type: text/plain, Size: 1092 bytes --]

On Thu, 2015-04-23 at 08:29 +0200, Jan Kiszka wrote:
> 
> >  void irq_work_tick(void)
> >  {
> > -#ifdef CONFIG_PREEMPT_RT_FULL
> > -       irq_work_run_list(this_cpu_ptr(&lazy_list));
> > -#else
> > -       struct llist_head *raised = &__get_cpu_var(raised_list);
> > +       struct llist_head *raised = this_cpu_ptr(&raised_list);
> >  
> > -       if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
> > +       if (!llist_empty(raised) && 
> > (!arch_irq_work_has_interrupt() ||
> > +           IS_ENABLED(CONFIG_PREEMPT_RT_FULL)))
> 
> OK, that additional condition is addressing archs that don't have
> irq_work support and fall back to the timer, right?

How will it ever run if it is not run in either irq_work_run() or 
irq_work_tick()?  There are two choices, we better pick one.

Attaching patch since either evolution fscked up again (it does that), 
or someone has managed to turn it into a completely useless piece of 
crap... if so, likely the same dipstick who made it save messages such 
that you need fromdos to wipe away the shite it smears all over it.

        -Mike

[-- Attachment #2: irq_work-Provide-a-soft-irq-based-queue.patch --]
[-- Type: text/x-patch, Size: 4954 bytes --]

Subject: [PATCH RT 3.18] irq_work: Provide a soft-irq based queue
Date:	Thu, 16 Apr 2015 18:28:16 +0200
From:	Jan Kiszka <jan.kiszka@siemens.com>

Instead of turning all irq_work requests into lazy ones on -rt, just
move their execution from hard into soft-irq context.

This resolves deadlocks of ftrace which will queue work from arbitrary
contexts, including those that have locks held that are needed for
raising a soft-irq.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
---

Second try, looks much better so far. And it also removes my concerns
regarding other potential cases besides ftrace.

 kernel/irq_work.c |   84 ++++++++++++++++++++++++++----------------------------
 1 file changed, 41 insertions(+), 43 deletions(-)

--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -80,17 +80,12 @@ bool irq_work_queue_on(struct irq_work *
 	if (!irq_work_claim(work))
 		return false;
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (work->flags & IRQ_WORK_HARD_IRQ)
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && (work->flags & IRQ_WORK_HARD_IRQ))
 		raise_irqwork = llist_add(&work->llnode,
 					  &per_cpu(hirq_work_list, cpu));
 	else
 		raise_irqwork = llist_add(&work->llnode,
-					  &per_cpu(lazy_list, cpu));
-#else
-		raise_irqwork = llist_add(&work->llnode,
 					  &per_cpu(raised_list, cpu));
-#endif
 
 	if (raise_irqwork)
 		arch_send_call_function_single_ipi(cpu);
@@ -103,6 +98,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on);
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
+	bool realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+	bool raise = false;
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
@@ -110,25 +108,22 @@ bool irq_work_queue(struct irq_work *wor
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-#ifdef CONFIG_PREEMPT_RT_FULL
-	if (work->flags & IRQ_WORK_HARD_IRQ) {
+	if (realtime && (work->flags & IRQ_WORK_HARD_IRQ)) {
 		if (llist_add(&work->llnode, this_cpu_ptr(&hirq_work_list)))
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			raise_softirq(TIMER_SOFTIRQ);
-	}
-#else
-	if (work->flags & IRQ_WORK_LAZY) {
+			raise = 1;
+	} else if (work->flags & IRQ_WORK_LAZY) {
 		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
-	}
-#endif
+		    tick_nohz_tick_stopped()) {
+			if (realtime)
+				raise_softirq(TIMER_SOFTIRQ);
+			else
+				raise = true;
+		}
+	} else if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+		raise = true;
+
+	if (raise)
+		arch_irq_work_raise();
 
 	preempt_enable();
 
@@ -143,12 +138,13 @@ bool irq_work_needs_cpu(void)
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised))
-		if (llist_empty(lazy))
-#ifdef CONFIG_PREEMPT_RT_FULL
+	if (llist_empty(raised) && llist_empty(lazy)) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
 			if (llist_empty(this_cpu_ptr(&hirq_work_list)))
-#endif
 				return false;
+		} else
+			return false;
+	}
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@@ -162,9 +158,7 @@ static void irq_work_run_list(struct lli
 	struct irq_work *work;
 	struct llist_node *llnode;
 
-#ifndef CONFIG_PREEMPT_RT_FULL
-	BUG_ON(!irqs_disabled());
-#endif
+	BUG_ON(!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !irqs_disabled());
 
 	if (llist_empty(list))
 		return;
@@ -200,26 +194,30 @@ static void irq_work_run_list(struct lli
  */
 void irq_work_run(void)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-	irq_work_run_list(this_cpu_ptr(&hirq_work_list));
-#else
-	irq_work_run_list(this_cpu_ptr(&raised_list));
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
-#endif
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+		irq_work_run_list(this_cpu_ptr(&hirq_work_list));
+		/*
+		 * NOTE: we raise softirq via IPI for safety,
+		 * and execute in irq_work_tick() to move the
+		 * overhead from hard to soft irq context.
+		 */
+		if (!llist_empty(this_cpu_ptr(&raised_list)))
+			raise_softirq(TIMER_SOFTIRQ);
+	} else {
+		irq_work_run_list(this_cpu_ptr(&raised_list));
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
+	}
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 void irq_work_tick(void)
 {
-#ifdef CONFIG_PREEMPT_RT_FULL
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
-#else
-	struct llist_head *raised = &__get_cpu_var(raised_list);
+	struct llist_head *raised = this_cpu_ptr(&raised_list);
 
-	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+	if (!llist_empty(raised) && (!arch_irq_work_has_interrupt() ||
+	    IS_ENABLED(CONFIG_PREEMPT_RT_FULL)))
 		irq_work_run_list(raised);
-	irq_work_run_list(&__get_cpu_var(lazy_list));
-#endif
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*

  reply	other threads:[~2015-04-23  6:58 UTC|newest]

Thread overview: 26+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-04-16 14:06 [PATCH RT 3.18] ring-buffer: Mark irq_work as HARD_IRQ to prevent deadlocks Jan Kiszka
2015-04-16 14:12 ` Steven Rostedt
2015-04-16 14:26 ` Sebastian Andrzej Siewior
2015-04-16 14:28   ` Jan Kiszka
2015-04-16 14:57     ` Sebastian Andrzej Siewior
2015-04-16 15:31       ` Jan Kiszka
2015-04-16 15:10     ` Steven Rostedt
2015-04-16 15:29       ` Jan Kiszka
2015-04-16 15:33         ` Sebastian Andrzej Siewior
2015-04-16 16:28         ` [PATCH RT 3.18] irq_work: Provide a soft-irq based queue Jan Kiszka
2015-04-20  8:03           ` Mike Galbraith
2015-04-23  6:11             ` Mike Galbraith
2015-04-23  6:29               ` Jan Kiszka
2015-04-23  6:58                 ` Mike Galbraith [this message]
2015-04-23  7:14                   ` Jan Kiszka
2015-04-23  6:50               ` Jan Kiszka
2015-04-23  7:01                 ` Mike Galbraith
2015-04-23  7:12                   ` Jan Kiszka
2015-04-23  7:19                     ` Mike Galbraith
2015-04-23 21:00                       ` Steven Rostedt
2015-04-24  6:54                         ` Mike Galbraith
2015-04-24  9:00                           ` Jan Kiszka
2015-04-24  9:59                             ` Mike Galbraith
2015-04-25  7:20                             ` Mike Galbraith
2015-04-25  7:26                               ` Jan Kiszka
2015-05-18 19:52                                 ` Sebastian Andrzej Siewior

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1429772313.3419.38.camel@gmail.com \
    --to=umgwanakikbuti@gmail.com \
    --cc=bigeasy@linutronix.de \
    --cc=jan.kiszka@siemens.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-rt-users@vger.kernel.org \
    --cc=rostedt@goodmis.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).