From: Andrea Parri <andrea.parri@amarulasolutions.com>
To: Julia Cartwright <julia@ni.com>
Cc: Ingo Molnar <mingo@kernel.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Peter Zijlstra <peterz@infradead.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-rt-users@vger.kernel.org" <linux-rt-users@vger.kernel.org>,
	Steffen Trumtrar <s.trumtrar@pengutronix.de>,
	Tim Sander <tim@krieglstein.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	Guenter Roeck <linux@roeck-us.net>
Subject: Re: [PATCH 1/2] kthread: convert worker lock to raw spinlock
Date: Fri, 5 Oct 2018 20:10:35 +0200	[thread overview]
Message-ID: <20181005181035.GA19828@andrea> (raw)
In-Reply-To: <da33d7d7aa32d08f9cbf5016fb04426f90645cb0.1538089180.git.julia@ni.com>

Hi Julia,

On Fri, Sep 28, 2018 at 09:03:51PM +0000, Julia Cartwright wrote:
> In order to enable the queuing of kthread work items from hardirq
> context even when PREEMPT_RT_FULL is enabled, convert the worker
> spin_lock to a raw_spin_lock.
> 
> This is acceptable only because the work performed under the lock is
> well-bounded and minimal.

Clearly not my area, but out of curiosity: what do you mean by
"well-bounded" and "minimal"?  Could you point me to some documentation?

  Andrea


> 
> Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> Cc: Guenter Roeck <linux@roeck-us.net>
> Reported-and-tested-by: Steffen Trumtrar <s.trumtrar@pengutronix.de>
> Reported-by: Tim Sander <tim@krieglstein.org>
> Signed-off-by: Julia Cartwright <julia@ni.com>
> ---
>  include/linux/kthread.h |  2 +-
>  kernel/kthread.c        | 42 ++++++++++++++++++++---------------------
>  2 files changed, 22 insertions(+), 22 deletions(-)
> 
> diff --git a/include/linux/kthread.h b/include/linux/kthread.h
> index c1961761311d..ad292898f7f2 100644
> --- a/include/linux/kthread.h
> +++ b/include/linux/kthread.h
> @@ -85,7 +85,7 @@ enum {
>  
>  struct kthread_worker {
>  	unsigned int		flags;
> -	spinlock_t		lock;
> +	raw_spinlock_t		lock;
>  	struct list_head	work_list;
>  	struct list_head	delayed_work_list;
>  	struct task_struct	*task;
> diff --git a/kernel/kthread.c b/kernel/kthread.c
> index 486dedbd9af5..c1d9ee6671c6 100644
> --- a/kernel/kthread.c
> +++ b/kernel/kthread.c
> @@ -597,7 +597,7 @@ void __kthread_init_worker(struct kthread_worker *worker,
>  				struct lock_class_key *key)
>  {
>  	memset(worker, 0, sizeof(struct kthread_worker));
> -	spin_lock_init(&worker->lock);
> +	raw_spin_lock_init(&worker->lock);
>  	lockdep_set_class_and_name(&worker->lock, key, name);
>  	INIT_LIST_HEAD(&worker->work_list);
>  	INIT_LIST_HEAD(&worker->delayed_work_list);
> @@ -639,21 +639,21 @@ int kthread_worker_fn(void *worker_ptr)
>  
>  	if (kthread_should_stop()) {
>  		__set_current_state(TASK_RUNNING);
> -		spin_lock_irq(&worker->lock);
> +		raw_spin_lock_irq(&worker->lock);
>  		worker->task = NULL;
> -		spin_unlock_irq(&worker->lock);
> +		raw_spin_unlock_irq(&worker->lock);
>  		return 0;
>  	}
>  
>  	work = NULL;
> -	spin_lock_irq(&worker->lock);
> +	raw_spin_lock_irq(&worker->lock);
>  	if (!list_empty(&worker->work_list)) {
>  		work = list_first_entry(&worker->work_list,
>  					struct kthread_work, node);
>  		list_del_init(&work->node);
>  	}
>  	worker->current_work = work;
> -	spin_unlock_irq(&worker->lock);
> +	raw_spin_unlock_irq(&worker->lock);
>  
>  	if (work) {
>  		__set_current_state(TASK_RUNNING);
> @@ -810,12 +810,12 @@ bool kthread_queue_work(struct kthread_worker *worker,
>  	bool ret = false;
>  	unsigned long flags;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	if (!queuing_blocked(worker, work)) {
>  		kthread_insert_work(worker, work, &worker->work_list);
>  		ret = true;
>  	}
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_queue_work);
> @@ -841,7 +841,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
>  	if (WARN_ON_ONCE(!worker))
>  		return;
>  
> -	spin_lock(&worker->lock);
> +	raw_spin_lock(&worker->lock);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -850,7 +850,7 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
>  	list_del_init(&work->node);
>  	kthread_insert_work(worker, work, &worker->work_list);
>  
> -	spin_unlock(&worker->lock);
> +	raw_spin_unlock(&worker->lock);
>  }
>  EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
>  
> @@ -906,14 +906,14 @@ bool kthread_queue_delayed_work(struct kthread_worker *worker,
>  	unsigned long flags;
>  	bool ret = false;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  
>  	if (!queuing_blocked(worker, work)) {
>  		__kthread_queue_delayed_work(worker, dwork, delay);
>  		ret = true;
>  	}
>  
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
> @@ -949,7 +949,7 @@ void kthread_flush_work(struct kthread_work *work)
>  	if (!worker)
>  		return;
>  
> -	spin_lock_irq(&worker->lock);
> +	raw_spin_lock_irq(&worker->lock);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -961,7 +961,7 @@ void kthread_flush_work(struct kthread_work *work)
>  	else
>  		noop = true;
>  
> -	spin_unlock_irq(&worker->lock);
> +	raw_spin_unlock_irq(&worker->lock);
>  
>  	if (!noop)
>  		wait_for_completion(&fwork.done);
> @@ -994,9 +994,9 @@ static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
>  		 * any queuing is blocked by setting the canceling counter.
>  		 */
>  		work->canceling++;
> -		spin_unlock_irqrestore(&worker->lock, *flags);
> +		raw_spin_unlock_irqrestore(&worker->lock, *flags);
>  		del_timer_sync(&dwork->timer);
> -		spin_lock_irqsave(&worker->lock, *flags);
> +		raw_spin_lock_irqsave(&worker->lock, *flags);
>  		work->canceling--;
>  	}
>  
> @@ -1043,7 +1043,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
>  	unsigned long flags;
>  	int ret = false;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  
>  	/* Do not bother with canceling when never queued. */
>  	if (!work->worker)
> @@ -1060,7 +1060,7 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
>  fast_queue:
>  	__kthread_queue_delayed_work(worker, dwork, delay);
>  out:
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	return ret;
>  }
>  EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
> @@ -1074,7 +1074,7 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
>  	if (!worker)
>  		goto out;
>  
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	/* Work must not be used with >1 worker, see kthread_queue_work(). */
>  	WARN_ON_ONCE(work->worker != worker);
>  
> @@ -1088,13 +1088,13 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
>  	 * In the meantime, block any queuing by setting the canceling counter.
>  	 */
>  	work->canceling++;
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  	kthread_flush_work(work);
> -	spin_lock_irqsave(&worker->lock, flags);
> +	raw_spin_lock_irqsave(&worker->lock, flags);
>  	work->canceling--;
>  
>  out_fast:
> -	spin_unlock_irqrestore(&worker->lock, flags);
> +	raw_spin_unlock_irqrestore(&worker->lock, flags);
>  out:
>  	return ret;
>  }
> -- 
> 2.18.0
> 
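
For readers less familiar with the RT implications: under PREEMPT_RT_FULL,
spinlock_t is substituted with a sleeping, rtmutex-based lock, so taking
worker->lock from hardirq context would be illegal, whereas raw_spinlock_t
keeps the usual non-sleeping spinlock semantics.  Below is a minimal,
hypothetical sketch of the caller pattern this conversion enables; the
example_* names are invented for illustration, while kthread_create_worker(),
kthread_init_work() and kthread_queue_work() are the existing kthread_worker
API.

  /*
   * Hypothetical driver fragment (not part of the patch): queue deferred
   * work onto a kthread_worker directly from a hardirq handler.  With the
   * worker lock as spinlock_t this would acquire a sleeping lock in hardirq
   * context on PREEMPT_RT_FULL; with raw_spinlock_t it is permitted.
   */
  #include <linux/err.h>
  #include <linux/interrupt.h>
  #include <linux/kthread.h>

  static struct kthread_worker *example_worker;	/* hypothetical */
  static struct kthread_work example_work;	/* hypothetical */

  static void example_work_fn(struct kthread_work *work)
  {
  	/* Runs in the worker kthread, i.e. preemptible task context. */
  }

  static irqreturn_t example_irq_handler(int irq, void *dev_id)
  {
  	/*
  	 * kthread_queue_work() takes worker->lock internally; in this
  	 * path it must not sleep, hence the raw_spin_lock conversion.
  	 */
  	kthread_queue_work(example_worker, &example_work);
  	return IRQ_HANDLED;
  }

  static int example_init(void)
  {
  	example_worker = kthread_create_worker(0, "example-worker");
  	if (IS_ERR(example_worker))
  		return PTR_ERR(example_worker);

  	kthread_init_work(&example_work, example_work_fn);
  	/* request_irq(irq, example_irq_handler, 0, "example", NULL); */
  	return 0;
  }

The trade-off, as the changelog notes, is that raw spinlock critical
sections are never preempted, even on RT, so the work done under the lock
(list manipulation and a wakeup here) has to stay short and bounded.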

Thread overview: 23+ messages
2018-09-18 13:21 [BUG] dw_wdt watchdog on linux-rt 4.18.5-rt4 not triggering Steffen Trumtrar
2018-09-18 13:46 ` Guenter Roeck
2018-09-19  6:46   ` Steffen Trumtrar
     [not found]     ` <20180919194303.GA5033@roeck-us.net>
2018-09-20 20:48       ` Julia Cartwright
2018-09-21 13:34         ` Guenter Roeck
2018-09-21 16:42           ` Julia Cartwright
2018-09-21 20:21             ` Guenter Roeck
2018-09-24  7:24         ` Steffen Trumtrar
2018-09-28 21:03           ` [PATCH 0/2] Fix watchdogd wakeup deferral on RT Julia Cartwright
2018-09-28 21:03             ` [PATCH 1/2] kthread: convert worker lock to raw spinlock Julia Cartwright
2018-10-05 16:46               ` Sebastian Andrzej Siewior
2018-10-05 18:10               ` Andrea Parri [this message]
2018-10-09 10:56                 ` Sebastian Andrzej Siewior
2018-09-28 21:03             ` [PATCH RT 2/2] watchdog, rt: prevent deferral of watchdogd wakeup Julia Cartwright
2018-09-28 22:38               ` kbuild test robot
2018-09-29  6:38                 ` Thomas Gleixner
2018-09-29 22:13                   ` Sebastian Andrzej Siewior
2018-09-30  1:41                     ` [kbuild-all] " Li, Philip
2018-09-28 23:20               ` kbuild test robot
2018-09-30 14:00               ` Guenter Roeck
2018-10-05 16:52               ` Sebastian Andrzej Siewior
2018-09-20  8:18   ` [BUG] dw_wdt watchdog on linux-rt 4.18.5-rt4 not triggering Tim Sander
2018-09-18 18:14 ` Julia Cartwright
