public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Rodolfo Giometti <giometti@enneenne.com>
To: Michael Byczkowski <by@by-online.de>
Cc: Andrew Morton <akpm@linux-foundation.org>, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v3 2/3] pps: convert pps_device lock to raw_spinlock for PREEMPT_RT
Date: Fri, 10 Apr 2026 08:36:49 +0200	[thread overview]
Message-ID: <58db49fe-1c8b-48af-b366-2e3f6da2fbbc@enneenne.com> (raw)
In-Reply-To: <D98824E6-9550-47C2-ACD4-4C636618A756@by-online.de>

On 4/9/26 17:28, Michael Byczkowski wrote:
> On PREEMPT_RT, spinlock_t becomes a sleeping mutex, which allows
> pps_event() to be preempted mid-update by other RT threads. This
> introduces jitter in the PPS event recording path.
> 
> Convert pps_device.lock to raw_spinlock_t so the timestamp and
> sequence number updates remain non-preemptible on RT.
> 
> Move wake_up_interruptible_all() and kill_fasync() outside the
> raw_spinlock critical section, as these acquire sleeping locks
> on PREEMPT_RT. The lock now protects only the timestamp, sequence
> number, and last_ev updates. This is safe because PPS_FETCH waiters
> use wait_event_interruptible_timeout() and will re-check after
> waking, and kill_fasync() does not require PPS data to be locked
> (thanks to Nikolaus Buchwitz <nb@buchwitz.com> for reporting).
> 
> On non-RT kernels, raw_spinlock_t compiles to code identical to
> spinlock_t — no behavioral change.
> 
> Signed-off-by: Michael Byczkowski <by@by-online.de>
> Tested-by: Michael Byczkowski <by@by-online.de>
> ---
>   drivers/pps/kapi.c         | 20 ++++++++++++--------
>   drivers/pps/pps.c          | 16 ++++++++--------
>   include/linux/pps_kernel.h |  2 +-
>   3 files changed, 21 insertions(+), 17 deletions(-)
> 
> diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
> index 1bf0335a1b41..97898b0eeff4 100644
> --- a/drivers/pps/kapi.c
> +++ b/drivers/pps/kapi.c
> @@ -102,7 +102,7 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
>   		pps->info.echo = pps_echo_client_default;
>   
>   	init_waitqueue_head(&pps->queue);
> -	spin_lock_init(&pps->lock);
> +	raw_spin_lock_init(&pps->lock);
>   
>   	/* Create the char device */
>   	err = pps_register_cdev(pps);
> @@ -167,7 +167,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
>   
>   	timespec_to_pps_ktime(&ts_real, ts->ts_real);
>   
> -	spin_lock_irqsave(&pps->lock, flags);
> +	raw_spin_lock_irqsave(&pps->lock, flags);
>   
>   	/* Must call the echo function? */
>   	if ((pps->params.mode & (PPS_ECHOASSERT | PPS_ECHOCLEAR)))
> @@ -204,16 +204,20 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
>   		captured = ~0;
>   	}
>   
> -	pps_kc_event(pps, ts, event);
> +pps_kc_event(pps, ts, event);

What is this??? The leading indentation of this line has been dropped in
the posted patch — please restore the original tab so the hunk applies
cleanly and matches kernel coding style.

>   
> -	/* Wake up if captured something */
> -	if (captured) {
> +	if (captured)
>   		pps->last_ev++;
> -		wake_up_interruptible_all(&pps->queue);
>   
> +	raw_spin_unlock_irqrestore(&pps->lock, flags);
> +
> +	/*
> +	 * Wake up after releasing the lock: wake_up_interruptible_all()
> +	 * and kill_fasync() acquire sleeping locks on PREEMPT_RT.
> +	 */
> +	if (captured) {
> +		wake_up_interruptible_all(&pps->queue);
>   		kill_fasync(&pps->async_queue, SIGIO, POLL_IN);
>   	}
> -
> -	spin_unlock_irqrestore(&pps->lock, flags);
>   }
>   EXPORT_SYMBOL(pps_event);
> diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
> index c6b8b6478276..ad96425208a1 100644
> --- a/drivers/pps/pps.c
> +++ b/drivers/pps/pps.c
> @@ -103,12 +103,12 @@ static long pps_cdev_ioctl(struct file *file,
>   	case PPS_GETPARAMS:
>   		dev_dbg(&pps->dev, "PPS_GETPARAMS\n");
>   
> -		spin_lock_irq(&pps->lock);
> +		raw_spin_lock_irq(&pps->lock);
>   
>   		/* Get the current parameters */
>   		params = pps->params;
>   
> -		spin_unlock_irq(&pps->lock);
> +		raw_spin_unlock_irq(&pps->lock);
>   
>   		err = copy_to_user(uarg, &params, sizeof(struct pps_kparams));
>   		if (err)
> @@ -139,7 +139,7 @@ static long pps_cdev_ioctl(struct file *file,
>   			return -EINVAL;
>   		}
>   
> -		spin_lock_irq(&pps->lock);
> +		raw_spin_lock_irq(&pps->lock);
>   
>   		/* Save the new parameters */
>   		pps->params = params;
> @@ -163,7 +163,7 @@ static long pps_cdev_ioctl(struct file *file,
>   		pps->params.assert_off_tu.flags = 0;
>   		pps->params.clear_off_tu.flags = 0;
>   
> -		spin_unlock_irq(&pps->lock);
> +		raw_spin_unlock_irq(&pps->lock);
>   
>   		break;
>   
> @@ -190,7 +190,7 @@ static long pps_cdev_ioctl(struct file *file,
>   			return err;
>   
>   		/* Return the fetched timestamp and save last fetched event  */
> -		spin_lock_irq(&pps->lock);
> +		raw_spin_lock_irq(&pps->lock);
>   
>   		pps->last_fetched_ev = pps->last_ev;
>   
> @@ -200,7 +200,7 @@ static long pps_cdev_ioctl(struct file *file,
>   		fdata.info.clear_tu = pps->clear_tu;
>   		fdata.info.current_mode = pps->current_mode;
>   
> -		spin_unlock_irq(&pps->lock);
> +		raw_spin_unlock_irq(&pps->lock);
>   
>   		err = copy_to_user(uarg, &fdata, sizeof(struct pps_fdata));
>   		if (err)
> @@ -278,7 +278,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
>   			return err;
>   
>   		/* Return the fetched timestamp and save last fetched event  */
> -		spin_lock_irq(&pps->lock);
> +		raw_spin_lock_irq(&pps->lock);
>   
>   		pps->last_fetched_ev = pps->last_ev;
>   
> @@ -291,7 +291,7 @@ static long pps_cdev_compat_ioctl(struct file *file,
>   		memcpy(&compat.info.clear_tu, &pps->clear_tu,
>   				sizeof(struct pps_ktime_compat));
>   
> -		spin_unlock_irq(&pps->lock);
> +		raw_spin_unlock_irq(&pps->lock);
>   
>   		return copy_to_user(uarg, &compat,
>   				sizeof(struct pps_fdata_compat)) ? -EFAULT : 0;
> diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
> index aab0aebb529e..f2fe504071ed 100644
> --- a/include/linux/pps_kernel.h
> +++ b/include/linux/pps_kernel.h
> @@ -59,7 +59,7 @@ struct pps_device {
>   	void const *lookup_cookie;		/* For pps_lookup_dev() only */
>   	struct device dev;
>   	struct fasync_struct *async_queue;	/* fasync method */
> -	spinlock_t lock;
> +	raw_spinlock_t lock;
>   };
>   
>   /*


-- 
GNU/Linux Solutions                  e-mail: giometti@enneenne.com
Linux Device Driver                          giometti@linux.it
Embedded Systems                     phone:  +39 349 2432127
UNIX programming

  parent reply	other threads:[~2026-04-10  6:39 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2026-04-09 15:26 [PATCH v3 0/3] pps: improve PREEMPT_RT performance Michael Byczkowski
2026-04-09 15:27 ` [PATCH v3 1/3] pps: pps-gpio: split IRQ handler into hardirq and threaded parts Michael Byczkowski
2026-04-09 15:28   ` [PATCH v3 2/3] pps: convert pps_device lock to raw_spinlock for PREEMPT_RT Michael Byczkowski
2026-04-09 15:29     ` [PATCH v3 3/3] pps: convert pps_kc_hardpps_lock " Michael Byczkowski
2026-04-10  6:36     ` Rodolfo Giometti [this message]
2026-04-09 18:52   ` [PATCH v3 1/3] pps: pps-gpio: split IRQ handler into hardirq and threaded parts David Laight
2026-04-10 10:00     ` Michael Byczkowski

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=58db49fe-1c8b-48af-b366-2e3f6da2fbbc@enneenne.com \
    --to=giometti@enneenne.com \
    --cc=akpm@linux-foundation.org \
    --cc=by@by-online.de \
    --cc=linux-kernel@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox