* [PATCH v3 0/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
@ 2024-11-07 16:32 Eder Zulian
2024-11-07 16:32 ` [PATCH v3 1/1] " Eder Zulian
2024-11-07 16:50 ` [PATCH v3 0/1] " Miguel Ojeda
0 siblings, 2 replies; 5+ messages in thread
From: Eder Zulian @ 2024-11-07 16:32 UTC (permalink / raw)
To: linux-kernel, linux-next, rust-for-linux
Cc: miguel.ojeda.sandonis, tglx, williams, ojeda, alex.gaynor, gary,
bjorn3_gh, benno.lossin, a.hindborg, aliceryhl, tmgross, jlelli,
peterz, mingo, will, longman, boqun.feng, bigeasy, sfr, hpa
Hello!
When PREEMPT_RT=y, spin locks are mapped to rt_mutex types, so using
spinlock_check() + __raw_spin_lock_init() to initialize them is incorrect
and causes build errors.

This v3 patch introduces __spin_lock_init() to initialize a spin lock with
the lockdep information required for PREEMPT_RT builds, and uses it in the
Rust helper.
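To see why the non-RT path cannot compile, here is a simplified sketch of
the relevant definitions (paraphrased from spinlock_types.h and spinlock.h,
not verbatim):

    /* PREEMPT_RT=y: spinlock_t wraps an rt_mutex, there is no ->rlock */
    typedef struct spinlock {
            struct rt_mutex_base    lock;
    #ifdef CONFIG_DEBUG_LOCK_ALLOC
            struct lockdep_map      dep_map;
    #endif
    } spinlock_t;

    /* Only defined for PREEMPT_RT=n: returns &lock->rlock */
    static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
    {
            return &lock->rlock;
    }

Since spinlock_check() and the raw_spinlock_t member only exist for
!PREEMPT_RT, the existing Rust helper fails to build once PREEMPT_RT=y and
DEBUG_SPINLOCK=y are combined.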
This patch was developed on top of linux-next/master.
As a note, at the time of writing, RUST support for x86_64 depends on
!(MITIGATION_RETHUNK && KASAN) || RUSTC_VERSION >= 108300. Miguel Ojeda
pointed out that this can be avoided with Rust 1.83, to be released in 3
weeks (2024-11-28).
In order to reproduce the problem, Rust must be available on the system:
$ make LLVM=1 rustavailable
With CONFIG_PREEMPT_RT=y, CONFIG_RUST=y, and CONFIG_DEBUG_SPINLOCK=y, an
x86_64 kernel can be built with
$ make LLVM=1 -j$(nproc) bzImage
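As a convenience, one way to set those options on top of a defconfig is the
scripts/config helper (a sketch; depending on the tree, PREEMPT_RT may also
need EXPERT=y to be selectable, so it is enabled here as well):

    $ make LLVM=1 defconfig
    $ scripts/config -e EXPERT -e PREEMPT_RT -e RUST -e DEBUG_SPINLOCK
    $ make LLVM=1 olddefconfig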
The problem was reported at least in:
https://lore.kernel.org/oe-kbuild-all/202409251238.vetlgXE9-lkp@intel.com/
https://lore.kernel.org/all/20241107182411.57e2b418@canb.auug.org.au/
Links to v1 and v2 where improvement suggestions were made:
https://lore.kernel.org/all/20241014195253.1704625-1-ezulian@redhat.com/
https://lore.kernel.org/all/20241106211215.2005909-1-ezulian@redhat.com/
Version 2 changes:
- Cleaned up style and incorporated feedback from reviewers Boqun Feng and
Miguel Ojeda.
Version 3 changes:
- Addressed review comments from Boqun Feng. Improved the commit title and
description and used a proper 'Fixes:' tag.
Thanks,
Eder Zulian (1):
rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
include/linux/spinlock_rt.h | 15 +++++++--------
rust/helpers/spinlock.c | 8 ++++++--
2 files changed, 13 insertions(+), 10 deletions(-)
--
2.47.0
* [PATCH v3 1/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
2024-11-07 16:32 [PATCH v3 0/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT Eder Zulian
@ 2024-11-07 16:32 ` Eder Zulian
2024-11-07 20:19 ` Boqun Feng
2024-11-07 16:50 ` [PATCH v3 0/1] " Miguel Ojeda
1 sibling, 1 reply; 5+ messages in thread
From: Eder Zulian @ 2024-11-07 16:32 UTC (permalink / raw)
To: linux-kernel, linux-next, rust-for-linux
Cc: miguel.ojeda.sandonis, tglx, williams, ojeda, alex.gaynor, gary,
bjorn3_gh, benno.lossin, a.hindborg, aliceryhl, tmgross, jlelli,
peterz, mingo, will, longman, boqun.feng, bigeasy, sfr, hpa
When PREEMPT_RT=y, spin locks are mapped to rt_mutex types, so using
spinlock_check() + __raw_spin_lock_init() to initialize spin locks is
incorrect, and would cause build errors.
Introduce __spin_lock_init() to initialize a spin lock with the lockdep
information required for PREEMPT_RT builds, and use it in the Rust
helper.
Fixes: d2d6422f8bd1 ("x86: Allow to enable PREEMPT_RT.")
Reported-by: kernel test robot <lkp@intel.com>
Closes: https://lore.kernel.org/oe-kbuild-all/202409251238.vetlgXE9-lkp@intel.com/
Signed-off-by: Eder Zulian <ezulian@redhat.com>
---
V1 -> V2: Cleaned up style and addressed review comments
V2 -> V3: Improved commit title and description and corrected the 'Fixes:'
tag as per reviewer's suggestion
include/linux/spinlock_rt.h | 15 +++++++--------
rust/helpers/spinlock.c | 8 ++++++--
2 files changed, 13 insertions(+), 10 deletions(-)
diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
index f9f14e135be7..f6499c37157d 100644
--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -16,22 +16,21 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
}
#endif
-#define spin_lock_init(slock) \
+#define __spin_lock_init(slock, name, key, percpu) \
do { \
- static struct lock_class_key __key; \
- \
rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, false); \
+ __rt_spin_lock_init(slock, name, key, percpu); \
} while (0)
-#define local_spin_lock_init(slock) \
+#define _spin_lock_init(slock, percpu) \
do { \
static struct lock_class_key __key; \
- \
- rt_mutex_base_init(&(slock)->lock); \
- __rt_spin_lock_init(slock, #slock, &__key, true); \
+ __spin_lock_init(slock, #slock, &__key, percpu); \
} while (0)
+#define spin_lock_init(slock) _spin_lock_init(slock, false)
+#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
+
extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
index b7b0945e8b3c..5971fdf6f755 100644
--- a/rust/helpers/spinlock.c
+++ b/rust/helpers/spinlock.c
@@ -6,10 +6,14 @@ void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_SPINLOCK
+# if defined(CONFIG_PREEMPT_RT)
+ __spin_lock_init(lock, name, key, false);
+# else /*!CONFIG_PREEMPT_RT */
__raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
-#else
+# endif /* CONFIG_PREEMPT_RT */
+#else /* !CONFIG_DEBUG_SPINLOCK */
spin_lock_init(lock);
-#endif
+#endif /* CONFIG_DEBUG_SPINLOCK */
}
void rust_helper_spin_lock(spinlock_t *lock)
--
2.47.0
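For clarity, a rough expansion of the new macros (not part of the patch
itself): with this change, spin_lock_init(&s) on a PREEMPT_RT build expands
to approximately

    do {
            static struct lock_class_key __key;
            /* _spin_lock_init(&s, false) ->
               __spin_lock_init(&s, "&s", &__key, false) */
            rt_mutex_base_init(&(&s)->lock);
            __rt_spin_lock_init(&s, "&s", &__key, false);
    } while (0)

while the Rust helper skips the static key and calls
__spin_lock_init(lock, name, key, false) directly, passing the
lock_class_key provided by the Rust side.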
* Re: [PATCH v3 0/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
2024-11-07 16:32 [PATCH v3 0/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT Eder Zulian
2024-11-07 16:32 ` [PATCH v3 1/1] " Eder Zulian
@ 2024-11-07 16:50 ` Miguel Ojeda
2024-11-07 17:09 ` Eder Zulian
1 sibling, 1 reply; 5+ messages in thread
From: Miguel Ojeda @ 2024-11-07 16:50 UTC (permalink / raw)
To: Eder Zulian
Cc: linux-kernel, linux-next, rust-for-linux, tglx, williams, ojeda,
alex.gaynor, gary, bjorn3_gh, benno.lossin, a.hindborg, aliceryhl,
tmgross, jlelli, peterz, mingo, will, longman, boqun.feng,
bigeasy, sfr, hpa
On Thu, Nov 7, 2024 at 5:33 PM Eder Zulian <ezulian@redhat.com> wrote:
>
> As a note, at the time of writing, RUST support for x86_64 depends on
> !(MITIGATION_RETHUNK && KASAN) || RUSTC_VERSION >= 108300. Miguel Ojeda
> pointed out that this can be avoided with Rust 1.83, to be released in 3
> weeks (2024-11-28).
I was referring there to the "or" in that condition, i.e. the "||
RUSTC_VERSION >= 108300" part. In other words, it was just a comment I
made to explain in the other thread that disabling KASAN or RETHUNK is
not needed anymore when you use 1.83 in the future. :)
But that seems unrelated to the patch here, so normally you wouldn't
add it to the cover letter. Or am I missing something? Same for the
`make rustavailable` note below (i.e. `RUST=y` already implies that).
(Of course, no need to resend anything for this -- it is just a note
to clarify, and anyway the cover letter does not go into the
repository :)
Thanks!
Cheers,
Miguel
* Re: [PATCH v3 0/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
2024-11-07 16:50 ` [PATCH v3 0/1] " Miguel Ojeda
@ 2024-11-07 17:09 ` Eder Zulian
0 siblings, 0 replies; 5+ messages in thread
From: Eder Zulian @ 2024-11-07 17:09 UTC (permalink / raw)
To: Miguel Ojeda
Cc: linux-kernel, linux-next, rust-for-linux, tglx, williams, ojeda,
alex.gaynor, gary, bjorn3_gh, benno.lossin, a.hindborg, aliceryhl,
tmgross, jlelli, peterz, mingo, will, longman, boqun.feng,
bigeasy, sfr, hpa
Hi Miguel,
On Thu, Nov 07, 2024 at 05:50:50PM +0100, Miguel Ojeda wrote:
> On Thu, Nov 7, 2024 at 5:33 PM Eder Zulian <ezulian@redhat.com> wrote:
> >
> > As a note, at the time of writing, RUST support for x86_64 depends on
> > !(MITIGATION_RETHUNK && KASAN) || RUSTC_VERSION >= 108300. Miguel Ojeda
> > pointed out that this can be avoided with Rust 1.83, to be released in 3
> > weeks (2024-11-28).
>
> I was referring there to the "or" in that condition, i.e. the "||
> RUSTC_VERSION >= 108300" part. In other words, it was just a comment I
> made to explain in the other thread that disabling KASAN or RETHUNK is
> not needed anymore when you use 1.83 in the future. :)
>
Yes, I thought that was clear all along.
> But that seems unrelated to the patch here, so normally you wouldn't
> add it to the cover letter. Or am I missing something? Same for the
> `make rustavailable` note below (i.e. `RUST=y` already implies that).
>
Noted. I don't think you're missing anything. Thank you for the hints.
> (Of course, no need to resend anything for this -- it is just a note
> to clarify, and anyway the cover letter does not go into the
> repository :)
>
> Thanks!
>
> Cheers,
> Miguel
>
Thank you,
Eder
* Re: [PATCH v3 1/1] rust: helpers: Avoid raw_spin_lock initialization for PREEMPT_RT
2024-11-07 16:32 ` [PATCH v3 1/1] " Eder Zulian
@ 2024-11-07 20:19 ` Boqun Feng
0 siblings, 0 replies; 5+ messages in thread
From: Boqun Feng @ 2024-11-07 20:19 UTC (permalink / raw)
To: Eder Zulian
Cc: linux-kernel, linux-next, rust-for-linux, miguel.ojeda.sandonis,
tglx, williams, ojeda, alex.gaynor, gary, bjorn3_gh, benno.lossin,
a.hindborg, aliceryhl, tmgross, jlelli, peterz, mingo, will,
longman, bigeasy, sfr, hpa
On Thu, Nov 07, 2024 at 05:32:23PM +0100, Eder Zulian wrote:
> When PREEMPT_RT=y, spin locks are mapped to rt_mutex types, so using
> spinlock_check() + __raw_spin_lock_init() to initialize spin locks is
> incorrect, and would cause build errors.
>
> Introduce __spin_lock_init() to initialize a spin lock with the lockdep
> information required for PREEMPT_RT builds, and use it in the Rust
> helper.
>
> Fixes: d2d6422f8bd1 ("x86: Allow to enable PREEMPT_RT.")
> Reported-by: kernel test robot <lkp@intel.com>
> Closes: https://lore.kernel.org/oe-kbuild-all/202409251238.vetlgXE9-lkp@intel.com/
> Signed-off-by: Eder Zulian <ezulian@redhat.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
For testing, I used the following commands as a reproducer:

ARM64:

./tools/testing/kunit/kunit.py run --make_options LLVM=1 --arch arm64 --kconfig_add CONFIG_RUST=y --kconfig_add CONFIG_SMP=y rust_doctests_kernel --kconfig_add CONFIG_WERROR=y --kconfig_add CONFIG_EXPERT=y --kconfig_add CONFIG_PREEMPT_RT=y --kconfig_add CONFIG_PROVE_LOCKING=y

X86_64:

./tools/testing/kunit/kunit.py run --make_options LLVM=1 --arch x86_64 --kconfig_add CONFIG_RUST=y --kconfig_add CONFIG_SMP=y rust_doctests_kernel --kconfig_add CONFIG_WERROR=y --kconfig_add CONFIG_EXPERT=y --kconfig_add CONFIG_PREEMPT_RT=y --kconfig_add CONFIG_PROVE_LOCKING=y
And I applied this onto tip/locking/core and can confirm the build
errors are gone. So
Tested-by: Boqun Feng <boqun.feng@gmail.com>
Regards,
Boqun
> ---
> V1 -> V2: Cleaned up style and addressed review comments
> V2 -> V3: Improved commit title and description and corrected the 'Fixes:'
> tag as per reviewer's suggestion
>
> include/linux/spinlock_rt.h | 15 +++++++--------
> rust/helpers/spinlock.c | 8 ++++++--
> 2 files changed, 13 insertions(+), 10 deletions(-)
>
> diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h
> index f9f14e135be7..f6499c37157d 100644
> --- a/include/linux/spinlock_rt.h
> +++ b/include/linux/spinlock_rt.h
> @@ -16,22 +16,21 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
> }
> #endif
>
> -#define spin_lock_init(slock) \
> +#define __spin_lock_init(slock, name, key, percpu) \
> do { \
> - static struct lock_class_key __key; \
> - \
> rt_mutex_base_init(&(slock)->lock); \
> - __rt_spin_lock_init(slock, #slock, &__key, false); \
> + __rt_spin_lock_init(slock, name, key, percpu); \
> } while (0)
>
> -#define local_spin_lock_init(slock) \
> +#define _spin_lock_init(slock, percpu) \
> do { \
> static struct lock_class_key __key; \
> - \
> - rt_mutex_base_init(&(slock)->lock); \
> - __rt_spin_lock_init(slock, #slock, &__key, true); \
> + __spin_lock_init(slock, #slock, &__key, percpu); \
> } while (0)
>
> +#define spin_lock_init(slock) _spin_lock_init(slock, false)
> +#define local_spin_lock_init(slock) _spin_lock_init(slock, true)
> +
> extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
> extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock);
> extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
> diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c
> index b7b0945e8b3c..5971fdf6f755 100644
> --- a/rust/helpers/spinlock.c
> +++ b/rust/helpers/spinlock.c
> @@ -6,10 +6,14 @@ void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
> struct lock_class_key *key)
> {
> #ifdef CONFIG_DEBUG_SPINLOCK
> +# if defined(CONFIG_PREEMPT_RT)
> + __spin_lock_init(lock, name, key, false);
> +# else /*!CONFIG_PREEMPT_RT */
> __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
> -#else
> +# endif /* CONFIG_PREEMPT_RT */
> +#else /* !CONFIG_DEBUG_SPINLOCK */
> spin_lock_init(lock);
> -#endif
> +#endif /* CONFIG_DEBUG_SPINLOCK */
> }
>
> void rust_helper_spin_lock(spinlock_t *lock)
> --
> 2.47.0
>