From: Thomas Gleixner <tglx@linutronix.de>
To: Christoph Lameter via B4 Relay <devnull+cl.gentwo.org@kernel.org>,
Catalin Marinas <catalin.marinas@arm.com>,
Will Deacon <will@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
Ingo Molnar <mingo@redhat.com>, Waiman Long <longman@redhat.com>,
Boqun Feng <boqun.feng@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-arch@vger.kernel.org,
"Christoph Lameter (Ampere)" <cl@gentwo.org>
Subject: Re: [PATCH v2] Avoid memory barrier in read_seqcount() through load acquire
Date: Fri, 23 Aug 2024 23:05:30 +0200 [thread overview]
Message-ID: <87ttfbeyqt.ffs@tglx> (raw)
In-Reply-To: <20240819-seq_optimize-v2-1-9d0da82b022f@gentwo.org>
On Mon, Aug 19 2024 at 11:30, Christoph Lameter via B4 Relay wrote:
> @@ -293,6 +321,18 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
> *
> * Return: count to be passed to read_seqcount_retry()
> */
> +#ifdef CONFIG_ARCH_HAS_ACQUIRE_RELEASE
> +#define raw_read_seqcount_begin(s) \
> +({ \
> + unsigned _seq; \
> + \
> + while ((_seq = seqprop_sequence_acquire(s)) & 1) \
> + cpu_relax(); \
> + \
> + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
> + _seq; \
> +})
So this covers only raw_read_seqcount_begin(), but not
raw_read_seqcount() which has the same smp_rmb() inside.
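For reference, both currently read like this (reconstructed from the hunk
context further down, simplified):

#define raw_read_seqcount_begin(s) \
({ \
	unsigned _seq = __read_seqcount_begin(s); \
	smp_rmb(); \
	_seq; \
})

#define raw_read_seqcount(s) \
({ \
	unsigned __seq = seqprop_sequence(s); \
	smp_rmb(); \
	kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
	__seq; \
})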
This all can be done without the extra copies of the counter
accessors. Uncompiled patch below.
It's a little larger than I initially wanted, but I had to keep the raw
READ_ONCE() for __read_seqcount_begin() so as not to inflict the
smp_load_acquire() on the only usage site in the dcache code.
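As an illustration (not the actual dcache code), a __read_seqcount_begin()
caller provides the ordering itself, so forcing a load-acquire into that
path would only add cost:

struct counted {
	seqcount_t	seq;
	unsigned long	value;
};

static unsigned long read_counted(struct counted *c)
{
	unsigned long v;
	unsigned seq;

	do {
		seq = __read_seqcount_begin(&c->seq);
		/* Ordering is supplied here by the caller, not the accessor */
		smp_rmb();
		v = READ_ONCE(c->value);
	} while (read_seqcount_retry(&c->seq, seq));

	return v;
}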
The acquire conditional in __seqprop_load_sequence() is optimized out by
the compiler as all of this is macro/__always_inline.
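A standalone sketch of the same pattern (C11 atomics standing in for
smp_load_acquire()/READ_ONCE(), illustrative only): because the flag is a
compile-time constant at every call site and the helper is always inlined,
only one of the two loads survives in the generated code:

#include <stdatomic.h>
#include <stdbool.h>

#define HAS_ACQUIRE_RELEASE 1	/* stand-in for the Kconfig switch */

static inline __attribute__((always_inline))
unsigned load_sequence(_Atomic unsigned *seq, bool acquire)
{
	if (acquire && HAS_ACQUIRE_RELEASE)
		return atomic_load_explicit(seq, memory_order_acquire);
	return atomic_load_explicit(seq, memory_order_relaxed);
}

unsigned begin_acquire(_Atomic unsigned *seq)
{
	return load_sequence(seq, true);	/* folds to one load-acquire */
}

unsigned begin_plain(_Atomic unsigned *seq)
{
	return load_sequence(seq, false);	/* folds to one relaxed load */
}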
Thanks,
tglx
---
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -132,6 +132,14 @@ static inline void seqcount_lockdep_read
#define seqcount_rwlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, rwlock)
#define seqcount_mutex_init(s, lock) seqcount_LOCKNAME_init(s, lock, mutex)
+static __always_inline unsigned __seqprop_load_sequence(const seqcount_t *s, bool acquire)
+{
+ if (acquire && IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE))
+ return smp_load_acquire(&s->sequence);
+ else
+ return READ_ONCE(s->sequence);
+}
+
/*
* SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
* seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
@@ -155,9 +163,10 @@ static __always_inline const seqcount_t
} \
\
static __always_inline unsigned \
-__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s, \
+ bool acquire) \
{ \
- unsigned seq = READ_ONCE(s->seqcount.sequence); \
+ unsigned seq = __seqprop_load_sequence(&s->seqcount, acquire); \
\
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \
return seq; \
@@ -170,7 +179,7 @@ static __always_inline unsigned \
* Re-read the sequence counter since the (possibly \
* preempted) writer made progress. \
*/ \
- seq = READ_ONCE(s->seqcount.sequence); \
+ seq = __seqprop_load_sequence(&s->seqcount, acquire); \
} \
\
return seq; \
@@ -206,9 +215,9 @@ static inline const seqcount_t *__seqpro
return s;
}
-static inline unsigned __seqprop_sequence(const seqcount_t *s)
+static inline unsigned __seqprop_sequence(const seqcount_t *s, bool acquire)
{
- return READ_ONCE(s->sequence);
+ return __seqprop_load_sequence(s, acquire);
}
static inline bool __seqprop_preemptible(const seqcount_t *s)
@@ -258,29 +267,23 @@ SEQCOUNT_LOCKNAME(mutex, struct m
#define seqprop_ptr(s) __seqprop(s, ptr)(s)
#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
-#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_sequence(s, a) __seqprop(s, sequence)(s, a)
#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
- * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
- *
- * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
- * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
- * provided before actually loading any of the variables that are to be
- * protected in this critical section.
- *
- * Use carefully, only in critical code, and comment how the barrier is
- * provided.
+ * read_seqcount_begin_cond_acquire() - begin a seqcount_t read section
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ * @acquire: If true, the read of the sequence count uses smp_load_acquire()
+ * if the architecture provides it and it is enabled.
*
* Return: count to be passed to read_seqcount_retry()
*/
-#define __read_seqcount_begin(s) \
+#define read_seqcount_begin_cond_acquire(s, acquire) \
({ \
unsigned __seq; \
\
- while ((__seq = seqprop_sequence(s)) & 1) \
+ while ((__seq = seqprop_sequence(s, acquire)) & 1) \
cpu_relax(); \
\
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
@@ -288,6 +291,26 @@ SEQCOUNT_LOCKNAME(mutex, struct m
})
/**
+ * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
+ * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
+ *
+ * __read_seqcount_begin is like read_seqcount_begin, but it neither
+ * provides an smp_rmb() barrier nor uses smp_load_acquire() on
+ * architectures which provide it.
+ *
+ * Callers should ensure that smp_rmb() or equivalent ordering is provided
+ * before actually loading any of the variables that are to be protected in
+ * this critical section.
+ *
+ * Use carefully, only in critical code, and comment how the barrier is
+ * provided.
+ *
+ * Return: count to be passed to read_seqcount_retry()
+ */
+#define __read_seqcount_begin(s) \
+ read_seqcount_begin_cond_acquire(s, false)
+
+/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
* @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants
*
@@ -295,9 +318,10 @@ SEQCOUNT_LOCKNAME(mutex, struct m
*/
#define raw_read_seqcount_begin(s) \
({ \
- unsigned _seq = __read_seqcount_begin(s); \
+ unsigned _seq = read_seqcount_begin_cond_acquire(s, true); \
\
- smp_rmb(); \
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE)) \
+ smp_rmb(); \
_seq; \
})
@@ -326,9 +350,10 @@ SEQCOUNT_LOCKNAME(mutex, struct m
*/
#define raw_read_seqcount(s) \
({ \
- unsigned __seq = seqprop_sequence(s); \
+ unsigned __seq = seqprop_sequence(s, true); \
\
- smp_rmb(); \
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_ACQUIRE_RELEASE)) \
+ smp_rmb(); \
kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
__seq; \
})
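For illustration only (not part of the patch): a read-side section does not
change at the source level; on CONFIG_ARCH_HAS_ACQUIRE_RELEASE
architectures the begin side now emits a load-acquire instead of
READ_ONCE() plus smp_rmb():

struct sample {
	seqcount_t	seq;
	u64		a, b;
};

static u64 sample_read(struct sample *s)
{
	unsigned seq;
	u64 a, b;

	do {
		seq = raw_read_seqcount_begin(&s->seq);
		a = s->a;
		b = s->b;
	} while (read_seqcount_retry(&s->seq, seq));

	return a + b;
}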