From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Jordan Niethe <jniethe5@gmail.com>,
Laurent Dufour <laurent.dufour@fr.ibm.com>,
Nicholas Piggin <npiggin@gmail.com>
Subject: [RFC PATCH 2/4] powerpc/qspinlock: Avoid cmpxchg style patterns in queue head locking
Date: Tue, 15 Nov 2022 02:11:17 +1000
Message-ID: <20221114161119.2883620-3-npiggin@gmail.com>
In-Reply-To: <20221114161119.2883620-1-npiggin@gmail.com>
Locking by the MCS queue head must clear the tail CPU if there are
no more queue entries left, and it has to deal with concurrent lock
stealing. Implementing these with cmpxchg-style updates leaves the
possibility of unnecessary failure when the lock word changes.
Implement this instead within one larx/stcx. critical section that
tests the value and takes the appropriate action: bail out if the
lock was already taken, otherwise take the lock and clear the tail
if we are the tail, else take the lock and leave the tail.
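
In C-like terms the primitive behaves roughly as below. This is only
a sketch of the logic inside the larx/stcx. critical section, not the
real implementation; the actual update is the inline asm added by the
patch and is performed atomically, retrying when the stwcx. fails:

  /* Sketch only; the real implementation is the atomic asm below. */
  static u32 trylock_clear_my_tail(struct qspinlock *lock, u32 mytail)
  {
          u32 old = lock->val;                    /* lwarx */
          u32 new = queued_spin_encode_locked_val();

          if (old & _Q_LOCKED_VAL)
                  return old;     /* already locked, bail out */

          if ((old & _Q_TAIL_CPU_MASK) != mytail)
                  new |= old & _Q_TAIL_CPU_MASK;  /* keep the other tail */
          /* else we are the tail: leave it cleared */

          lock->val = new;                        /* stwcx. */
          return old;
  }
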
With this primitive, there is no longer a significant reason to keep
the large !maybe_stealers special case, which a subsequent patch in
this series removes.
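
The queue head locking in both the !maybe_stealers and maybe_stealers
paths then reduces to the same shape (a sketch mirroring the hunks
below; the !maybe_stealers path uses BUG_ON() instead of retrying,
since the lock cannot have been stolen there):

  old = trylock_clear_my_tail(lock, tail);
  if (unlikely(old & _Q_LOCKED_VAL))
          goto again;     /* lock was stolen while we were at the head */
  if ((old & _Q_TAIL_CPU_MASK) == tail)
          goto release;   /* we were the last queued, tail now cleared */
  /* otherwise another waiter has enqueued; wait for node->next */
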
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/lib/qspinlock.c | 92 ++++++++++++------------------------
1 file changed, 29 insertions(+), 63 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index ff718f27cbc9..79793b3209ea 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -104,62 +104,36 @@ static inline int get_owner_cpu(u32 val)
return (val & _Q_OWNER_CPU_MASK) >> _Q_OWNER_CPU_OFFSET;
}
-/* Take the lock by setting the lock bit, no other CPUs will touch it. */
-static __always_inline void set_locked(struct qspinlock *lock)
+static __always_inline u32 trylock_clear_my_tail(struct qspinlock *lock, u32 mytail)
{
- u32 new = queued_spin_encode_locked_val();
+ u32 newval = queued_spin_encode_locked_val();
u32 prev, tmp;
asm volatile(
-"1: lwarx %0,0,%2,%4 # set_locked \n"
-" or %1,%0,%3 \n"
-" stwcx. %1,0,%2 \n"
+"1: lwarx %0,0,%2,%7 # trylock_clear_my_tail \n"
+ /* This test is necessary if there could be stealers */
+" andi. %1,%0,%5 \n"
+" bne 3f \n"
+ /* Test whether the lock tail == mytail */
+" and %1,%0,%6 \n"
+" cmpw 0,%1,%3 \n"
+ /* Merge the new locked value */
+" or %1,%1,%4 \n"
+" bne 2f \n"
+ /* If the lock tail matched, then clear it, otherwise leave it. */
+" andc %1,%1,%6 \n"
+"2: stwcx. %1,0,%2 \n"
" bne- 1b \n"
"\t" PPC_ACQUIRE_BARRIER " \n"
+"3: \n"
: "=&r" (prev), "=&r" (tmp)
- : "r" (&lock->val), "r" (new),
+ : "r" (&lock->val), "r"(mytail), "r" (newval),
+ "i" (_Q_LOCKED_VAL),
+ "r" (_Q_TAIL_CPU_MASK),
"i" (IS_ENABLED(CONFIG_PPC64))
: "cr0", "memory");
- BUG_ON(prev & _Q_LOCKED_VAL);
-}
-
-static __always_inline u32 __trylock_cmpxchg(struct qspinlock *lock, u32 old, u32 new)
-{
- u32 prev;
-
- BUG_ON(old & _Q_LOCKED_VAL);
-
- asm volatile(
-"1: lwarx %0,0,%1,%4 # __trylock_cmpxchg \n"
-" cmpw 0,%0,%2 \n"
-" bne- 2f \n"
-" stwcx. %3,0,%1 \n"
-" bne- 1b \n"
-"\t" PPC_ACQUIRE_BARRIER " \n"
-"2: \n"
- : "=&r" (prev)
- : "r" (&lock->val), "r"(old), "r" (new),
- "i" (IS_ENABLED(CONFIG_PPC64))
- : "cr0", "memory");
-
- return likely(prev == old);
-}
-
-/* Take lock, clearing tail, cmpxchg with old (which must not be locked) */
-static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 val)
-{
- u32 newval = queued_spin_encode_locked_val();
-
- return __trylock_cmpxchg(lock, val, newval);
-}
-
-/* Take lock, preserving tail, cmpxchg with val (which must not be locked) */
-static __always_inline int trylock_with_tail_cpu(struct qspinlock *lock, u32 val)
-{
- u32 newval = queued_spin_encode_locked_val() | (val & _Q_TAIL_CPU_MASK);
-
- return __trylock_cmpxchg(lock, val, newval);
+ return prev;
}
/*
@@ -620,14 +594,11 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
spin_end();
/* If we're the last queued, must clean up the tail. */
- if ((val & _Q_TAIL_CPU_MASK) == tail) {
- if (trylock_clear_tail_cpu(lock, val))
- goto release;
- /* Another waiter must have enqueued. */
- }
+ old = trylock_clear_my_tail(lock, tail);
+ BUG_ON(old & _Q_LOCKED_VAL);
+ if ((old & _Q_TAIL_CPU_MASK) == tail)
+ goto release;
- /* We must be the owner, just set the lock bit and acquire */
- set_locked(lock);
} else {
int set_yield_cpu = -1;
int iters = 0;
@@ -682,18 +653,13 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
spin_end();
/* If we're the last queued, must clean up the tail. */
- if ((val & _Q_TAIL_CPU_MASK) == tail) {
- if (trylock_clear_tail_cpu(lock, val))
- goto release;
- /* Another waiter must have enqueued, or lock stolen. */
- } else {
- if (trylock_with_tail_cpu(lock, val))
- goto unlock_next;
- }
- goto again;
+ old = trylock_clear_my_tail(lock, tail);
+ if (unlikely(old & _Q_LOCKED_VAL))
+ goto again;
+ if ((old & _Q_TAIL_CPU_MASK) == tail)
+ goto release;
}
-unlock_next:
/* contended path; must wait for next != NULL (MCS protocol) */
next = READ_ONCE(node->next);
if (!next) {
--
2.37.2