From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Jordan Niethe <jniethe5@gmail.com>,
Laurent Dufour <laurent.dufour@fr.ibm.com>,
Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCH v2 05/17] powerpc/qspinlock: allow new waiters to steal the lock before queueing
Date: Mon, 14 Nov 2022 12:31:25 +1000
Message-ID: <20221114023137.2679627-7-npiggin@gmail.com>
In-Reply-To: <20221114023137.2679627-1-npiggin@gmail.com>

Allow new waiters a number of spins on the lock word before queueing,
which particularly helps paravirt performance when physical CPUs are
oversubscribed.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
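The steal count added here is runtime-tunable through the
qspl_steal_spins debugfs file created at the end of this patch. A rough
usage sketch, assuming debugfs is mounted at /sys/kernel/debug and
arch_debugfs_dir resolves to the powerpc directory there:

  # cat /sys/kernel/debug/powerpc/qspl_steal_spins       # default 1<<5 = 32
  # echo 0 > /sys/kernel/debug/powerpc/qspl_steal_spins  # disable stealing
  # echo 64 > /sys/kernel/debug/powerpc/qspl_steal_spins

Writes that enable or disable stealing also flip maybe_stealers, using
synchronize_rcu() to keep stealers and the !maybe_stealers queue-head
path away from each other (see steal_spins_set() below).
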
arch/powerpc/lib/qspinlock.c | 159 ++++++++++++++++++++++++++++++-----
1 file changed, 140 insertions(+), 19 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 6c58c24af5a0..872d4628a44d 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -19,8 +19,17 @@ struct qnodes {
struct qnode nodes[MAX_NODES];
};
+/* Tuning parameters */
+static int steal_spins __read_mostly = (1<<5);
+static bool maybe_stealers __read_mostly = true;
+
static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
+static __always_inline int get_steal_spins(void)
+{
+ return steal_spins;
+}
+
static inline u32 encode_tail_cpu(int cpu)
{
return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
@@ -50,15 +59,14 @@ static __always_inline void set_locked(struct qspinlock *lock)
BUG_ON(prev & _Q_LOCKED_VAL);
}
-/* Take lock, clearing tail, cmpxchg with old (which must not be locked) */
-static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 old)
+static __always_inline u32 __trylock_cmpxchg(struct qspinlock *lock, u32 old, u32 new)
{
u32 prev;
BUG_ON(old & _Q_LOCKED_VAL);
asm volatile(
-"1: lwarx %0,0,%1,%4 # trylock_clear_tail_cpu \n"
+"1: lwarx %0,0,%1,%4 # __trylock_cmpxchg \n"
" cmpw 0,%0,%2 \n"
" bne- 2f \n"
" stwcx. %3,0,%1 \n"
@@ -66,13 +74,27 @@ static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 ol
"\t" PPC_ACQUIRE_BARRIER " \n"
"2: \n"
: "=&r" (prev)
- : "r" (&lock->val), "r"(old), "r" (_Q_LOCKED_VAL),
+ : "r" (&lock->val), "r"(old), "r" (new),
"i" (IS_ENABLED(CONFIG_PPC64))
: "cr0", "memory");
return likely(prev == old);
}
+/* Take lock, clearing tail, cmpxchg with old (which must not be locked) */
+static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 val)
+{
+ return __trylock_cmpxchg(lock, val, _Q_LOCKED_VAL);
+}
+
+/* Take lock, preserving tail, cmpxchg with val (which must not be locked) */
+static __always_inline int trylock_with_tail_cpu(struct qspinlock *lock, u32 val)
+{
+ u32 newval = _Q_LOCKED_VAL | (val & _Q_TAIL_CPU_MASK);
+
+ return __trylock_cmpxchg(lock, val, newval);
+}
+
/*
* Publish our tail, replacing previous tail. Return previous value.
*
@@ -122,6 +144,30 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
BUG();
}
+static inline bool try_to_steal_lock(struct qspinlock *lock)
+{
+ int iters = 0;
+
+ if (!maybe_stealers)
+ return false;
+
+ /* Attempt to steal the lock */
+ do {
+ u32 val = READ_ONCE(lock->val);
+
+ if (unlikely(!(val & _Q_LOCKED_VAL))) {
+ if (trylock_with_tail_cpu(lock, val))
+ return true;
+ } else {
+ cpu_relax();
+ }
+
+ iters++;
+ } while (iters < get_steal_spins());
+
+ return false;
+}
+
static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
{
struct qnodes *qnodesp;
@@ -171,25 +217,49 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
smp_rmb(); /* acquire barrier for the mcs lock */
}
- /* We're at the head of the waitqueue, wait for the lock. */
- for (;;) {
- val = READ_ONCE(lock->val);
- if (!(val & _Q_LOCKED_VAL))
- break;
+ if (!maybe_stealers) {
+ /* We're at the head of the waitqueue, wait for the lock. */
+ for (;;) {
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
- cpu_relax();
- }
+ cpu_relax();
+ }
+
+ /* If we're the last queued, must clean up the tail. */
+ if ((val & _Q_TAIL_CPU_MASK) == tail) {
+ if (trylock_clear_tail_cpu(lock, val))
+ goto release;
+ /* Another waiter must have enqueued. */
+ }
+
+ /* We must be the owner, just set the lock bit and acquire */
+ set_locked(lock);
+ } else {
+again:
+ /* We're at the head of the waitqueue, wait for the lock. */
+ for (;;) {
+ val = READ_ONCE(lock->val);
+ if (!(val & _Q_LOCKED_VAL))
+ break;
- /* If we're the last queued, must clean up the tail. */
- if ((val & _Q_TAIL_CPU_MASK) == tail) {
- if (trylock_clear_tail_cpu(lock, val))
- goto release;
- /* Another waiter must have enqueued */
+ cpu_relax();
+ }
+
+ /* If we're the last queued, must clean up the tail. */
+ if ((val & _Q_TAIL_CPU_MASK) == tail) {
+ if (trylock_clear_tail_cpu(lock, val))
+ goto release;
+ /* Another waiter must have enqueued, or lock stolen. */
+ } else {
+ if (trylock_with_tail_cpu(lock, val))
+ goto unlock_next;
+ }
+ goto again;
}
- /* We must be the owner, just set the lock bit and acquire */
- set_locked(lock);
-
+unlock_next:
/* contended path; must wait for next != NULL (MCS protocol) */
while (!(next = READ_ONCE(node->next)))
cpu_relax();
@@ -209,6 +279,9 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
void queued_spin_lock_slowpath(struct qspinlock *lock)
{
+ if (try_to_steal_lock(lock))
+ return;
+
queued_spin_lock_mcs_queue(lock);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
@@ -218,3 +291,51 @@ void pv_spinlocks_init(void)
{
}
#endif
+
+#include <linux/debugfs.h>
+static int steal_spins_set(void *data, u64 val)
+{
+ static DEFINE_MUTEX(lock);
+
+ /*
+ * The lock slow path has a !maybe_stealers case that can assume
+ * the head of queue will not see concurrent waiters. That waiter
+ * is unsafe in the presence of stealers, so must keep them away
+ * from one another.
+ */
+
+ mutex_lock(&lock);
+ if (val && !steal_spins) {
+ maybe_stealers = true;
+ /* wait for queue head waiter to go away */
+ synchronize_rcu();
+ steal_spins = val;
+ } else if (!val && steal_spins) {
+ steal_spins = val;
+ /* wait for all possible stealers to go away */
+ synchronize_rcu();
+ maybe_stealers = false;
+ } else {
+ steal_spins = val;
+ }
+ mutex_unlock(&lock);
+
+ return 0;
+}
+
+static int steal_spins_get(void *data, u64 *val)
+{
+ *val = steal_spins;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
+
+static __init int spinlock_debugfs_init(void)
+{
+ debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+
+ return 0;
+}
+device_initcall(spinlock_debugfs_init);
--
2.37.2