From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Nicholas Piggin <npiggin@gmail.com>
Subject: [RFC PATCH 05/14] powerpc/qspinlock: allow new waiters to steal the lock before queueing
Date: Mon, 11 Jul 2022 13:04:44 +1000
Message-ID: <20220711030453.150644-6-npiggin@gmail.com>
In-Reply-To: <20220711030453.150644-1-npiggin@gmail.com>

Allow new waiters to spin on the lock word a number of times, attempting to
take the lock, before queueing. This particularly helps paravirt performance
when physical CPUs are oversubscribed.
---
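The shape of the resulting slowpath is roughly the following (a condensed
sketch with try_to_steal_lock() folded into its caller; the real code is in
the diff below):

	void queued_spin_lock_slowpath(struct qspinlock *lock)
	{
		int iters;

		/* New: spin on the lock word, trying to take the lock directly. */
		for (iters = 0; iters < STEAL_SPINS; iters++) {
			u32 val = READ_ONCE(lock->val);

			if (val & _Q_LOCKED_VAL) {
				cpu_relax();
				continue;
			}
			/* Lock word is free: set the locked bit, preserving the tail. */
			if (trylock_with_tail_cpu(lock, val))
				return;
		}

		/* Unchanged: give up stealing and queue as an MCS waiter. */
		queued_spin_lock_mcs_queue(lock);
	}

Because the lock can now change hands without going through the queue, the
waiter at the head of the MCS queue can no longer simply set the locked bit
once it sees the lock released; a stealer may have taken it in the meantime,
so the head retries its trylock in a loop (the again: path). MAYBE_STEALERS
selects between the two head-of-queue variants at run time, so the cheaper
non-stealing code is used once stealing has been switched off via the debugfs
knob added at the end of the patch.
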
arch/powerpc/lib/qspinlock.c | 143 ++++++++++++++++++++++++++++++++---
1 file changed, 132 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 76dca922ba71..cb87991602ff 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -19,6 +19,10 @@ struct qnodes {
 	struct qnode nodes[MAX_NODES];
 };
 
+/* Tuning parameters */
+static int STEAL_SPINS __read_mostly = (1<<5);
+static bool MAYBE_STEALERS __read_mostly = true;
+
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
 
 static inline u32 encode_tail_cpu(void)
@@ -76,6 +80,39 @@ static __always_inline int trylock_clear_tail_cpu(struct qspinlock *lock, u32 ol
 	return 0;
 }
 
+static __always_inline u32 __trylock_cmpxchg(struct qspinlock *lock, u32 old, u32 new)
+{
+	u32 prev;
+
+	BUG_ON(old & _Q_LOCKED_VAL);
+
+	asm volatile(
+"1:	lwarx	%0,0,%1,%4	# queued_spin_trylock_cmpxchg		\n"
+"	cmpw	0,%0,%2						\n"
+"	bne-	2f						\n"
+"	stwcx.	%3,0,%1						\n"
+"	bne-	1b						\n"
+"\t"	PPC_ACQUIRE_BARRIER					"	\n"
+"2:								\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r"(old), "r" (new),
+	  "i" (IS_ENABLED(CONFIG_PPC64) ? 1 : 0)
+	: "cr0", "memory");
+
+	return prev;
+}
+
+/* Take lock, preserving tail, cmpxchg with val (which must not be locked) */
+static __always_inline int trylock_with_tail_cpu(struct qspinlock *lock, u32 val)
+{
+	u32 newval = _Q_LOCKED_VAL | (val & _Q_TAIL_CPU_MASK);
+
+	if (__trylock_cmpxchg(lock, val, newval) == val)
+		return 1;
+	else
+		return 0;
+}
+
 /*
  * Publish our tail, replacing previous tail. Return previous value.
  *
@@ -115,6 +152,26 @@ static inline struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 	BUG();
 }
 
+static inline bool try_to_steal_lock(struct qspinlock *lock)
+{
+	int iters;
+
+	/* Attempt to steal the lock */
+	for (iters = 0; iters < STEAL_SPINS; iters++) {
+		u32 val = READ_ONCE(lock->val);
+
+		if (val & _Q_LOCKED_VAL) {
+			cpu_relax();
+			continue;
+		}
+
+		if (trylock_with_tail_cpu(lock, val))
+			return true;
+	}
+
+	return false;
+}
+
 static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 {
 	struct qnodes *qnodesp;
@@ -164,20 +221,39 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 		smp_rmb(); /* acquire barrier for the mcs lock */
 	}
 
-	/* We're at the head of the waitqueue, wait for the lock. */
-	while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
-		cpu_relax();
+	if (!MAYBE_STEALERS) {
+		/* We're at the head of the waitqueue, wait for the lock. */
+		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
+			cpu_relax();
 
-	/* If we're the last queued, must clean up the tail. */
-	if ((val & _Q_TAIL_CPU_MASK) == tail) {
-		if (trylock_clear_tail_cpu(lock, val))
-			goto release;
-		/* Another waiter must have enqueued */
-	}
+		/* If we're the last queued, must clean up the tail. */
+		if ((val & _Q_TAIL_CPU_MASK) == tail) {
+			if (trylock_clear_tail_cpu(lock, val))
+				goto release;
+			/* Another waiter must have enqueued. */
+		}
+
+		/* We must be the owner, just set the lock bit and acquire */
+		lock_set_locked(lock);
+	} else {
+again:
+		/* We're at the head of the waitqueue, wait for the lock. */
+		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
+			cpu_relax();
 
-	/* We must be the owner, just set the lock bit and acquire */
-	lock_set_locked(lock);
+		/* If we're the last queued, must clean up the tail. */
+		if ((val & _Q_TAIL_CPU_MASK) == tail) {
+			if (trylock_clear_tail_cpu(lock, val))
+				goto release;
+			/* Another waiter must have enqueued, or lock stolen. */
+		} else {
+			if (trylock_with_tail_cpu(lock, val))
+				goto unlock_next;
+		}
+		goto again;
+	}
 
+unlock_next:
 	/* contended path; must wait for next != NULL (MCS protocol) */
 	while (!(next = READ_ONCE(node->next)))
 		cpu_relax();
@@ -197,6 +273,9 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 
 void queued_spin_lock_slowpath(struct qspinlock *lock)
 {
+	if (try_to_steal_lock(lock))
+		return;
+
 	queued_spin_lock_mcs_queue(lock);
 }
 EXPORT_SYMBOL(queued_spin_lock_slowpath);
@@ -204,6 +283,48 @@ EXPORT_SYMBOL(queued_spin_lock_slowpath);
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 void pv_spinlocks_init(void)
 {
+	STEAL_SPINS = (1<<15);
 }
 #endif
 
+#include <linux/debugfs.h>
+static int steal_spins_set(void *data, u64 val)
+{
+	static DEFINE_MUTEX(lock);
+
+	mutex_lock(&lock);
+	if (val && !STEAL_SPINS) {
+		MAYBE_STEALERS = true;
+		/* wait for waiter to go away */
+		synchronize_rcu();
+		STEAL_SPINS = val;
+	} else if (!val && STEAL_SPINS) {
+		STEAL_SPINS = val;
+		/* wait for all possible stealers to go away */
+		synchronize_rcu();
+		MAYBE_STEALERS = false;
+	} else {
+		STEAL_SPINS = val;
+	}
+	mutex_unlock(&lock);
+
+	return 0;
+}
+
+static int steal_spins_get(void *data, u64 *val)
+{
+	*val = STEAL_SPINS;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
+
+static __init int spinlock_debugfs_init(void)
+{
+	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+
+	return 0;
+}
+device_initcall(spinlock_debugfs_init);
+
--
2.35.1
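
The steal limit is runtime-tunable through the new debugfs file. Assuming
arch_debugfs_dir resolves to /sys/kernel/debug/powerpc as usual (worth
double-checking on the target system), switching stealing off and back on
would look something like:

	# cat /sys/kernel/debug/powerpc/qspl_steal_spins
	32
	# echo 0 > /sys/kernel/debug/powerpc/qspl_steal_spins      # queue-only behaviour
	# echo 32768 > /sys/kernel/debug/powerpc/qspl_steal_spins  # the paravirt default (1<<15)

The value 32 shown above is the bare-metal default (1<<5). Writing 0 clears
STEAL_SPINS and then, after a synchronize_rcu(), clears MAYBE_STEALERS;
writing a non-zero value when stealing was off sets MAYBE_STEALERS first and
waits a grace period before raising STEAL_SPINS. The grace periods are there
so that no CPU can still be running the old flavour of the queue-head code,
or still be stealing, by the time the flag changes.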