From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Jordan Niethe <jniethe5@gmail.com>,
Laurent Dufour <ldufour@linux.ibm.com>,
Nicholas Piggin <npiggin@gmail.com>
Subject: [PATCH v3 15/17] powerpc/qspinlock: allow indefinite spinning on a preempted owner
Date: Sat, 26 Nov 2022 19:59:30 +1000
Message-ID: <20221126095932.1234527-16-npiggin@gmail.com>
In-Reply-To: <20221126095932.1234527-1-npiggin@gmail.com>

Provide an option that holds off queueing indefinitely while the lock
owner is preempted. This could reduce queueing latencies for very
overcommitted vCPU situations.

This is disabled by default.
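For illustration, the core of the change is that spins made against a
preempted owner are not counted against the steal/head spin limits. A
minimal sketch of the adjusted steal loop (abstracted from the real
code in the diff below):

	do {
		bool preempted = false;

		val = READ_ONCE(lock->val);
		if (val & _Q_LOCKED_VAL)
			/* returns true if the owner vCPU was seen preempted */
			preempted = yield_to_locked_owner(lock, val, paravirt);

		/* only count spins made against a running owner */
		if (!preempted || !pv_spin_on_preempted_owner)
			iters++;
	} while (!steal_break(val, iters, paravirt));

The option can be toggled at runtime via the new debugfs file added
below (assuming debugfs is mounted at the usual /sys/kernel/debug):

	echo 1 > /sys/kernel/debug/powerpc/qspl_pv_spin_on_preempted_owner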
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
arch/powerpc/lib/qspinlock.c | 77 +++++++++++++++++++++++++++++-------
1 file changed, 62 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 7f6b41627351..1c9079489b50 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -35,6 +35,7 @@ static int head_spins __read_mostly = (1<<8);
static bool pv_yield_owner __read_mostly = true;
static bool pv_yield_allow_steal __read_mostly = false;
+static bool pv_spin_on_preempted_owner __read_mostly = false;
static bool pv_yield_prev __read_mostly = true;
static bool pv_yield_propagate_owner __read_mostly = true;
static bool pv_prod_head __read_mostly = false;
@@ -191,11 +192,12 @@ static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
BUG();
}
-/* Called inside spin_begin() */
-static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq)
{
int owner;
u32 yield_count;
+ bool preempted = false;
BUG_ON(!(val & _Q_LOCKED_VAL));
@@ -213,6 +215,8 @@ static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32
spin_end();
+ preempted = true;
+
/*
* Read the lock word after sampling the yield count. On the other side
there may be a wmb because the yield count update is done by the
@@ -229,29 +233,32 @@ static __always_inline void __yield_to_locked_owner(struct qspinlock *lock, u32
if (mustq)
set_mustq(lock);
spin_begin();
+
/* Don't relax if we yielded. Maybe we should? */
- return;
+ return preempted;
}
spin_begin();
relax:
spin_cpu_relax();
+
+ return preempted;
}
-/* Called inside spin_begin() */
-static __always_inline void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
{
- __yield_to_locked_owner(lock, val, paravirt, false);
+ return __yield_to_locked_owner(lock, val, paravirt, false);
}
-/* Called inside spin_begin() */
-static __always_inline void yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+/* Called inside spin_begin(). Returns whether or not the vCPU was preempted. */
+static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
{
bool mustq = false;
if ((val & _Q_MUST_Q_VAL) && pv_yield_allow_steal)
mustq = true;
- __yield_to_locked_owner(lock, val, paravirt, mustq);
+ return __yield_to_locked_owner(lock, val, paravirt, mustq);
}
static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt)
@@ -361,13 +368,16 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
int iters = 0;
u32 val;
- if (!steal_spins)
+ if (!steal_spins) {
+ /* XXX: should spin_on_preempted_owner do anything here? */
return false;
+ }
/* Attempt to steal the lock */
spin_begin();
-
do {
+ bool preempted = false;
+
val = READ_ONCE(lock->val);
if (val & _Q_MUST_Q_VAL)
break;
@@ -378,10 +388,23 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt)
return true;
spin_begin();
} else {
- yield_to_locked_owner(lock, val, paravirt);
+ preempted = yield_to_locked_owner(lock, val, paravirt);
}
- iters++;
+ if (preempted) {
+ if (!pv_spin_on_preempted_owner)
+ iters++;
+ /*
+ * pv_spin_on_preempted_owner doesn't increase iters
+ * while the owner is preempted -- we won't interfere
+ * with it by definition. This could introduce some
+ * latency issues if we continually observe preempted
+ * owners, but hopefully that's a rare corner case of
+ * a badly oversubscribed system.
+ */
+ } else {
+ iters++;
+ }
} while (!steal_break(val, iters, paravirt));
spin_end();
@@ -453,15 +476,22 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
/* We're at the head of the waitqueue, wait for the lock. */
spin_begin();
for (;;) {
+ bool preempted;
+
val = READ_ONCE(lock->val);
if (!(val & _Q_LOCKED_VAL))
break;
propagate_yield_cpu(node, val, &set_yield_cpu, paravirt);
- yield_head_to_locked_owner(lock, val, paravirt);
+ preempted = yield_head_to_locked_owner(lock, val, paravirt);
if (!maybe_stealers)
continue;
- iters++;
+ if (preempted) {
+ if (!pv_spin_on_preempted_owner)
+ iters++;
+ } else {
+ iters++;
+ }
if (!mustq && iters >= get_head_spins(paravirt)) {
mustq = true;
@@ -644,6 +674,22 @@ static int pv_yield_allow_steal_get(void *data, u64 *val)
DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");
+static int pv_spin_on_preempted_owner_set(void *data, u64 val)
+{
+ pv_spin_on_preempted_owner = !!val;
+
+ return 0;
+}
+
+static int pv_spin_on_preempted_owner_get(void *data, u64 *val)
+{
+ *val = pv_spin_on_preempted_owner;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_spin_on_preempted_owner, pv_spin_on_preempted_owner_get, pv_spin_on_preempted_owner_set, "%llu\n");
+
static int pv_yield_prev_set(void *data, u64 val)
{
pv_yield_prev = !!val;
@@ -700,6 +746,7 @@ static __init int spinlock_debugfs_init(void)
if (is_shared_processor()) {
debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
+ debugfs_create_file("qspl_pv_spin_on_preempted_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_spin_on_preempted_owner);
debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
debugfs_create_file("qspl_pv_yield_propagate_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_propagate_owner);
debugfs_create_file("qspl_pv_prod_head", 0600, arch_debugfs_dir, NULL, &fops_pv_prod_head);
--
2.37.2