From: Nicholas Piggin <npiggin@gmail.com>
To: linuxppc-dev@lists.ozlabs.org
Cc: Nicholas Piggin <npiggin@gmail.com>
Subject: [RFC PATCH 10/14] powerpc/qspinlock: allow stealing when head of queue yields
Date: Mon, 11 Jul 2022 13:04:49 +1000
Message-ID: <20220711030453.150644-11-npiggin@gmail.com>
In-Reply-To: <20220711030453.150644-1-npiggin@gmail.com>

If the head of the queue is preventing stealing, but finds that the owner
vCPU is preempted, it will yield its cycles to the owner, which could in
turn cause the queue head itself to be preempted. Add an option to
re-allow stealers before yielding, and to disallow them again after
returning from the yield.

Disable this option by default for now, i.e., no functional change.
---
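A condensed pseudo-C sketch of the intended queue-head behaviour (not
the literal patch code; it inlines the relevant part of
yield_to_locked_owner() into the head's spin loop for illustration):

	while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL) {
		bool clear_mustq = false;

		iters++;
		if (iters > HEAD_SPINS) {
			/* Long wait: stop new arrivals from stealing. */
			lock_set_mustq(lock);
			val |= _Q_MUST_Q_VAL;
			clear_mustq = pv_yield_allow_steal;
		}

		/* In yield_to_locked_owner(), owner vCPU found preempted: */
		if (clear_mustq)
			lock_clear_mustq(lock);	/* let stealers in while parked */
		yield_to_preempted(owner, yield_count);
		if (clear_mustq)
			lock_set_mustq(lock);	/* and shut them out again */
	}

The option can be toggled at run time via the new
qspl_pv_yield_allow_steal debugfs file (in the powerpc arch debugfs
directory, typically /sys/kernel/debug/powerpc) on shared-processor
systems. It defaults to off, so nothing changes unless it is enabled.
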
 arch/powerpc/lib/qspinlock.c | 49 +++++++++++++++++++++++++++++++++---
 1 file changed, 45 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/lib/qspinlock.c b/arch/powerpc/lib/qspinlock.c
index 1a58ed51c060..4f1dc3322485 100644
--- a/arch/powerpc/lib/qspinlock.c
+++ b/arch/powerpc/lib/qspinlock.c
@@ -26,6 +26,7 @@ static bool MAYBE_STEALERS __read_mostly = true;
 static int HEAD_SPINS __read_mostly = (1<<13);
 
 static bool pv_yield_owner __read_mostly = true;
+static bool pv_yield_allow_steal __read_mostly = false;
 static bool pv_yield_prev __read_mostly = false;
 
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -163,6 +164,24 @@ static __always_inline u32 lock_set_mustq(struct qspinlock *lock)
 	return prev;
 }
 
+static __always_inline u32 lock_clear_mustq(struct qspinlock *lock)
+{
+	u32 new = _Q_MUST_Q_VAL;
+	u32 prev;
+
+	asm volatile(
+"1:	lwarx	%0,0,%1		# lock_clear_mustq		\n"
+"	andc	%0,%0,%2					\n"
+"	stwcx.	%0,0,%1						\n"
+"	bne-	1b						\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r" (new)
+	: "cr0", "memory");
+
+	return prev;
+}
+
+
 static inline struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 {
 	int cpu = get_tail_cpu(val);
@@ -178,7 +197,7 @@ static inline struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 	BUG();
 }
 
-static void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt)
+static void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool clear_mustq)
 {
 	int owner;
 	u32 yield_count;
@@ -207,7 +226,11 @@ static void yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt
 	smp_rmb();
 
 	if (READ_ONCE(lock->val) == val) {
+		if (clear_mustq)
+			lock_clear_mustq(lock);
 		yield_to_preempted(owner, yield_count);
+		if (clear_mustq)
+			lock_set_mustq(lock);
 		/* Don't relax if we yielded. Maybe we should? */
 		return;
 	}
@@ -253,7 +276,7 @@ static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool parav
 			break;
 
 		if (val & _Q_LOCKED_VAL) {
-			yield_to_locked_owner(lock, val, paravirt);
+			yield_to_locked_owner(lock, val, paravirt, false);
 			continue;
 		}
 
@@ -317,7 +340,7 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 	if (!MAYBE_STEALERS) {
 		/* We're at the head of the waitqueue, wait for the lock. */
 		while ((val = READ_ONCE(lock->val)) & _Q_LOCKED_VAL)
-			yield_to_locked_owner(lock, val, paravirt);
+			yield_to_locked_owner(lock, val, paravirt, false);
 
 		/* If we're the last queued, must clean up the tail. */
 		if ((val & _Q_TAIL_CPU_MASK) == tail) {
@@ -337,7 +360,8 @@ static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, b
 				lock_set_mustq(lock);
 				val |= _Q_MUST_Q_VAL;
 			}
-			yield_to_locked_owner(lock, val, paravirt);
+			yield_to_locked_owner(lock, val, paravirt,
+					pv_yield_allow_steal && (iters > HEAD_SPINS));
 		}
 
 		/* If we're the last queued, must clean up the tail. */
@@ -457,6 +481,22 @@ static int pv_yield_owner_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_owner, pv_yield_owner_get, pv_yield_owner_set, "%llu\n");
 
+static int pv_yield_allow_steal_set(void *data, u64 val)
+{
+	pv_yield_allow_steal = !!val;
+
+	return 0;
+}
+
+static int pv_yield_allow_steal_get(void *data, u64 *val)
+{
+	*val = pv_yield_allow_steal;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pv_yield_allow_steal, pv_yield_allow_steal_get, pv_yield_allow_steal_set, "%llu\n");
+
 static int pv_yield_prev_set(void *data, u64 val)
 {
 	pv_yield_prev = !!val;
@@ -479,6 +519,7 @@ static __init int spinlock_debugfs_init(void)
debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
if (is_shared_processor()) {
debugfs_create_file("qspl_pv_yield_owner", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_owner);
+ debugfs_create_file("qspl_pv_yield_allow_steal", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_allow_steal);
debugfs_create_file("qspl_pv_yield_prev", 0600, arch_debugfs_dir, NULL, &fops_pv_yield_prev);
}
--
2.35.1