From: Nicholas Piggin <npiggin@gmail.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>,
Ingo Molnar <mingo@redhat.com>, Will Deacon <will@kernel.org>,
Waiman Long <longman@redhat.com>,
Boqun Feng <boqun.feng@gmail.com>,
linux-kernel@vger.kernel.org
Subject: [PATCH 05/13] locking/qspinlock: be less clever with the preprocessor
Date: Tue, 5 Jul 2022 00:38:12 +1000
Message-ID: <20220704143820.3071004-6-npiggin@gmail.com>
In-Reply-To: <20220704143820.3071004-1-npiggin@gmail.com>

Stop qspinlock.c from including itself and avoid most of the function
renaming with the preprocessor.

This is mostly done by having the common slowpath code take a 'bool
paravirt' argument and adjusting the code based on that.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
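A note after the fold, for reviewers who prefer the gist to the full
diff: the patch replaces the self-include / token-renaming trick with
one always-inlined common body that takes a compile-time-constant flag,
relying on the compiler to constant-fold the PV branches away. A minimal
standalone sketch of the pattern, with illustrative names rather than
the kernel's own:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a PV hook; in the kernel these are the pv_*() helpers,
 * or empty __always_inline stubs when PARAVIRT_SPINLOCKS is off. */
static inline void pv_init_node(void) { puts("pv_init_node"); }

/*
 * One common body. Each caller passes a literal true/false; because
 * the function is forcibly inlined, the compiler constant-folds the
 * 'if (paravirt)' tests away and emits two specialised slowpaths with
 * no runtime flag check.
 */
static inline __attribute__((always_inline))
void mcs_queue(bool paravirt)
{
	if (paravirt)
		pv_init_node();
	/* ... common queueing logic shared by both slowpaths ... */
}

void native_slowpath(void) { mcs_queue(false); }
void pv_slowpath(void)     { mcs_queue(true); }

int main(void)
{
	native_slowpath();	/* PV branch folded out */
	pv_slowpath();		/* PV branch folded in */
	return 0;
}

Since the specialisation now depends on inlining plus constant
propagation rather than on the preprocessor, comparing the generated
object code before and after the series is a reasonable sanity check.
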
kernel/locking/qspinlock.c | 116 ++++++++++++----------------
kernel/locking/qspinlock_paravirt.h | 10 +--
2 files changed, 52 insertions(+), 74 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 8f2173e22479..b96c58ca51de 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -11,8 +11,6 @@
* Peter Zijlstra <peterz@infradead.org>
*/
-#ifndef _GEN_PV_LOCK_SLOWPATH
-
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
@@ -285,35 +283,21 @@ static __always_inline void set_locked(struct qspinlock *lock)
WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
}
-
-/*
- * Generate the native code for queued_spin_unlock_slowpath(); provide NOPs for
- * all the PV callbacks.
- */
-
-static __always_inline void __pv_init_node(struct qnode *node) { }
-static __always_inline void __pv_wait_node(struct qnode *node,
- struct qnode *prev) { }
-static __always_inline void __pv_kick_node(struct qspinlock *lock,
- struct qnode *node) { }
-static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
- struct qnode *node)
- { return 0; }
-
-#define pv_enabled() false
-
-#define pv_init_node __pv_init_node
-#define pv_wait_node __pv_wait_node
-#define pv_kick_node __pv_kick_node
-#define pv_wait_head_or_lock __pv_wait_head_or_lock
-
#ifdef CONFIG_PARAVIRT_SPINLOCKS
-#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
-#endif
-
-#endif /* _GEN_PV_LOCK_SLOWPATH */
+#include "qspinlock_paravirt.h"
+#else /* CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void pv_init_node(struct qnode *node) { }
+static __always_inline void pv_wait_node(struct qnode *node,
+ struct qnode *prev) { }
+static __always_inline void pv_kick_node(struct qspinlock *lock,
+ struct qnode *node) { }
+static __always_inline u32 pv_wait_head_or_lock(struct qspinlock *lock,
+ struct qnode *node)
+ { return 0; }
+static __always_inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock) { BUILD_BUG(); }
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
+static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt)
{
struct qnode *prev, *next, *node;
u32 val, old, tail;
@@ -338,8 +322,13 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
*/
if (unlikely(idx >= MAX_NODES)) {
lockevent_inc(lock_no_node);
- while (!queued_spin_trylock(lock))
- cpu_relax();
+ if (paravirt) {
+ while (!pv_hybrid_queued_unfair_trylock(lock))
+ cpu_relax();
+ } else {
+ while (!queued_spin_trylock(lock))
+ cpu_relax();
+ }
goto release;
}
@@ -359,15 +348,21 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
node->locked = 0;
node->next = NULL;
- pv_init_node(node);
+ if (paravirt)
+ pv_init_node(node);
/*
* We touched a (possibly) cold cacheline in the per-cpu queue node;
* attempt the trylock once more in the hope someone let go while we
* weren't watching.
*/
- if (queued_spin_trylock(lock))
- goto release;
+ if (paravirt) {
+ if (pv_hybrid_queued_unfair_trylock(lock))
+ goto release;
+ } else {
+ if (queued_spin_trylock(lock))
+ goto release;
+ }
/*
* Ensure that the initialisation of @node is complete before we
@@ -396,7 +391,8 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
/* Link @node into the waitqueue. */
WRITE_ONCE(prev->next, node);
- pv_wait_node(node, prev);
+ if (paravirt)
+ pv_wait_node(node, prev);
/* Wait for mcs node lock to be released */
smp_cond_load_acquire(&node->locked, VAL);
@@ -432,8 +428,10 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
* If PV isn't active, 0 will be returned instead.
*
*/
- if ((val = pv_wait_head_or_lock(lock, node)))
- goto locked;
+ if (paravirt) {
+ if ((val = pv_wait_head_or_lock(lock, node)))
+ goto locked;
+ }
val = atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_PENDING_MASK));
@@ -478,7 +476,8 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
next = smp_cond_load_relaxed(&node->next, (VAL));
smp_store_release(&next->locked, 1); /* unlock the mcs node lock */
- pv_kick_node(lock, next);
+ if (paravirt)
+ pv_kick_node(lock, next);
release:
trace_contention_end(lock, 0);
@@ -510,13 +509,12 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
* contended : (*,x,y) +--> (*,0,0) ---> (*,0,1) -' :
* queue : ^--' :
*/
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
+#endif
+
void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
- if (pv_enabled()) {
- queued_spin_lock_mcs_queue(lock);
- return;
- }
-
if (virt_spin_lock(lock))
return;
@@ -590,31 +588,17 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
*/
queue:
lockevent_inc(lock_slowpath);
- queued_spin_lock_mcs_queue(lock);
+ queued_spin_lock_mcs_queue(lock, false);
}
EXPORT_SYMBOL(queued_spin_lock_slowpath);
-/*
- * Generate the paravirt code for queued_spin_unlock_slowpath().
- */
-#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-#define _GEN_PV_LOCK_SLOWPATH
-
-#undef pv_enabled
-#define pv_enabled() true
-
-#undef pv_init_node
-#undef pv_wait_node
-#undef pv_kick_node
-#undef pv_wait_head_or_lock
-
-#define queued_spin_lock_mcs_queue __pv_queued_spin_lock_mcs_queue
-
-#undef queued_spin_lock_slowpath
-#define queued_spin_lock_slowpath __pv_queued_spin_lock_slowpath
-
-#include "qspinlock_paravirt.h"
-#include "qspinlock.c"
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#undef queued_spin_lock_slowpath
+void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+ queued_spin_lock_mcs_queue(lock, true);
+}
+EXPORT_SYMBOL(__pv_queued_spin_lock_slowpath);
bool nopvspin __initdata;
static __init int parse_nopvspin(char *arg)
@@ -623,4 +607,4 @@ static __init int parse_nopvspin(char *arg)
return 0;
}
early_param("nopvspin", parse_nopvspin);
-#endif
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 97385861adc2..f1922e3a0f7d 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -1,8 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GEN_PV_LOCK_SLOWPATH
-#error "do not include this file"
-#endif
-
#include <linux/hash.h>
#include <linux/memblock.h>
#include <linux/debug_locks.h>
@@ -50,9 +46,8 @@ enum vcpu_state {
/*
* Hybrid PV queued/unfair lock
*
- * By replacing the regular queued_spin_trylock() with the function below,
- * it will be called once when a lock waiter enter the PV slowpath before
- * being queued.
+ * This function is called once when a lock waiter enters the PV slowpath
+ * before being queued.
*
* The pending bit is set by the queue head vCPU of the MCS wait queue in
* pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
@@ -71,7 +66,6 @@ enum vcpu_state {
* queued lock (no lock starvation) and an unfair lock (good performance
* on not heavily contended locks).
*/
-#define queued_spin_trylock(l) pv_hybrid_queued_unfair_trylock(l)
static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
/*
--
2.35.1