From: Nicholas Piggin <npiggin@gmail.com>
To: Peter Zijlstra <peterz@infradead.org>
Cc: Nicholas Piggin <npiggin@gmail.com>,
	Ingo Molnar <mingo@redhat.com>, Will Deacon <will@kernel.org>,
	Waiman Long <longman@redhat.com>,
	Boqun Feng <boqun.feng@gmail.com>,
	linux-kernel@vger.kernel.org
Subject: [PATCH 04/13] locking/qspinlock: move pv lock word helpers into qspinlock.c
Date: Tue,  5 Jul 2022 00:38:11 +1000	[thread overview]
Message-ID: <20220704143820.3071004-5-npiggin@gmail.com> (raw)
In-Reply-To: <20220704143820.3071004-1-npiggin@gmail.com>

Move the paravirt lock word helpers (set_pending(), trylock_clear_pending())
out of qspinlock_paravirt.h and into qspinlock.c, next to the other lock
word bit manipulation helpers. There is no real reason not to keep all
the bit manipulation together.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 kernel/locking/qspinlock.c          | 107 ++++++++++++++++------------
 kernel/locking/qspinlock_paravirt.h |  51 -------------
 2 files changed, 63 insertions(+), 95 deletions(-)
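
For reference, the "p,1,0 -> n,0,1" style comments in the hunks below
denote (tail, pending, locked) transitions of the 32-bit lock word. Here
is a minimal userspace sketch of the layout these helpers assume (field
positions as in the generic qspinlock definitions for the NR_CPUS < 16K
case, i.e. _Q_PENDING_BITS == 8; the QS_* names and main() are
illustrative stand-ins for the kernel's _Q_* macros, not the real
definitions):

	#include <stdint.h>
	#include <stdio.h>

	/* Lock word layout, least significant bits first. */
	#define QS_LOCKED_OFFSET	0	/* bits  0- 7: locked byte */
	#define QS_PENDING_OFFSET	8	/* bits  8-15: pending byte */
	#define QS_TAIL_OFFSET		16	/* bits 16-31: tail (idx + cpu) */

	#define QS_LOCKED_VAL		(1U << QS_LOCKED_OFFSET)
	#define QS_PENDING_VAL		(1U << QS_PENDING_OFFSET)
	#define QS_PENDING_MASK		(0xffU << QS_PENDING_OFFSET)

	int main(void)
	{
		uint32_t val = QS_PENDING_VAL;	/* 0,1,0: pending set, unlocked */

		/* trylock_clear_pending's successful step: 0,1,0 -> 0,0,1 */
		val = (val & ~QS_PENDING_MASK) | QS_LOCKED_VAL;
		printf("lock word: 0x%08x\n", (unsigned)val);	/* 0x00000001 */
		return 0;
	}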

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 7360d643de29..8f2173e22479 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -141,7 +141,24 @@ struct qnode *grab_qnode(struct qnode *base, int idx)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
+/**
+ * set_pending - set the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,0,* -> *,1,*
+ *
+ * The pending bit is used by the queue head vCPU to indicate that it
+ * is actively spinning on the lock and no lock stealing is allowed.
+ */
+static __always_inline void set_pending(struct qspinlock *lock)
+{
 #if _Q_PENDING_BITS == 8
+	WRITE_ONCE(lock->pending, 1);
+#else
+	atomic_or(_Q_PENDING_VAL, &lock->val);
+#endif
+}
+
 /**
  * clear_pending - clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -150,7 +167,11 @@ struct qnode *grab_qnode(struct qnode *base, int idx)
  */
 static __always_inline void clear_pending(struct qspinlock *lock)
 {
+#if _Q_PENDING_BITS == 8
 	WRITE_ONCE(lock->pending, 0);
+#else
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
+#endif
 }
 
 /**
@@ -163,74 +184,72 @@ static __always_inline void clear_pending(struct qspinlock *lock)
  */
 static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
 {
+#if _Q_PENDING_BITS == 8
 	WRITE_ONCE(lock->locked_pending, _Q_LOCKED_VAL);
+#else
+	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+#endif
 }
 
-/*
- * xchg_tail - Put in the new queue tail code word & retrieve previous one
- * @lock : Pointer to queued spinlock structure
- * @tail : The new queue tail code word
- * Return: The previous queue tail code word
- *
- * xchg(lock, tail), which heads an address dependency
- *
- * p,*,* -> n,*,* ; prev = xchg(lock, node)
- */
-static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
-{
-	/*
-	 * We can use relaxed semantics since the caller ensures that the
-	 * MCS node is properly initialized before updating the tail.
-	 */
-	return (u32)xchg_relaxed(&lock->tail,
-				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
-}
-
-#else /* _Q_PENDING_BITS == 8 */
-
 /**
- * clear_pending - clear the pending bit.
+ * trylock_clear_pending - try to take ownership and clear the pending bit
  * @lock: Pointer to queued spinlock structure
  *
- * *,1,* -> *,0,*
+ * 0,1,0 -> 0,0,1
  */
-static __always_inline void clear_pending(struct qspinlock *lock)
+static __always_inline int trylock_clear_pending(struct qspinlock *lock)
 {
-	atomic_andnot(_Q_PENDING_VAL, &lock->val);
-}
+#if _Q_PENDING_BITS == 8
+	return !READ_ONCE(lock->locked) &&
+	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
+				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
+#else
+	int val = atomic_read(&lock->val);
 
-/**
- * clear_pending_set_locked - take ownership and clear the pending bit.
- * @lock: Pointer to queued spinlock structure
- *
- * *,1,0 -> *,0,1
- */
-static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
-{
-	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+	for (;;) {
+		int old, new;
+
+		if (val  & _Q_LOCKED_MASK)
+			break;
+
+		/*
+		 * Try to clear pending bit & set locked bit
+		 */
+		old = val;
+		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
+		val = atomic_cmpxchg_acquire(&lock->val, old, new);
+
+		if (val == old)
+			return 1;
+	}
+	return 0;
+#endif
 }
 
-/**
+/*
  * xchg_tail - Put in the new queue tail code word & retrieve previous one
  * @lock : Pointer to queued spinlock structure
  * @tail : The new queue tail code word
  * Return: The previous queue tail code word
  *
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
  *
  * p,*,* -> n,*,* ; prev = xchg(lock, node)
  */
 static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 {
+	/*
+	 * We can use relaxed semantics since the caller ensures that the
+	 * MCS node is properly initialized before updating the tail.
+	 */
+#if _Q_PENDING_BITS == 8
+	return (u32)xchg_relaxed(&lock->tail,
+				 tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+#else
 	u32 old, new, val = atomic_read(&lock->val);
 
 	for (;;) {
 		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
-		/*
-		 * We can use relaxed semantics since the caller ensures that
-		 * the MCS node is properly initialized before updating the
-		 * tail.
-		 */
 		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
 		if (old == val)
 			break;
@@ -238,8 +257,8 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 		val = old;
 	}
 	return old;
+#endif
 }
-#endif /* _Q_PENDING_BITS == 8 */
 
 /**
  * queued_fetch_set_pending_acquire - fetch the whole lock value and set pending
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index cce3d3dde216..97385861adc2 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -95,57 +95,6 @@ static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
 	return false;
 }
 
-/*
- * The pending bit is used by the queue head vCPU to indicate that it
- * is actively spinning on the lock and no lock stealing is allowed.
- */
-#if _Q_PENDING_BITS == 8
-static __always_inline void set_pending(struct qspinlock *lock)
-{
-	WRITE_ONCE(lock->pending, 1);
-}
-
-/*
- * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
- * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
- * lock just to be sure that it will get it.
- */
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
-	return !READ_ONCE(lock->locked) &&
-	       (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
-				_Q_LOCKED_VAL) == _Q_PENDING_VAL);
-}
-#else /* _Q_PENDING_BITS == 8 */
-static __always_inline void set_pending(struct qspinlock *lock)
-{
-	atomic_or(_Q_PENDING_VAL, &lock->val);
-}
-
-static __always_inline int trylock_clear_pending(struct qspinlock *lock)
-{
-	int val = atomic_read(&lock->val);
-
-	for (;;) {
-		int old, new;
-
-		if (val  & _Q_LOCKED_MASK)
-			break;
-
-		/*
-		 * Try to clear pending bit & set locked bit
-		 */
-		old = val;
-		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-		val = atomic_cmpxchg_acquire(&lock->val, old, new);
-
-		if (val == old)
-			return 1;
-	}
-	return 0;
-}
-#endif /* _Q_PENDING_BITS == 8 */
-
 /*
  * Lock and MCS node addresses hash table for fast lookup
  *
-- 
2.35.1
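
For readers following along outside the kernel tree, here is a rough C11
model of the _Q_PENDING_BITS != 8 variant of trylock_clear_pending()
moved above, with <stdatomic.h> standing in for the kernel's atomic_t
API and a weak compare-exchange loop in place of
atomic_cmpxchg_acquire(). The QS_* constants and the _model() name are
made up for the sketch; it illustrates the transition, it is not the
kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define QS_LOCKED_VAL	0x0001U
	#define QS_LOCKED_MASK	0x00ffU
	#define QS_PENDING_MASK	0xff00U

	/* 0,1,0 -> 0,0,1: take the lock and clear pending, or fail. */
	static bool trylock_clear_pending_model(_Atomic uint32_t *lock)
	{
		uint32_t val = atomic_load_explicit(lock, memory_order_relaxed);

		/* Give up as soon as the locked byte is observed set. */
		while (!(val & QS_LOCKED_MASK)) {
			uint32_t new_val = (val & ~QS_PENDING_MASK) | QS_LOCKED_VAL;

			/* On failure, compare_exchange reloads val for the retry. */
			if (atomic_compare_exchange_weak_explicit(lock, &val,
					new_val, memory_order_acquire,
					memory_order_relaxed))
				return true;
		}
		return false;
	}

The _Q_PENDING_BITS == 8 variant avoids the loop because pending and
locked each occupy a whole byte: a single cmpxchg_acquire() on the
16-bit locked_pending halfword does the same 0,1,0 -> 0,0,1 step, and
set_pending()/clear_pending() become plain byte stores.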


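Similarly, a rough C11 sketch of the cmpxchg-based xchg_tail() fallback:
the loop must preserve the locked and pending bits while swapping in the
new tail, and relaxed ordering suffices because the caller initializes
its MCS node before publishing it. Again the QS_* constant and the
_model() name are invented for the illustration:

	#include <stdatomic.h>
	#include <stdint.h>

	#define QS_LOCKED_PENDING_MASK	0xffffU	/* locked byte + pending byte */

	/* p,*,* -> n,*,*: publish a new tail code word, return the old one. */
	static uint32_t xchg_tail_model(_Atomic uint32_t *lock, uint32_t tail)
	{
		uint32_t val = atomic_load_explicit(lock, memory_order_relaxed);
		uint32_t new_val;

		do {
			/* Keep locked + pending, replace the tail field. */
			new_val = (val & QS_LOCKED_PENDING_MASK) | tail;
		} while (!atomic_compare_exchange_weak_explicit(lock, &val,
				new_val, memory_order_relaxed,
				memory_order_relaxed));

		return val & ~QS_LOCKED_PENDING_MASK;	/* previous tail bits */
	}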