From: Andi Kleen <andi@firstfloor.org>
To: linux-kernel@vger.kernel.org
Cc: torvalds@linux-foundation.org, akpm@linux-foundation.org,
x86@kernel.org, Andi Kleen <ak@linux.intel.com>
Subject: [PATCH 08/29] locking, tsx: Add support for arch_read/write_unlock_irq/flags
Date: Fri, 22 Mar 2013 18:25:02 -0700 [thread overview]
Message-ID: <1364001923-10796-9-git-send-email-andi@firstfloor.org> (raw)
In-Reply-To: <1364001923-10796-1-git-send-email-andi@firstfloor.org>
From: Andi Kleen <ak@linux.intel.com>
The TSX RTM lock elision code needs to distinguish unlocks that re-enable
interrupts from other unlocks. Add arch_read/write_unlock_irq/flags for rwlocks,
similar to the existing ones for spinlocks. This is opt-in by the architecture.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
---
include/linux/rwlock.h | 76 ++++++++++++++++++++++++++++++++++++++-
include/linux/rwlock_api_smp.h | 12 ++----
2 files changed, 78 insertions(+), 10 deletions(-)
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index bc2994e..82a7f61 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -14,6 +14,34 @@
* Released under the General Public License (GPL).
*/
+#if !defined(ARCH_HAS_RWLOCK_UNLOCK_IRQ) && defined(CONFIG_SMP)
+static inline void arch_read_unlock_irqrestore(arch_rwlock_t *lock,
+ unsigned long flags)
+{
+ arch_read_unlock(lock);
+ local_irq_restore(flags);
+}
+
+static inline void arch_read_unlock_irq(arch_rwlock_t *lock)
+{
+ arch_read_unlock(lock);
+ local_irq_enable();
+}
+
+static inline void arch_write_unlock_irqrestore(arch_rwlock_t *lock,
+ unsigned long flags)
+{
+ arch_write_unlock(lock);
+ local_irq_restore(flags);
+}
+
+static inline void arch_write_unlock_irq(arch_rwlock_t *lock)
+{
+ arch_write_unlock(lock);
+ local_irq_enable();
+}
+#endif
+
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key);
@@ -37,17 +65,61 @@ do { \
#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
+
+static inline void do_raw_write_unlock_irq(rwlock_t *lock) __releases(lock)
+{
+ do_raw_write_unlock(lock);
+ local_irq_enable();
+}
+
+static inline void do_raw_read_unlock_irq(rwlock_t *lock) __releases(lock)
+{
+ do_raw_read_unlock(lock);
+ local_irq_enable();
+}
+
+static inline void do_raw_write_unlock_irqrestore(rwlock_t *lock,
+ unsigned long flags)
+ __releases(lock)
+{
+ do_raw_write_unlock(lock);
+ local_irq_restore(flags);
+}
+
+static inline void do_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ __releases(lock)
+{
+ do_raw_read_unlock(lock);
+ local_irq_restore(flags);
+}
+
#else
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
-# define do_raw_read_lock_flags(lock, flags) \
- do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
+# define do_raw_read_lock_flags(lock, flags) \
+ do { __acquire(lock); \
+ arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_read_unlock_irqrestore(rwlock, flags) \
+ do { \
+ arch_read_unlock_irqrestore(&(rwlock)->raw_lock, flags); \
+ __release(lock); \
+ } while (0)
+# define do_raw_read_unlock_irq(rwlock) \
+ do {arch_read_unlock_irq(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_lock_flags(lock, flags) \
do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
+# define do_raw_write_unlock_irqrestore(rwlock, flags) \
+ do { \
+ arch_write_unlock_irqrestore(&(rwlock)->raw_lock, flags); \
+ __release(lock); \
+ } while (0)
+# define do_raw_write_unlock_irq(rwlock) \
+ do { arch_write_unlock_irq(&(rwlock)->raw_lock); \
+ __release(lock); } while (0)
#endif
#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 9c9f049..8ac4a73 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -233,16 +233,14 @@ static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- do_raw_read_unlock(lock);
- local_irq_restore(flags);
+ do_raw_read_unlock_irqrestore(lock, flags);
preempt_enable();
}
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- do_raw_read_unlock(lock);
- local_irq_enable();
+ do_raw_read_unlock_irq(lock);
preempt_enable();
}
@@ -258,16 +256,14 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- do_raw_write_unlock(lock);
- local_irq_restore(flags);
+ do_raw_write_unlock_irqrestore(lock, flags);
preempt_enable();
}
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
rwlock_release(&lock->dep_map, 1, _RET_IP_);
- do_raw_write_unlock(lock);
- local_irq_enable();
+ do_raw_write_unlock_irq(lock);
preempt_enable();
}
--
1.7.7.6
next prev parent reply other threads:[~2013-03-23 1:28 UTC|newest]
Thread overview: 50+ messages / expand[flat|nested] mbox.gz Atom feed top
2013-03-23 1:24 RFC: Kernel lock elision for TSX Andi Kleen
2013-03-23 1:24 ` [PATCH 01/29] tsx: Add generic noop macros for RTM intrinsics Andi Kleen
2013-03-25 3:39 ` Michael Neuling
2013-03-25 8:19 ` Andi Kleen
2013-03-25 8:50 ` Michael Neuling
2013-03-23 1:24 ` [PATCH 02/29] x86, tsx: Add " Andi Kleen
2013-03-25 3:40 ` Michael Neuling
2013-03-25 8:15 ` Andi Kleen
2013-03-25 8:54 ` Michael Neuling
2013-03-25 9:32 ` Andi Kleen
2013-03-23 1:24 ` [PATCH 03/29] tsx: Add generic disable_txn macros Andi Kleen
2013-03-23 1:24 ` [PATCH 04/29] tsx: Add generic linux/elide.h macros Andi Kleen
2013-03-23 1:24 ` [PATCH 05/29] x86, tsx: Add a minimal RTM tester at bootup Andi Kleen
2013-03-23 1:25 ` [PATCH 06/29] checkpatch: Don't warn about if ((status = _xbegin()) == _XBEGIN_STARTED) Andi Kleen
2013-03-25 3:39 ` Michael Neuling
2013-03-23 1:25 ` [PATCH 07/29] x86, tsx: Don't abort immediately in __read/write_lock_failed Andi Kleen
2013-03-23 1:25 ` Andi Kleen [this message]
2013-03-23 1:25 ` [PATCH 09/29] x86, xen: Support arch_spin_unlock_irq/flags Andi Kleen
2013-03-23 1:25 ` [PATCH 10/29] locking, tsx: Add support for arch_spin_unlock_irq/flags Andi Kleen
2013-03-23 1:25 ` [PATCH 11/29] x86, paravirt: Add support for arch_spin_unlock_flags/irq Andi Kleen
2013-03-23 1:25 ` [PATCH 12/29] x86, tsx: Add a per thread transaction disable count Andi Kleen
2013-03-23 11:51 ` Borislav Petkov
2013-03-23 13:51 ` Andi Kleen
2013-03-23 15:52 ` Borislav Petkov
2013-03-23 16:25 ` Borislav Petkov
2013-03-23 17:16 ` Linus Torvalds
2013-03-23 17:32 ` Borislav Petkov
2013-03-23 18:01 ` Andi Kleen
2013-03-23 1:25 ` [PATCH 13/29] params: Add a per cpu module param type Andi Kleen
2013-03-23 1:25 ` [PATCH 14/29] params: Add static key module param Andi Kleen
2013-03-23 1:25 ` [PATCH 15/29] x86, tsx: Add TSX lock elision infrastructure Andi Kleen
2013-03-23 1:25 ` [PATCH 16/29] locking, tsx: Allow architecture to control mutex fast path owner field Andi Kleen
2013-03-23 1:25 ` [PATCH 17/29] x86, tsx: Enable lock elision for mutexes Andi Kleen
2013-03-23 1:25 ` [PATCH 18/29] locking, tsx: Abort is mutex_is_locked() Andi Kleen
2013-03-23 1:25 ` [PATCH 19/29] x86, tsx: Add support for rwsem elision Andi Kleen
2013-03-23 1:25 ` [PATCH 20/29] x86, tsx: Enable elision for read write spinlocks Andi Kleen
2013-03-23 1:25 ` [PATCH 21/29] locking, tsx: Protect assert_spin_locked() with _xtest() Andi Kleen
2013-03-23 1:25 ` [PATCH 22/29] locking, tsx: Add a trace point for elision skipping Andi Kleen
2013-03-23 1:25 ` [PATCH 23/29] x86, tsx: Add generic per-lock adaptive lock elision support Andi Kleen
2013-03-23 1:25 ` [PATCH 24/29] x86, tsx: Use adaptive elision for mutexes Andi Kleen
2013-03-23 1:25 ` [PATCH 25/29] x86, tsx: Add adaption support for spinlocks Andi Kleen
2013-03-23 1:25 ` [PATCH 26/29] x86, tsx: Add adaptation support to rw spinlocks Andi Kleen
2013-03-23 1:25 ` [PATCH 27/29] locking, tsx: Add elision to bit spinlocks Andi Kleen
2013-03-23 1:25 ` [PATCH 28/29] x86, tsx: Add adaptive elision for rwsems Andi Kleen
2013-03-23 1:25 ` [PATCH 29/29] tsx: Add documentation for lock-elision Andi Kleen
2013-03-23 17:11 ` RFC: Kernel lock elision for TSX Linus Torvalds
2013-03-23 18:00 ` Andi Kleen
2013-03-23 18:02 ` Andi Kleen
2013-03-24 14:17 ` Benjamin Herrenschmidt
2013-03-25 0:59 ` Michael Neuling
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1364001923-10796-9-git-send-email-andi@firstfloor.org \
--to=andi@firstfloor.org \
--cc=ak@linux.intel.com \
--cc=akpm@linux-foundation.org \
--cc=linux-kernel@vger.kernel.org \
--cc=torvalds@linux-foundation.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox