From: Oleg Nesterov <oleg@redhat.com>
To: Paul McKenney <paulmck@linux.vnet.ibm.com>,
Peter Zijlstra <peterz@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>, Rik van Riel <riel@redhat.com>,
Srikar Dronamraju <srikar@linux.vnet.ibm.com>,
Ingo Molnar <mingo@kernel.org>,
Andrea Arcangeli <aarcange@redhat.com>,
Johannes Weiner <hannes@cmpxchg.org>,
Thomas Gleixner <tglx@linutronix.de>,
Steven Rostedt <rostedt@goodmis.org>,
Linus Torvalds <torvalds@linux-foundation.org>,
linux-kernel@vger.kernel.org
Subject: [PATCH 4/5] rcusync: introduce rcu_sync_struct->exclusive mode
Date: Fri, 4 Oct 2013 20:46:40 +0200
Message-ID: <20131004184640.GA17567@redhat.com>
In-Reply-To: <20131004184614.GA17536@redhat.com>

Add an rcu_sync_struct->exclusive boolean, set by rcu_sync_init(). It
controls whether rcu_sync_enter() is exclusive, i.e. whether concurrent
callers serialize against each other. This is what percpu_down_write()
actually wants.
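
For illustration, with excl == true concurrent enter/exit pairs behave
like a writer lock; the names below are made up, this is not the actual
percpu-rwsem code:

	static DEFINE_RCU_SCHED_SYNC(my_rss, true);	/* exclusive */

	static void my_write_lock(void)
	{
		/* a second caller sleeps here until my_write_unlock() */
		rcu_sync_enter(&my_rss);
	}

	static void my_write_unlock(void)
	{
		/* hands the slot to exactly one waiting writer, if any */
		rcu_sync_exit(&my_rss);
	}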

We turn ->gp_wait into "struct completion gp_comp". In "exclusive" mode
it is used as a resource counter; otherwise we only use its
completion->wait member for wait_event/wake_up_all. We never mix the
completion and wait_queue_head_t operations on the same object.
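
In exclusive mode the completion's ->done count hands the slot to
exactly one waiter per rcu_sync_exit(). A rough timeline with two tasks
sharing the same exclusive rcu_sync_struct:

	/*
	 * T1: rcu_sync_enter()   gp_count 0->1, calls ->sync(), returns
	 * T2: rcu_sync_enter()   gp_count 1->2, wait_for_completion(&gp_comp)
	 * T1: rcu_sync_exit()    gp_count 2->1, complete(&gp_comp), wakes T2
	 * T2: rcu_sync_exit()    gp_count 1->0, no complete(), queues the callback
	 */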

Note: it would be cleaner to call __complete_locked() under ->rss_lock
from an "else" branch in rcu_sync_exit(), but we do not have this
trivial helper; hence the wakeup_excl flag and the complete() call
after the lock is dropped.
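
If we had it, that helper would presumably be just the lock-held part
of complete(), something like this (hypothetical, not in the tree):

	static inline void __complete_locked(struct completion *x)
	{
		/* like complete(), but the caller already holds x->wait.lock */
		x->done++;
		__wake_up_locked(&x->wait, TASK_NORMAL, 1);
	}
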
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
---
 include/linux/rcusync.h |   29 ++++++++++++++++-------------
 kernel/cpu.c            |    2 +-
 kernel/rcusync.c        |   22 +++++++++++++++++-----
 3 files changed, 34 insertions(+), 19 deletions(-)
diff --git a/include/linux/rcusync.h b/include/linux/rcusync.h
index 33864a0..5689f24 100644
--- a/include/linux/rcusync.h
+++ b/include/linux/rcusync.h
@@ -1,8 +1,8 @@
#ifndef _LINUX_RCUSYNC_H_
#define _LINUX_RCUSYNC_H_
-#include <linux/wait.h>
#include <linux/rcupdate.h>
+#include <linux/completion.h>
struct rcu_sync_ops {
void (*sync)(void);
@@ -16,11 +16,12 @@ struct rcu_sync_ops {
struct rcu_sync_struct {
int gp_state;
int gp_count;
- wait_queue_head_t gp_wait;
+ struct completion gp_comp;
int cb_state;
struct rcu_head cb_head;
+ bool exclusive;
struct rcu_sync_ops *ops;
};
@@ -34,32 +35,34 @@ static inline bool rcu_sync_is_idle(struct rcu_sync_struct *rss)
enum rcu_sync_type { RCU_SYNC, RCU_SCHED_SYNC, RCU_BH_SYNC };
-extern void rcu_sync_init(struct rcu_sync_struct *, enum rcu_sync_type);
+extern void rcu_sync_init(struct rcu_sync_struct *,
+ enum rcu_sync_type, bool excl);
extern void rcu_sync_enter(struct rcu_sync_struct *);
extern void rcu_sync_exit(struct rcu_sync_struct *);
extern void rcu_sync_dtor(struct rcu_sync_struct *);
extern struct rcu_sync_ops rcu_sync_ops_array[];
-#define __RCU_SYNC_INITIALIZER(name, type) { \
+#define __RCU_SYNC_INITIALIZER(name, type, excl) { \
.gp_state = 0, \
.gp_count = 0, \
- .gp_wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.gp_wait), \
+ .gp_comp = COMPLETION_INITIALIZER(name.gp_comp), \
.cb_state = 0, \
+ .exclusive = excl, \
.ops = rcu_sync_ops_array + (type), \
}
-#define __DEFINE_RCU_SYNC(name, type) \
- struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type)
+#define __DEFINE_RCU_SYNC(name, type, excl) \
+ struct rcu_sync_struct name = __RCU_SYNC_INITIALIZER(name, type, excl)
-#define DEFINE_RCU_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_SYNC)
+#define DEFINE_RCU_SYNC(name, excl) \
+ __DEFINE_RCU_SYNC(name, RCU_SYNC, excl)
-#define DEFINE_RCU_SCHED_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC)
+#define DEFINE_RCU_SCHED_SYNC(name, excl) \
+ __DEFINE_RCU_SYNC(name, RCU_SCHED_SYNC, excl)
-#define DEFINE_RCU_BH_SYNC(name) \
- __DEFINE_RCU_SYNC(name, RCU_BH_SYNC)
+#define DEFINE_RCU_BH_SYNC(name, excl) \
+ __DEFINE_RCU_SYNC(name, RCU_BH_SYNC, excl)
#endif /* _LINUX_RCUSYNC_H_ */
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d5f475a..fb1bdf0 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -51,7 +51,7 @@ static int cpu_hotplug_disabled;
enum { readers_slow, readers_block };
-DEFINE_RCU_SCHED_SYNC(__cpuhp_rss);
+DEFINE_RCU_SCHED_SYNC(__cpuhp_rss, false);
EXPORT_SYMBOL_GPL(__cpuhp_rss);
DEFINE_PER_CPU(unsigned int, __cpuhp_refcount);
diff --git a/kernel/rcusync.c b/kernel/rcusync.c
index bb311eb..667eb7d 100644
--- a/kernel/rcusync.c
+++ b/kernel/rcusync.c
@@ -4,7 +4,7 @@
enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
-#define rss_lock gp_wait.lock
+#define rss_lock gp_comp.wait.lock
#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func) .held = func,
@@ -33,11 +33,13 @@ struct rcu_sync_ops rcu_sync_ops_array[] = {
},
};
-void rcu_sync_init(struct rcu_sync_struct *rss, enum rcu_sync_type type)
+void rcu_sync_init(struct rcu_sync_struct *rss,
+ enum rcu_sync_type type, bool excl)
{
memset(rss, 0, sizeof(*rss));
- init_waitqueue_head(&rss->gp_wait);
+ init_completion(&rss->gp_comp);
rss->ops = rcu_sync_ops_array + type;
+ rss->exclusive = excl;
}
void rcu_sync_enter(struct rcu_sync_struct *rss)
@@ -56,9 +58,13 @@ void rcu_sync_enter(struct rcu_sync_struct *rss)
if (need_sync) {
rss->ops->sync();
rss->gp_state = GP_PASSED;
- wake_up_all(&rss->gp_wait);
+ if (!rss->exclusive)
+ wake_up_all(&rss->gp_comp.wait);
} else if (need_wait) {
- wait_event(rss->gp_wait, rss->gp_state == GP_PASSED);
+ if (!rss->exclusive)
+ wait_event(rss->gp_comp.wait, rss->gp_state == GP_PASSED);
+ else
+ wait_for_completion(&rss->gp_comp);
} else {
/*
* Possible when there's a pending CB from a rcu_sync_exit().
@@ -105,6 +111,8 @@ static void rcu_sync_func(struct rcu_head *rcu)
void rcu_sync_exit(struct rcu_sync_struct *rss)
{
+ bool wakeup_excl = rss->exclusive;
+
spin_lock_irq(&rss->rss_lock);
if (!--rss->gp_count) {
if (rss->cb_state == CB_IDLE) {
@@ -113,8 +121,12 @@ void rcu_sync_exit(struct rcu_sync_struct *rss)
} else if (rss->cb_state == CB_PENDING) {
rss->cb_state = CB_REPLAY;
}
+ wakeup_excl = false;
}
spin_unlock_irq(&rss->rss_lock);
+
+ if (wakeup_excl)
+ complete(&rss->gp_comp);
}
void rcu_sync_dtor(struct rcu_sync_struct *rss)
--
1.5.5.1