From: Tejun Heo <tj@kernel.org>
To: axboe@kernel.dk, akinobu.mita@gmail.com, tom.leiming@gmail.com
Cc: kent.overstreet@gmail.com, cl@linux-foundation.org,
linux-kernel@vger.kernel.org, kernel-team@fb.com,
Tejun Heo <tj@kernel.org>
Subject: [PATCH 4/5] percpu_ref: restructure operation mode switching
Date: Tue, 29 Sep 2015 17:47:19 -0400 [thread overview]
Message-ID: <1443563240-29306-5-git-send-email-tj@kernel.org> (raw)
In-Reply-To: <1443563240-29306-1-git-send-email-tj@kernel.org>
Restructure atomic/percpu mode switching.
* The users of __percpu_ref_switch_to_atomic/percpu() now call a new
function __percpu_ref_switch_mode() which calls either of the
original switching functions depending on the current state of
ref->force_atomic and the __PERCPU_REF_DEAD flag. The callers no
longer check whether switching is necessary but always invoke
__percpu_ref_switch_mode().
* The waiting for !ref->confirm_switch (i.e. for any in-progress
atomic-switch confirmation to complete) is collected into
__percpu_ref_switch_mode() instead of being duplicated in both
switching functions.
This patch doesn't cause any behavior differences.
Signed-off-by: Tejun Heo <tj@kernel.org>
---
lib/percpu-refcount.c | 64 +++++++++++++++++++++++----------------------------
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
index 6a36597..1e69c3b 100644
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
@@ -161,16 +161,6 @@ static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
- /*
- * If the previous ATOMIC switching hasn't finished yet, wait for
- * its completion. If the caller ensures that ATOMIC switching
- * isn't in progress, this function can be called from any context.
- * Do an extra confirm_switch test to circumvent the unconditional
- * might_sleep() in wait_event().
- */
- if (ref->confirm_switch)
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
if (ref->percpu_count_ptr & __PERCPU_REF_ATOMIC) {
if (confirm_switch)
confirm_switch(ref);
@@ -195,16 +185,6 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
int cpu;
- /*
- * If the previous ATOMIC switching hasn't finished yet, wait for
- * its completion. If the caller ensures that ATOMIC switching
- * isn't in progress, this function can be called from any context.
- * Do an extra confirm_switch test to circumvent the unconditional
- * might_sleep() in wait_event().
- */
- if (ref->confirm_switch)
- wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
-
BUG_ON(!percpu_count);
if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
@@ -225,6 +205,25 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}
+static void __percpu_ref_switch_mode(struct percpu_ref *ref,
+ percpu_ref_func_t *confirm_switch)
+{
+ /*
+ * If the previous ATOMIC switching hasn't finished yet, wait for
+ * its completion. If the caller ensures that ATOMIC switching
+ * isn't in progress, this function can be called from any context.
+ * Do an extra confirm_switch test to circumvent the unconditional
+ * might_sleep() in wait_event().
+ */
+ if (ref->confirm_switch)
+ wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
+
+ if (ref->force_atomic || (ref->percpu_count_ptr & __PERCPU_REF_DEAD))
+ __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ else
+ __percpu_ref_switch_to_percpu(ref);
+}
+
/**
* percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
* @ref: percpu_ref to switch to atomic mode
@@ -241,16 +240,15 @@ static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
* operations. Note that @ref will stay in atomic mode across kill/reinit
* cycles until percpu_ref_switch_to_percpu() is called.
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @confirm_kill is specified and @ref is already in
- * the process of switching to atomic mode. In such cases, @confirm_switch
- * will be invoked after the switching is complete.
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch)
{
ref->force_atomic = true;
- __percpu_ref_switch_to_atomic(ref, confirm_switch);
+ __percpu_ref_switch_mode(ref, confirm_switch);
}
/**
@@ -267,17 +265,14 @@ void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
* dying or dead, the actual switching takes place on the following
* percpu_ref_reinit().
*
- * This function normally doesn't block and can be called from any context
- * but it may block if @ref is in the process of switching to atomic mode
- * by percpu_ref_switch_atomic().
+ * This function may block if @ref is in the process of switching to atomic
+ * mode. If the caller ensures that @ref is not in the process of
+ * switching to atomic mode, this function can be called from any context.
*/
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
ref->force_atomic = false;
-
- /* a dying or dead ref can't be switched to percpu mode w/o reinit */
- if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
}
/**
@@ -302,7 +297,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
"%s called more than once on %pf!", __func__, ref->release);
ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- __percpu_ref_switch_to_atomic(ref, confirm_kill);
+ __percpu_ref_switch_mode(ref, confirm_kill);
percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
@@ -324,7 +319,6 @@ void percpu_ref_reinit(struct percpu_ref *ref)
ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
percpu_ref_get(ref);
- if (!ref->force_atomic)
- __percpu_ref_switch_to_percpu(ref);
+ __percpu_ref_switch_mode(ref, NULL);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
--
2.4.3
next prev parent reply other threads:[~2015-09-29 21:48 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2015-09-29 21:47 [PATCHSET percpu/for-4.4] percpu_ref: make mode switching operations synchronize themselves Tejun Heo
2015-09-29 21:47 ` [PATCH 1/5] percpu_ref: remove unnecessary RCU grace period for staggered atomic switching confirmation Tejun Heo
2015-09-29 21:47 ` [PATCH 2/5] percpu_ref: reorganize __percpu_ref_switch_to_atomic() and relocate percpu_ref_switch_to_atomic() Tejun Heo
2015-09-29 21:47 ` [PATCH 3/5] percpu_ref: unify staggered atomic switching wait behavior Tejun Heo
2015-09-29 21:47 ` Tejun Heo [this message]
2015-09-29 21:47 ` [PATCH 5/5] percpu_ref: allow operation mode switching operations to be called concurrently Tejun Heo
2016-08-10 19:06 ` [PATCHSET percpu/for-4.4] percpu_ref: make mode switching operations synchronize themselves Tejun Heo
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1443563240-29306-5-git-send-email-tj@kernel.org \
--to=tj@kernel.org \
--cc=akinobu.mita@gmail.com \
--cc=axboe@kernel.dk \
--cc=cl@linux-foundation.org \
--cc=kent.overstreet@gmail.com \
--cc=kernel-team@fb.com \
--cc=linux-kernel@vger.kernel.org \
--cc=tom.leiming@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox