* [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case
@ 2024-03-19 18:54 Joel Fernandes (Google)
2024-03-20 14:36 ` Uladzislau Rezki
2024-04-04 20:19 ` Frederic Weisbecker
0 siblings, 2 replies; 4+ messages in thread
From: Joel Fernandes (Google) @ 2024-03-19 18:54 UTC (permalink / raw)
To: linux-kernel, Paul E. McKenney, Frederic Weisbecker,
Neeraj Upadhyay, Joel Fernandes, Josh Triplett, Boqun Feng,
Steven Rostedt, Mathieu Desnoyers, Lai Jiangshan, Zqiang
Cc: urezki, neeraj.iitr10, rcu
In the synchronize_rcu() common case, we will have less than
SR_MAX_USERS_WAKE_FROM_GP number of users per GP. Waking up the kworker
is pointless just to free the last injected wait head since at that point,
all the users have already been awakened.
Introduce a new counter to track this and prevent the wakeup in the
common case.
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
v1->v2: Rebase on paul/dev
v2->v3: Additional optimization for wait_tail->next == NULL case.
v3->v4: Apply clean ups from Vlad. Tested rcutorture all scenarios.
---
kernel/rcu/tree.c | 35 ++++++++++++++++++++++++++++++-----
kernel/rcu/tree.h | 1 +
2 files changed, 31 insertions(+), 5 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 9fbb5ab57c84..f3193670fe42 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
rcu_sr_normal_gp_cleanup_work),
+ .srs_cleanups_pending = ATOMIC_INIT(0),
};
/* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1642,8 +1643,11 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
* the done tail list manipulations are protected here.
*/
done = smp_load_acquire(&rcu_state.srs_done_tail);
- if (!done)
+ if (!done) {
+ /* See comments below. */
+ atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
return;
+ }
WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
head = done->next;
@@ -1666,6 +1670,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
rcu_sr_put_wait_head(rcu);
}
+
+ /* Order list manipulations with atomic access. */
+ atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
}
/*
@@ -1673,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
*/
static void rcu_sr_normal_gp_cleanup(void)
{
- struct llist_node *wait_tail, *next, *rcu;
+ struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
int done = 0;
wait_tail = rcu_state.srs_wait_tail;
@@ -1699,16 +1706,34 @@ static void rcu_sr_normal_gp_cleanup(void)
break;
}
- // concurrent sr_normal_gp_cleanup work might observe this update.
- smp_store_release(&rcu_state.srs_done_tail, wait_tail);
+ /*
+ * Fast path, no more users to process except putting the second last
+ * wait head if there are no in-flight workers. If there are in-flight
+ * workers, they will remove the last wait head.
+ *
+ * Note that the ACQUIRE orders atomic access with list manipulation.
+ */
+ if (wait_tail->next && wait_tail->next->next == NULL &&
+ rcu_sr_is_wait_head(wait_tail->next) &&
+ !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
+ rcu_sr_put_wait_head(wait_tail->next);
+ wait_tail->next = NULL;
+ }
+
+ /* Concurrent sr_normal_gp_cleanup work might observe this update. */
ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
+ smp_store_release(&rcu_state.srs_done_tail, wait_tail);
/*
* We schedule a work in order to perform a final processing
* of outstanding users (if still left) and releasing wait-heads
* added by rcu_sr_normal_gp_init() call.
*/
- queue_work(sync_wq, &rcu_state.srs_cleanup_work);
+ if (wait_tail->next) {
+ atomic_inc(&rcu_state.srs_cleanups_pending);
+ if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
+ atomic_dec(&rcu_state.srs_cleanups_pending);
+ }
}
/*
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bae7925c497f..affcb92a358c 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -420,6 +420,7 @@ struct rcu_state {
struct llist_node *srs_done_tail; /* ready for GP users. */
struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
struct work_struct srs_cleanup_work;
+ atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
};
/* Values for rcu_state structure's gp_flags field. */
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* Re: [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case
2024-03-19 18:54 [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case Joel Fernandes (Google)
@ 2024-03-20 14:36 ` Uladzislau Rezki
2024-03-20 23:26 ` Paul E. McKenney
2024-04-04 20:19 ` Frederic Weisbecker
1 sibling, 1 reply; 4+ messages in thread
From: Uladzislau Rezki @ 2024-03-20 14:36 UTC (permalink / raw)
To: Joel Fernandes (Google)
Cc: linux-kernel, Paul E. McKenney, Frederic Weisbecker,
Neeraj Upadhyay, Josh Triplett, Boqun Feng, Steven Rostedt,
Mathieu Desnoyers, Lai Jiangshan, Zqiang, urezki, neeraj.iitr10,
rcu
On Tue, Mar 19, 2024 at 02:54:57PM -0400, Joel Fernandes (Google) wrote:
> In the synchronize_rcu() common case, we will have less than
> SR_MAX_USERS_WAKE_FROM_GP number of users per GP. Waking up the kworker
> is pointless just to free the last injected wait head since at that point,
> all the users have already been awakened.
>
> Introduce a new counter to track this and prevent the wakeup in the
> common case.
>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
> v1->v2: Rebase on paul/dev
> v2->v3: Additional optimization for wait_tail->next == NULL case.
> v3->v4: Apply clean ups from Vlad. Tested rcutorture all scenarios.
> ---
> kernel/rcu/tree.c | 35 ++++++++++++++++++++++++++++++-----
> kernel/rcu/tree.h | 1 +
> 2 files changed, 31 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9fbb5ab57c84..f3193670fe42 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
> .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
> .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
> rcu_sr_normal_gp_cleanup_work),
> + .srs_cleanups_pending = ATOMIC_INIT(0),
> };
>
> /* Dump rcu_node combining tree at boot to verify correct setup. */
> @@ -1642,8 +1643,11 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> * the done tail list manipulations are protected here.
> */
> done = smp_load_acquire(&rcu_state.srs_done_tail);
> - if (!done)
> + if (!done) {
> + /* See comments below. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> return;
> + }
>
> WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
> head = done->next;
> @@ -1666,6 +1670,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
>
> rcu_sr_put_wait_head(rcu);
> }
> +
> + /* Order list manipulations with atomic access. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> }
>
> /*
> @@ -1673,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> */
> static void rcu_sr_normal_gp_cleanup(void)
> {
> - struct llist_node *wait_tail, *next, *rcu;
> + struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
> int done = 0;
>
> wait_tail = rcu_state.srs_wait_tail;
> @@ -1699,16 +1706,34 @@ static void rcu_sr_normal_gp_cleanup(void)
> break;
> }
>
> - // concurrent sr_normal_gp_cleanup work might observe this update.
> - smp_store_release(&rcu_state.srs_done_tail, wait_tail);
> + /*
> + * Fast path, no more users to process except putting the second last
> + * wait head if no inflight-workers. If there are in-flight workers,
> + * they will remove the last wait head.
> + *
> + * Note that the ACQUIRE orders atomic access with list manipulation.
> + */
> + if (wait_tail->next && wait_tail->next->next == NULL &&
> + rcu_sr_is_wait_head(wait_tail->next) &&
> + !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
> + rcu_sr_put_wait_head(wait_tail->next);
> + wait_tail->next = NULL;
> + }
> +
> + /* Concurrent sr_normal_gp_cleanup work might observe this update. */
> ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
> + smp_store_release(&rcu_state.srs_done_tail, wait_tail);
>
> /*
> * We schedule a work in order to perform a final processing
> * of outstanding users(if still left) and releasing wait-heads
> * added by rcu_sr_normal_gp_init() call.
> */
> - queue_work(sync_wq, &rcu_state.srs_cleanup_work);
> + if (wait_tail->next) {
> + atomic_inc(&rcu_state.srs_cleanups_pending);
> + if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
> + atomic_dec(&rcu_state.srs_cleanups_pending);
> + }
> }
>
> /*
> diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> index bae7925c497f..affcb92a358c 100644
> --- a/kernel/rcu/tree.h
> +++ b/kernel/rcu/tree.h
> @@ -420,6 +420,7 @@ struct rcu_state {
> struct llist_node *srs_done_tail; /* ready for GP users. */
> struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
> struct work_struct srs_cleanup_work;
> + atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
> };
>
> /* Values for rcu_state structure's gp_flags field. */
> --
> 2.34.1
>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
--
Uladzislau Rezki
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case
2024-03-20 14:36 ` Uladzislau Rezki
@ 2024-03-20 23:26 ` Paul E. McKenney
0 siblings, 0 replies; 4+ messages in thread
From: Paul E. McKenney @ 2024-03-20 23:26 UTC (permalink / raw)
To: Uladzislau Rezki
Cc: Joel Fernandes (Google), linux-kernel, Frederic Weisbecker,
Neeraj Upadhyay, Josh Triplett, Boqun Feng, Steven Rostedt,
Mathieu Desnoyers, Lai Jiangshan, Zqiang, neeraj.iitr10, rcu
On Wed, Mar 20, 2024 at 03:36:13PM +0100, Uladzislau Rezki wrote:
> On Tue, Mar 19, 2024 at 02:54:57PM -0400, Joel Fernandes (Google) wrote:
> > In the synchronize_rcu() common case, we will have less than
> > SR_MAX_USERS_WAKE_FROM_GP number of users per GP. Waking up the kworker
> > is pointless just to free the last injected wait head since at that point,
> > all the users have already been awakened.
> >
> > Introduce a new counter to track this and prevent the wakeup in the
> > common case.
> >
> > Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> > ---
> > v1->v2: Rebase on paul/dev
> > v2->v3: Additional optimization for wait_tail->next == NULL case.
> > v3->v4: Apply clean ups from Vlad. Tested rcutorture all scenarios.
> > ---
> > kernel/rcu/tree.c | 35 ++++++++++++++++++++++++++++++-----
> > kernel/rcu/tree.h | 1 +
> > 2 files changed, 31 insertions(+), 5 deletions(-)
> >
> > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> > index 9fbb5ab57c84..f3193670fe42 100644
> > --- a/kernel/rcu/tree.c
> > +++ b/kernel/rcu/tree.c
> > @@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
> > .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
> > .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
> > rcu_sr_normal_gp_cleanup_work),
> > + .srs_cleanups_pending = ATOMIC_INIT(0),
> > };
> >
> > /* Dump rcu_node combining tree at boot to verify correct setup. */
> > @@ -1642,8 +1643,11 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> > * the done tail list manipulations are protected here.
> > */
> > done = smp_load_acquire(&rcu_state.srs_done_tail);
> > - if (!done)
> > + if (!done) {
> > + /* See comments below. */
> > + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> > return;
> > + }
> >
> > WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
> > head = done->next;
> > @@ -1666,6 +1670,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> >
> > rcu_sr_put_wait_head(rcu);
> > }
> > +
> > + /* Order list manipulations with atomic access. */
> > + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
> > }
> >
> > /*
> > @@ -1673,7 +1680,7 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> > */
> > static void rcu_sr_normal_gp_cleanup(void)
> > {
> > - struct llist_node *wait_tail, *next, *rcu;
> > + struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
> > int done = 0;
> >
> > wait_tail = rcu_state.srs_wait_tail;
> > @@ -1699,16 +1706,34 @@ static void rcu_sr_normal_gp_cleanup(void)
> > break;
> > }
> >
> > - // concurrent sr_normal_gp_cleanup work might observe this update.
> > - smp_store_release(&rcu_state.srs_done_tail, wait_tail);
> > + /*
> > + * Fast path, no more users to process except putting the second last
> > + * wait head if no inflight-workers. If there are in-flight workers,
> > + * they will remove the last wait head.
> > + *
> > + * Note that the ACQUIRE orders atomic access with list manipulation.
> > + */
> > + if (wait_tail->next && wait_tail->next->next == NULL &&
> > + rcu_sr_is_wait_head(wait_tail->next) &&
> > + !atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
> > + rcu_sr_put_wait_head(wait_tail->next);
> > + wait_tail->next = NULL;
> > + }
> > +
> > + /* Concurrent sr_normal_gp_cleanup work might observe this update. */
> > ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
> > + smp_store_release(&rcu_state.srs_done_tail, wait_tail);
> >
> > /*
> > * We schedule a work in order to perform a final processing
> > * of outstanding users(if still left) and releasing wait-heads
> > * added by rcu_sr_normal_gp_init() call.
> > */
> > - queue_work(sync_wq, &rcu_state.srs_cleanup_work);
> > + if (wait_tail->next) {
> > + atomic_inc(&rcu_state.srs_cleanups_pending);
> > + if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
> > + atomic_dec(&rcu_state.srs_cleanups_pending);
> > + }
> > }
> >
> > /*
> > diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
> > index bae7925c497f..affcb92a358c 100644
> > --- a/kernel/rcu/tree.h
> > +++ b/kernel/rcu/tree.h
> > @@ -420,6 +420,7 @@ struct rcu_state {
> > struct llist_node *srs_done_tail; /* ready for GP users. */
> > struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
> > struct work_struct srs_cleanup_work;
> > + atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
> > };
> >
> > /* Values for rcu_state structure's gp_flags field. */
> > --
> > 2.34.1
> >
> Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Queued and pushed, thank you both!
Thanx, Paul
^ permalink raw reply [flat|nested] 4+ messages in thread
* Re: [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case
2024-03-19 18:54 [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case Joel Fernandes (Google)
2024-03-20 14:36 ` Uladzislau Rezki
@ 2024-04-04 20:19 ` Frederic Weisbecker
1 sibling, 0 replies; 4+ messages in thread
From: Frederic Weisbecker @ 2024-04-04 20:19 UTC (permalink / raw)
To: Joel Fernandes (Google)
Cc: linux-kernel, Paul E. McKenney, Neeraj Upadhyay, Josh Triplett,
Boqun Feng, Steven Rostedt, Mathieu Desnoyers, Lai Jiangshan,
Zqiang, urezki, neeraj.iitr10, rcu
Le Tue, Mar 19, 2024 at 02:54:57PM -0400, Joel Fernandes (Google) a écrit :
> In the synchronize_rcu() common case, we will have less than
> SR_MAX_USERS_WAKE_FROM_GP number of users per GP. Waking up the kworker
> is pointless just to free the last injected wait head since at that point,
> all the users have already been awakened.
>
> Introduce a new counter to track this and prevent the wakeup in the
> common case.
>
> Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
> ---
> v1->v2: Rebase on paul/dev
> v2->v3: Additional optimization for wait_tail->next == NULL case.
> v3->v4: Apply clean ups from Vlad. Tested rcutorture all scenarios.
> ---
> kernel/rcu/tree.c | 35 ++++++++++++++++++++++++++++++-----
> kernel/rcu/tree.h | 1 +
> 2 files changed, 31 insertions(+), 5 deletions(-)
>
> diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
> index 9fbb5ab57c84..f3193670fe42 100644
> --- a/kernel/rcu/tree.c
> +++ b/kernel/rcu/tree.c
> @@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
> .ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
> .srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
> rcu_sr_normal_gp_cleanup_work),
> + .srs_cleanups_pending = ATOMIC_INIT(0),
> };
>
> /* Dump rcu_node combining tree at boot to verify correct setup. */
> @@ -1642,8 +1643,11 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
> * the done tail list manipulations are protected here.
> */
> done = smp_load_acquire(&rcu_state.srs_done_tail);
> - if (!done)
> + if (!done) {
Can this really happen? And if not should we warn?
> + /* See comments below. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
And if not should we really do that?
> return;
> + }
>
> WARN_ON_ONCE(!rcu_sr_is_wait_head(done));
> head = done->next;
> @@ -1666,6 +1670,9 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
>
> rcu_sr_put_wait_head(rcu);
> }
> +
> + /* Order list manipulations with atomic access. */
> + atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
Can we have a sanity check here ensuring this never goes negative?
Thanks.
^ permalink raw reply [flat|nested] 4+ messages in thread
end of thread, other threads:[~2024-04-04 20:19 UTC | newest]
Thread overview: 4+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2024-03-19 18:54 [PATCH v4] rcu/tree: Reduce wake up for synchronize_rcu() common case Joel Fernandes (Google)
2024-03-20 14:36 ` Uladzislau Rezki
2024-03-20 23:26 ` Paul E. McKenney
2024-04-04 20:19 ` Frederic Weisbecker
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox