From: NeilBrown <neilb@suse.de>
To: Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
Cc: linux-nfs@vger.kernel.org
Subject: [PATCH 08/10] SUNRPC: change sp_nrthreads to atomic_t
Date: Wed, 30 Aug 2023 12:54:51 +1000
Message-ID: <20230830025755.21292-9-neilb@suse.de>
In-Reply-To: <20230830025755.21292-1-neilb@suse.de>
Using an atomic_t for sp_nrthreads avoids the need to take the pool's
spinlock (which a later patch removes).
Choosing a thread to kill needs care, as we cannot set the "die
now" bit atomically with the test on the count.  Instead we temporarily
increase the count to reserve the victim, then drop the reservation once
the victim flags are set; if that drop takes the count to zero, the pool
emptied under us, so the flags are cleared and we retry.
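
To make the ordering easier to follow, here is a minimal userspace
sketch (not part of the patch) of that reserve-then-test pattern,
written with C11 atomics rather than the kernel's atomic_t; the struct
and function names are hypothetical.

	/* Illustrative only: models atomic_inc_not_zero() with a CAS loop
	 * and atomic_dec_and_test() with atomic_fetch_sub().
	 */
	#include <stdatomic.h>
	#include <stdbool.h>

	struct pool { atomic_int nrthreads; };

	static bool try_mark_victim(struct pool *p)
	{
		/* Reserve: only proceed if the pool still has threads. */
		int old = atomic_load(&p->nrthreads);
		do {
			if (old == 0)
				return false;	/* pool already empty */
		} while (!atomic_compare_exchange_weak(&p->nrthreads,
						       &old, old + 1));

		/* ... set the "die now" / "victim remains" flags here ... */

		/* Drop the reservation.  If ours was the last count, the
		 * pool emptied under us and the flags must be undone so
		 * the caller can retry another pool.
		 */
		if (atomic_fetch_sub(&p->nrthreads, 1) == 1) {
			/* ... clear the victim flags here ... */
			return false;
		}
		return true;
	}

	int main(void)
	{
		struct pool p = { .nrthreads = 3 };
		return try_mark_victim(&p) ? 0 : 1;
	}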
Signed-off-by: NeilBrown <neilb@suse.de>
---
fs/nfsd/nfssvc.c | 3 ++-
include/linux/sunrpc/svc.h | 2 +-
net/sunrpc/svc.c | 37 ++++++++++++++++++++-----------------
3 files changed, 23 insertions(+), 19 deletions(-)
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 062f51fe4dfb..5e455ced0711 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -718,7 +718,8 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
if (nn->nfsd_serv != NULL) {
for (i = 0; i < nn->nfsd_serv->sv_nrpools && i < n; i++)
- nthreads[i] = nn->nfsd_serv->sv_pools[i].sp_nrthreads;
+ nthreads[i] =
+ atomic_read(&nn->nfsd_serv->sv_pools[i].sp_nrthreads);
}
return 0;
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 7ff9fe785e49..9d0fcd6148ae 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -36,7 +36,7 @@ struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
spinlock_t sp_lock; /* protects all fields */
struct lwq sp_xprts; /* pending transports */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 63cddb8cb08d..9524af33ace9 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -681,8 +681,8 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
serv->sv_nrthreads += 1;
spin_unlock_bh(&serv->sv_lock);
+ atomic_inc(&pool->sp_nrthreads);
spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads++;
list_add_rcu(&rqstp->rq_all, &pool->sp_all_threads);
spin_unlock_bh(&pool->sp_lock);
return rqstp;
@@ -727,23 +727,24 @@ svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
}
static struct svc_pool *
-svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+svc_pool_victim(struct svc_serv *serv, struct svc_pool *target_pool,
+ unsigned int *state)
{
+ struct svc_pool *pool;
unsigned int i;
+retry:
+ pool = target_pool;
+
if (pool != NULL) {
- spin_lock_bh(&pool->sp_lock);
- if (pool->sp_nrthreads)
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
goto found_pool;
- spin_unlock_bh(&pool->sp_lock);
return NULL;
} else {
for (i = 0; i < serv->sv_nrpools; i++) {
pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
- spin_lock_bh(&pool->sp_lock);
- if (pool->sp_nrthreads)
+ if (atomic_inc_not_zero(&pool->sp_nrthreads))
goto found_pool;
- spin_unlock_bh(&pool->sp_lock);
}
return NULL;
}
@@ -751,8 +752,12 @@ svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *stat
found_pool:
set_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
set_bit(SP_NEED_VICTIM, &pool->sp_flags);
- spin_unlock_bh(&pool->sp_lock);
- return pool;
+ if (!atomic_dec_and_test(&pool->sp_nrthreads))
+ return pool;
+ /* Nothing left in this pool any more */
+ clear_bit(SP_NEED_VICTIM, &pool->sp_flags);
+ clear_bit(SP_VICTIM_REMAINS, &pool->sp_flags);
+ goto retry;
}
static int
@@ -828,13 +833,10 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
int
svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
{
- if (pool == NULL) {
+ if (!pool)
nrservs -= serv->sv_nrthreads;
- } else {
- spin_lock_bh(&pool->sp_lock);
- nrservs -= pool->sp_nrthreads;
- spin_unlock_bh(&pool->sp_lock);
- }
+ else
+ nrservs -= atomic_read(&pool->sp_nrthreads);
if (nrservs > 0)
return svc_start_kthreads(serv, pool, nrservs);
@@ -921,10 +923,11 @@ svc_exit_thread(struct svc_rqst *rqstp)
struct svc_pool *pool = rqstp->rq_pool;
spin_lock_bh(&pool->sp_lock);
- pool->sp_nrthreads--;
list_del_rcu(&rqstp->rq_all);
spin_unlock_bh(&pool->sp_lock);
+ atomic_dec(&pool->sp_nrthreads);
+
spin_lock_bh(&serv->sv_lock);
serv->sv_nrthreads -= 1;
spin_unlock_bh(&serv->sv_lock);
--
2.41.0