From: NeilBrown <neilb@suse.de>
To: Chuck Lever <chuck.lever@oracle.com>, Jeff Layton <jlayton@kernel.org>
Cc: linux-nfs@vger.kernel.org
Subject: [PATCH 4/7] SUNRPC: use lwq for sp_sockets - renamed to sp_xprts
Date: Fri, 18 Aug 2023 11:45:09 +1000
Message-ID: <20230818014512.26880-5-neilb@suse.de>
In-Reply-To: <20230818014512.26880-1-neilb@suse.de>
lwq avoids using back pointers in lists, and uses less locking:
producers enqueue with a single lock-free llist operation, so only
consumers ever contend on a lock.
This introduces a new spinlock (internal to the lwq), but the pool's
sp_lock, which currently protects this queue, will be removed by a
later patch in this series.
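To make the pattern concrete, here is a minimal sketch of the lwq API
as this patch uses it.  This is illustrative only, not code from the
series: struct item, its payload field, and the function names are
invented for the example, while lwq_init(), lwq_enqueue() and
lwq_dequeue() are the entry points introduced by patch 3/7 and used in
the diff below.

  #include <linux/sunrpc/svc_lwq.h>

  struct item {
  	struct lwq_node	node;		/* embedded linkage, like xpt_ready */
  	int		payload;
  };

  static struct lwq queue;		/* like sp_xprts */

  static void queue_setup(void)
  {
  	lwq_init(&queue);
  }

  /* Producer side: no caller-visible locking is required. */
  static void produce(struct item *it)
  {
  	lwq_enqueue(&it->node, &queue);
  }

  /* Consumer side: lwq_dequeue() returns NULL when the queue is empty. */
  static struct item *consume(void)
  {
  	return lwq_dequeue(&queue, struct item, node);
  }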
For svc_clean_up_xprts() we now dequeue the entire queue in one step,
walk it to pick out and delete the xprts that belong to the given
network namespace, and then re-enqueue whatever remains as a single
batch.
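Reduced to its essentials, that dequeue-all/filter/re-enqueue shape
looks like the sketch below (reusing struct item from the sketch above;
should_drop() and dispose() are illustrative stand-ins for the
xpt_net == net test and svc_delete_xprt()).  As in the diff, setting
the cursor to NULL inside lwq_for_each_safe() unlinks the current
entry from the detached list.

  static void filter_queue(struct lwq *head,
  			 bool (*should_drop)(struct item *),
  			 void (*dispose)(struct item *))
  {
  	struct item *it;
  	struct llist_node *q, **t1, *t2;

  	/* Detach everything currently queued. */
  	q = lwq_dequeue_all(head);

  	/* Walk the detached list, disposing of matching entries. */
  	lwq_for_each_safe(it, t1, t2, &q, node) {
  		if (should_drop(it)) {
  			dispose(it);
  			it = NULL;	/* unlink the entry we just disposed of */
  		}
  	}

  	/* Re-enqueue the survivors as a single batch. */
  	if (q)
  		lwq_enqueue_batch(q, head);
  }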
Signed-off-by: NeilBrown <neilb@suse.de>
---
include/linux/sunrpc/svc.h | 3 +-
include/linux/sunrpc/svc_xprt.h | 2 +-
net/sunrpc/svc.c | 2 +-
net/sunrpc/svc_xprt.c | 57 ++++++++++-----------------------
4 files changed, 21 insertions(+), 43 deletions(-)
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index dafa362b4fdd..8b2bd034a760 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,6 +17,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/sunrpc/svc_lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
@@ -34,7 +35,7 @@
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
+ struct lwq sp_xprts; /* pending transports */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct llist_head sp_idle_threads; /* idle server threads */
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index fa55d12dc765..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -54,7 +54,7 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
struct svc_serv *xpt_server; /* service for transport */
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 3267d740235e..63cddb8cb08d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -508,7 +508,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
i, serv->sv_name);
pool->sp_id = i;
- INIT_LIST_HEAD(&pool->sp_sockets);
+ lwq_init(&pool->sp_xprts);
INIT_LIST_HEAD(&pool->sp_all_threads);
init_llist_head(&pool->sp_idle_threads);
spin_lock_init(&pool->sp_lock);
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 397ca12299a2..39843c064361 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -201,7 +201,6 @@ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
kref_init(&xprt->xpt_ref);
xprt->xpt_server = serv;
INIT_LIST_HEAD(&xprt->xpt_list);
- INIT_LIST_HEAD(&xprt->xpt_ready);
INIT_LIST_HEAD(&xprt->xpt_deferred);
INIT_LIST_HEAD(&xprt->xpt_users);
mutex_init(&xprt->xpt_mutex);
@@ -472,9 +471,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server);
percpu_counter_inc(&pool->sp_sockets_queued);
- spin_lock_bh(&pool->sp_lock);
- list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
- spin_unlock_bh(&pool->sp_lock);
+ lwq_enqueue(&xprt->xpt_ready, &pool->sp_xprts);
svc_pool_wake_idle_thread(pool);
}
@@ -487,18 +484,9 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
struct svc_xprt *xprt = NULL;
- if (list_empty(&pool->sp_sockets))
- goto out;
-
- spin_lock_bh(&pool->sp_lock);
- if (likely(!list_empty(&pool->sp_sockets))) {
- xprt = list_first_entry(&pool->sp_sockets,
- struct svc_xprt, xpt_ready);
- list_del_init(&xprt->xpt_ready);
+ xprt = lwq_dequeue(&pool->sp_xprts, struct svc_xprt, xpt_ready);
+ if (xprt)
svc_xprt_get(xprt);
- }
- spin_unlock_bh(&pool->sp_lock);
-out:
return xprt;
}
@@ -708,7 +696,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
return false;
/* was a socket queued? */
- if (!list_empty(&pool->sp_sockets))
+ if (!lwq_empty(&pool->sp_xprts))
return false;
/* are we shutting down? */
@@ -1052,7 +1040,6 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
spin_lock_bh(&serv->sv_lock);
list_del_init(&xprt->xpt_list);
- WARN_ON_ONCE(!list_empty(&xprt->xpt_ready));
if (test_bit(XPT_TEMP, &xprt->xpt_flags))
serv->sv_tmpcnt--;
spin_unlock_bh(&serv->sv_lock);
@@ -1103,36 +1090,26 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
return ret;
}
-static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
{
- struct svc_pool *pool;
struct svc_xprt *xprt;
- struct svc_xprt *tmp;
int i;
for (i = 0; i < serv->sv_nrpools; i++) {
- pool = &serv->sv_pools[i];
-
- spin_lock_bh(&pool->sp_lock);
- list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) {
- if (xprt->xpt_net != net)
- continue;
- list_del_init(&xprt->xpt_ready);
- spin_unlock_bh(&pool->sp_lock);
- return xprt;
+ struct svc_pool *pool = &serv->sv_pools[i];
+ struct llist_node *q, **t1, *t2;
+
+ q = lwq_dequeue_all(&pool->sp_xprts);
+ lwq_for_each_safe(xprt, t1, t2, &q, xpt_ready) {
+ if (xprt->xpt_net == net) {
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ svc_delete_xprt(xprt);
+ xprt = NULL;
+ }
}
- spin_unlock_bh(&pool->sp_lock);
- }
- return NULL;
-}
-static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
-{
- struct svc_xprt *xprt;
-
- while ((xprt = svc_dequeue_net(serv, net))) {
- set_bit(XPT_CLOSE, &xprt->xpt_flags);
- svc_delete_xprt(xprt);
+ if (q)
+ lwq_enqueue_batch(q, &pool->sp_xprts);
}
}
--
2.40.1
Thread overview: 10+ messages
2023-08-18 1:45 [PATCH 0/7] SUNRPC - queuing improvements NeilBrown
2023-08-18 1:45 ` [PATCH 1/7] SUNRPC: change service idle list to be an llist NeilBrown
2023-08-18 15:53 ` Chuck Lever
2023-08-30 1:52 ` NeilBrown
2023-08-18 1:45 ` [PATCH 2/7] SUNRPC: only have one thread waking up at a time NeilBrown
2023-08-18 1:45 ` [PATCH 3/7] SUNRPC/svc: add light-weight queuing mechanism NeilBrown
2023-08-18 1:45 ` [PATCH 4/7] SUNRPC: use lwq for sp_sockets - renamed to sp_xprts NeilBrown [this message]
2023-08-18 1:45 ` [PATCH 5/7] SUNRPC: change sp_nrthreads to atomic_t NeilBrown
2023-08-18 1:45 ` [PATCH 6/7] SUNRPC: discard sp_lock NeilBrown
2023-08-18 1:45 ` [PATCH 7/7] SUNRPC: change the back-channel queue to lwq NeilBrown