From: Dai Ngo <dai.ngo@oracle.com>
To: chuck.lever@oracle.com, jlayton@kernel.org
Cc: linux-nfs@vger.kernel.org
Subject: [PATCH 3/3] NFSD: drop TCP connections when NFSv4 client enters courtesy state
Date: Mon, 22 Apr 2024 14:31:15 -0700
Message-ID: <1713821475-21474-4-git-send-email-dai.ngo@oracle.com>
In-Reply-To: <1713821475-21474-1-git-send-email-dai.ngo@oracle.com>

When an NFSv4.0 client enters the courtesy state, all of its v4 state
remains valid, and its fore and back channel TCP connections remain in
the ESTABLISHED state until the TCP keep-alive mechanism times out and
shuts down the back channel connection. The fore channel connection
remains in the ESTABLISHED state for 6 to 12 minutes, until the NFSv4
server's 6-minute idle timer (svc_age_temp_xprts) shuts down the idle
connection.
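
(Aside, for context only and not part of this patch: the keep-alive
behavior referred to above is the standard TCP keep-alive mechanism.
A minimal userspace sketch that enables keep-alive on a connected
socket, with arbitrary example timer values, could look like this:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Enable TCP keep-alive on a connected socket.  The timer values
     * here are arbitrary examples; when they are not set per socket,
     * the net.ipv4.tcp_keepalive_* sysctls supply the defaults.
     */
    static int enable_keepalive(int fd)
    {
            int on = 1, idle = 60, intvl = 10, cnt = 6;

            if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0)
                    return -1;
            if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0)
                    return -1;
            if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0)
                    return -1;
            return setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
    }

With these example values an idle connection is probed after 60 seconds
and torn down after 6 unanswered probes sent 10 seconds apart.)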
Since an NFSv4.1 mount uses the same TCP connection for both the fore
and back channels, no TCP keep-alive packets are sent from the server
to the client. The server's idle timer does not shut down an idle v4.1
connection because svc_xprt->xpt_ref is greater than 1: one reference
for the sv_tempsocks list, one for the session's nfsd4_conn, and one
for the back channel.

To conserve system resources in large configurations with many idle
clients, this patch drops an NFSv4 client's fore and back channel
connections as soon as the client enters the courtesy state. The
connections are automatically re-established when the courtesy client
reconnects.

Signed-off-by: Dai Ngo <dai.ngo@oracle.com>
---
 fs/nfsd/nfs4state.c | 26 +++++++++++++++++++++++++-
 fs/nfsd/state.h     |  1 +
 2 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index a20c2c9d7d45..bafd3f664ff3 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -6369,6 +6369,22 @@ nfs4_anylock_blockers(struct nfs4_client *clp)
         return false;
 }
 
+static void nfsd4_drop_conns(struct nfsd_net *nn, struct nfs4_client *clp)
+{
+        struct svc_xprt *xprt;
+
+        /* stop requeueing callback in nfsd4_run_cb_work */
+        nfsd4_kill_callback(clp);
+
+        spin_lock_bh(&nn->nfsd_serv->sv_lock);
+        list_for_each_entry(xprt, &nn->nfsd_serv->sv_tempsocks, xpt_list) {
+                if (rpc_cmp_addr((struct sockaddr *)&clp->cl_addr,
+                                 (struct sockaddr *)&xprt->xpt_remote))
+                        svc_xprt_deferred_close(xprt);
+        }
+        spin_unlock_bh(&nn->nfsd_serv->sv_lock);
+}
+
 static void
 nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
                          struct laundry_time *lt)
@@ -6376,10 +6392,12 @@ nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
         unsigned int maxreap, reapcnt = 0;
         struct list_head *pos, *next;
         struct nfs4_client *clp;
+        struct list_head conn_reaplist;
 
         maxreap = (atomic_read(&nn->nfs4_client_count) >= nn->nfs4_max_clients) ?
                         NFSD_CLIENT_MAX_TRIM_PER_RUN : 0;
         INIT_LIST_HEAD(reaplist);
+        INIT_LIST_HEAD(&conn_reaplist);
         spin_lock(&nn->client_lock);
         list_for_each_safe(pos, next, &nn->client_lru) {
                 clp = list_entry(pos, struct nfs4_client, cl_lru);
@@ -6395,8 +6413,11 @@ nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
                 if (!client_has_state(clp))
                         goto exp_client;
                 if (!nfs4_anylock_blockers(clp))
-                        if (reapcnt >= maxreap)
+                        if (reapcnt >= maxreap) {
+                                if (clp->cl_cb_client)
+                                        list_add(&clp->cl_conn_lru, &conn_reaplist);
                                 continue;
+                        }
 exp_client:
                 if (!mark_client_expired_locked(clp)) {
                         list_add(&clp->cl_lru, reaplist);
@@ -6404,6 +6425,9 @@ nfs4_get_client_reaplist(struct nfsd_net *nn, struct list_head *reaplist,
                 }
         }
         spin_unlock(&nn->client_lock);
+
+        list_for_each_entry(clp, &conn_reaplist, cl_conn_lru)
+                nfsd4_drop_conns(nn, clp);
 }
 
 static void
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index cde05c26afd8..fe7b5bd6460b 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -420,6 +420,7 @@ struct nfs4_client {
         int                     cl_cb_state;
         struct nfsd4_callback   cl_cb_null;
         struct nfsd4_session    *cl_cb_session;
+        struct list_head        cl_conn_lru;
 
         /* for all client information that callback code might need: */
         spinlock_t              cl_lock;
--
2.39.3