From: Chuck Lever <cel@kernel.org>
To: NeilBrown <neilb@ownmail.net>, Jeff Layton <jlayton@kernel.org>,
Olga Kornievskaia <okorniev@redhat.com>,
Dai Ngo <dai.ngo@oracle.com>, Tom Talpey <tom@talpey.com>
Cc: <linux-nfs@vger.kernel.org>, Chuck Lever <chuck.lever@oracle.com>
Subject: [PATCH v4 02/14] lockd: Relocate and rename nlm_drop_reply
Date: Wed, 28 Jan 2026 10:19:23 -0500 [thread overview]
Message-ID: <20260128151935.1646063-3-cel@kernel.org> (raw)
In-Reply-To: <20260128151935.1646063-1-cel@kernel.org>
From: Chuck Lever <chuck.lever@oracle.com>
The nlm_drop_reply status code is internal to the kernel's lockd
implementation and must never appear on the wire. Its previous
location in xdr.h grouped it with legitimate NLM protocol status
codes, obscuring this critical distinction.
Relocate the definition to lockd.h with a comment block for internal
status codes, and rename to nlm__int__drop_reply to make its
internal-only nature explicit. This prepares for adding additional
internal status codes in subsequent patches.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
fs/lockd/svc4proc.c | 22 ++++++++++++++--------
fs/lockd/svclock.c | 4 ++--
fs/lockd/svcproc.c | 24 +++++++++++++++---------
fs/nfsd/lockd.c | 2 +-
include/linux/lockd/lockd.h | 6 ++++++
include/linux/lockd/xdr.h | 2 --
6 files changed, 38 insertions(+), 22 deletions(-)
diff --git a/fs/lockd/svc4proc.c b/fs/lockd/svc4proc.c
index 4b6f18d97734..9c756d07223a 100644
--- a/fs/lockd/svc4proc.c
+++ b/fs/lockd/svc4proc.c
@@ -104,12 +104,13 @@ __nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now check for conflicting locks */
resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock,
&resp->lock);
- if (resp->status == nlm_drop_reply)
+ if (resp->status == nlm__int__drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: TEST4 status %d\n", ntohl(resp->status));
@@ -140,13 +141,14 @@ __nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to lock the file */
resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock,
argp->block, &argp->cookie,
argp->reclaim);
- if (resp->status == nlm_drop_reply)
+ if (resp->status == nlm__int__drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
@@ -182,7 +184,8 @@ __nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Try to cancel request. */
resp->status = nlmsvc_cancel_blocked(SVC_NET(rqstp), file, &argp->lock);
@@ -222,7 +225,8 @@ __nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to remove the lock */
resp->status = nlmsvc_unlock(SVC_NET(rqstp), file, &argp->lock);
@@ -369,7 +373,8 @@ nlm4svc_proc_share(struct svc_rqst *rqstp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to create the share */
resp->status = nlmsvc_share_file(host, file, argp);
@@ -404,7 +409,8 @@ nlm4svc_proc_unshare(struct svc_rqst *rqstp)
/* Obtain client and file */
if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to lock the file */
resp->status = nlmsvc_unshare_file(host, file, argp);
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index 712df1e025d8..83b6dd243bcd 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -463,7 +463,7 @@ nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block)
block->b_deferred_req =
rqstp->rq_chandle.defer(block->b_cache_req);
if (block->b_deferred_req != NULL)
- status = nlm_drop_reply;
+ status = nlm__int__drop_reply;
}
dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n",
block, block->b_flags, ntohl(status));
@@ -531,7 +531,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
ret = nlm_lck_denied;
goto out;
}
- ret = nlm_drop_reply;
+ ret = nlm__int__drop_reply;
goto out;
}
diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c
index 95c6bf7ab757..2a2e48a9bd12 100644
--- a/fs/lockd/svcproc.c
+++ b/fs/lockd/svcproc.c
@@ -25,7 +25,7 @@ static inline __be32 cast_status(__be32 status)
case nlm_lck_denied_nolocks:
case nlm_lck_blocked:
case nlm_lck_denied_grace_period:
- case nlm_drop_reply:
+ case nlm__int__drop_reply:
break;
case nlm4_deadlock:
status = nlm_lck_denied;
@@ -122,12 +122,13 @@ __nlmsvc_proc_test(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now check for conflicting locks */
resp->status = cast_status(nlmsvc_testlock(rqstp, file, host,
&argp->lock, &resp->lock));
- if (resp->status == nlm_drop_reply)
+ if (resp->status == nlm__int__drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: TEST status %d vers %d\n",
@@ -159,13 +160,14 @@ __nlmsvc_proc_lock(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to lock the file */
resp->status = cast_status(nlmsvc_lock(rqstp, file, host, &argp->lock,
argp->block, &argp->cookie,
argp->reclaim));
- if (resp->status == nlm_drop_reply)
+ if (resp->status == nlm__int__drop_reply)
rc = rpc_drop_reply;
else
dprintk("lockd: LOCK status %d\n", ntohl(resp->status));
@@ -202,7 +204,8 @@ __nlmsvc_proc_cancel(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Try to cancel request. */
resp->status = cast_status(nlmsvc_cancel_blocked(net, file, &argp->lock));
@@ -243,7 +246,8 @@ __nlmsvc_proc_unlock(struct svc_rqst *rqstp, struct nlm_res *resp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to remove the lock */
resp->status = cast_status(nlmsvc_unlock(net, file, &argp->lock));
@@ -400,7 +404,8 @@ nlmsvc_proc_share(struct svc_rqst *rqstp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to create the share */
resp->status = cast_status(nlmsvc_share_file(host, file, argp));
@@ -435,7 +440,8 @@ nlmsvc_proc_unshare(struct svc_rqst *rqstp)
/* Obtain client and file */
if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
- return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+ return resp->status == nlm__int__drop_reply ?
+ rpc_drop_reply : rpc_success;
/* Now try to unshare the file */
resp->status = cast_status(nlmsvc_unshare_file(host, file, argp));
diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
index c774ce9aa296..8c230ccd6645 100644
--- a/fs/nfsd/lockd.c
+++ b/fs/nfsd/lockd.c
@@ -71,7 +71,7 @@ nlm_fopen(struct svc_rqst *rqstp, struct nfs_fh *f, struct file **filp,
* to callback when the delegation is returned but might
* not have a proper lock request to block on.
*/
- return nlm_drop_reply;
+ return nlm__int__drop_reply;
case nfserr_stale:
return nlm_stale_fh;
default:
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 330e38776bb2..fdefec39553f 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -38,6 +38,12 @@
*/
#define LOCKD_DFLT_TIMEO 10
+/*
+ * Internal-use status codes, not to be placed on the wire.
+ * Version handlers translate these to appropriate wire values.
+ */
+#define nlm__int__drop_reply cpu_to_be32(30000)
+
/*
* Lockd host handle (used both by the client and server personality).
*/
diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h
index 17d53165d9f2..292e4e38d17d 100644
--- a/include/linux/lockd/xdr.h
+++ b/include/linux/lockd/xdr.h
@@ -33,8 +33,6 @@ struct svc_rqst;
#define nlm_lck_blocked cpu_to_be32(NLM_LCK_BLOCKED)
#define nlm_lck_denied_grace_period cpu_to_be32(NLM_LCK_DENIED_GRACE_PERIOD)
-#define nlm_drop_reply cpu_to_be32(30000)
-
/* Lock info passed via NLM */
struct nlm_lock {
char * caller;
--
2.52.0
next prev parent reply other threads:[~2026-01-28 15:19 UTC|newest]
Thread overview: 16+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-01-28 15:19 [PATCH v4 00/14] Clarify module API boundaries Chuck Lever
2026-01-28 15:19 ` [PATCH v4 01/14] lockd: Simplify cast_status() in svcproc.c Chuck Lever
2026-01-28 15:19 ` Chuck Lever [this message]
2026-01-28 15:19 ` [PATCH v4 03/14] lockd: Introduce nlm__int__deadlock Chuck Lever
2026-01-28 15:19 ` [PATCH v4 04/14] lockd: Have nlm_fopen() return errno values Chuck Lever
2026-01-28 15:19 ` [PATCH v4 05/14] lockd: Relocate nlmsvc_unlock API declarations Chuck Lever
2026-01-28 15:19 ` [PATCH v4 06/14] NFS: Use nlmclnt_shutdown_rpc_clnt() to safely shut down NLM Chuck Lever
2026-01-28 15:19 ` [PATCH v4 07/14] lockd: Move xdr4.h from include/linux/lockd/ to fs/lockd/ Chuck Lever
2026-01-28 15:19 ` [PATCH v4 08/14] lockd: Move share.h " Chuck Lever
2026-01-28 15:19 ` [PATCH v4 09/14] lockd: Relocate include/linux/lockd/lockd.h Chuck Lever
2026-01-28 15:19 ` [PATCH v4 10/14] lockd: Remove lockd/debug.h Chuck Lever
2026-01-28 15:19 ` [PATCH v4 11/14] lockd: Move xdr.h from include/linux/lockd/ to fs/lockd/ Chuck Lever
2026-01-29 7:06 ` kernel test robot
2026-01-28 15:19 ` [PATCH v4 12/14] lockd: Make linux/lockd/nlm.h an internal header Chuck Lever
2026-01-28 15:19 ` [PATCH v4 13/14] lockd: Move nlm4svc_set_file_lock_range() Chuck Lever
2026-01-28 15:19 ` [PATCH v4 14/14] lockd: Relocate svc_version definitions to XDR layer Chuck Lever
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260128151935.1646063-3-cel@kernel.org \
--to=cel@kernel.org \
--cc=chuck.lever@oracle.com \
--cc=dai.ngo@oracle.com \
--cc=jlayton@kernel.org \
--cc=linux-nfs@vger.kernel.org \
--cc=neilb@ownmail.net \
--cc=okorniev@redhat.com \
--cc=tom@talpey.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox