public inbox for linux-nfs@vger.kernel.org
 help / color / mirror / Atom feed
From: Jeff Layton <jlayton@kernel.org>
To: cel@kernel.org, NeilBrown <neil@brown.name>,
	Olga Kornievskaia <okorniev@redhat.com>,
	Dai Ngo <dai.ngo@oracle.com>, Tom Talpey <tom@talpey.com>
Cc: linux-nfs@vger.kernel.org, linux-rdma@vger.kernel.org,
	Chuck Lever <chuck.lever@oracle.com>
Subject: Re: [PATCH v5 07/19] NFSD: De-duplicate the svc_fill_write_vector() call sites
Date: Tue, 13 May 2025 07:02:22 -0500	[thread overview]
Message-ID: <a0f1232d29e699a40da0e41c423bcad8e99887cd.camel@kernel.org> (raw)
In-Reply-To: <20250509190354.5393-8-cel@kernel.org>

On Fri, 2025-05-09 at 15:03 -0400, cel@kernel.org wrote:
> From: Chuck Lever <chuck.lever@oracle.com>
> 
> All three call sites do the same thing.
> 
> I'm struggling with this a bit, however. struct xdr_buf is an XDR
> layer object and unmarshaling a WRITE payload is clearly a task
> intended to be done by the proc and xdr functions, not by VFS. This
> feels vaguely like a layering violation.
> 

I think it's fine.

> Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
> ---
>  fs/nfsd/nfs3proc.c         |  5 +---
>  fs/nfsd/nfs4proc.c         |  8 ++----
>  fs/nfsd/nfsproc.c          |  9 +++----
>  fs/nfsd/vfs.c              | 52 +++++++++++++++++++++++++++++---------
>  fs/nfsd/vfs.h              | 10 ++++----
>  include/linux/sunrpc/svc.h |  2 +-
>  net/sunrpc/svc.c           |  4 +--
>  7 files changed, 54 insertions(+), 36 deletions(-)
> 
> diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
> index 8902fae8c62d..12e1eef810e7 100644
> --- a/fs/nfsd/nfs3proc.c
> +++ b/fs/nfsd/nfs3proc.c
> @@ -220,7 +220,6 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
>  	struct nfsd3_writeargs *argp = rqstp->rq_argp;
>  	struct nfsd3_writeres *resp = rqstp->rq_resp;
>  	unsigned long cnt = argp->len;
> -	unsigned int nvecs;
>  
>  	dprintk("nfsd: WRITE(3)    %s %d bytes at %Lu%s\n",
>  				SVCFH_fmt(&argp->fh),
> @@ -235,10 +234,8 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
>  
>  	fh_copy(&resp->fh, &argp->fh);
>  	resp->committed = argp->stable;
> -	nvecs = svc_fill_write_vector(rqstp, &argp->payload);
> -
>  	resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
> -				  rqstp->rq_vec, nvecs, &cnt,
> +				  &argp->payload, &cnt,
>  				  resp->committed, resp->verf);
>  	resp->count = cnt;
>  	resp->status = nfsd3_map_status(resp->status);
> diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
> index 2b16ee1ae461..ffd8b1d499df 100644
> --- a/fs/nfsd/nfs4proc.c
> +++ b/fs/nfsd/nfs4proc.c
> @@ -1216,7 +1216,6 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
>  	struct nfsd_file *nf = NULL;
>  	__be32 status = nfs_ok;
>  	unsigned long cnt;
> -	int nvecs;
>  
>  	if (write->wr_offset > (u64)OFFSET_MAX ||
>  	    write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
> @@ -1231,12 +1230,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
>  		return status;
>  
>  	write->wr_how_written = write->wr_stable_how;
> -
> -	nvecs = svc_fill_write_vector(rqstp, &write->wr_payload);
> -
>  	status = nfsd_vfs_write(rqstp, &cstate->current_fh, nf,
> -				write->wr_offset, rqstp->rq_vec, nvecs, &cnt,
> -				write->wr_how_written,
> +				write->wr_offset, &write->wr_payload,
> +				&cnt, write->wr_how_written,
>  				(__be32 *)write->wr_verifier.data);
>  	nfsd_file_put(nf);
>  
> diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
> index 7c573d792252..65cbe04184f3 100644
> --- a/fs/nfsd/nfsproc.c
> +++ b/fs/nfsd/nfsproc.c
> @@ -251,17 +251,14 @@ nfsd_proc_write(struct svc_rqst *rqstp)
>  	struct nfsd_writeargs *argp = rqstp->rq_argp;
>  	struct nfsd_attrstat *resp = rqstp->rq_resp;
>  	unsigned long cnt = argp->len;
> -	unsigned int nvecs;
>  
>  	dprintk("nfsd: WRITE    %s %u bytes at %d\n",
>  		SVCFH_fmt(&argp->fh),
>  		argp->len, argp->offset);
>  
> -	nvecs = svc_fill_write_vector(rqstp, &argp->payload);
> -
> -	resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
> -				  argp->offset, rqstp->rq_vec, nvecs,
> -				  &cnt, NFS_DATA_SYNC, NULL);
> +	fh_copy(&resp->fh, &argp->fh);
> +	resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
> +				  &argp->payload, &cnt, NFS_DATA_SYNC, NULL);
>  	if (resp->status == nfs_ok)
>  		resp->status = fh_getattr(&resp->fh, &resp->stat);
>  	else if (resp->status == nfserr_jukebox)
> diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
> index 7cfd26dec5a8..43ecc5ae0c3f 100644
> --- a/fs/nfsd/vfs.c
> +++ b/fs/nfsd/vfs.c
> @@ -1143,11 +1143,27 @@ static int wait_for_concurrent_writes(struct file *file)
>  	return err;
>  }
>  
> +/**
> + * nfsd_vfs_write - write data to an already-open file
> + * @rqstp: RPC execution context
> + * @fhp: File handle of file to write into
> + * @nf: An open file matching @fhp
> + * @offset: Byte offset of start
> + * @payload: xdr_buf containing the write payload
> + * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
> + * @stable: An NFS stable_how value
> + * @verf: NFS WRITE verifier
> + *
> + * Upon return, caller must invoke fh_put on @fhp.
> + *
> + * Return values:
> + *   An nfsstat value in network byte order.
> + */
>  __be32
> -nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
> -				loff_t offset, struct kvec *vec, int vlen,
> -				unsigned long *cnt, int stable,
> -				__be32 *verf)
> +nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
> +	       struct nfsd_file *nf, loff_t offset,
> +	       const struct xdr_buf *payload, unsigned long *cnt,
> +	       int stable, __be32 *verf)
>  {
>  	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
>  	struct file		*file = nf->nf_file;
> @@ -1162,6 +1178,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
>  	unsigned int		pflags = current->flags;
>  	rwf_t			flags = 0;
>  	bool			restore_flags = false;
> +	unsigned int		nvecs;
>  
>  	trace_nfsd_write_opened(rqstp, fhp, offset, *cnt);
>  
> @@ -1189,7 +1206,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
>  	if (stable && !fhp->fh_use_wgather)
>  		flags |= RWF_SYNC;
>  
> -	iov_iter_kvec(&iter, ITER_SOURCE, vec, vlen, *cnt);
> +	nvecs = svc_fill_write_vector(rqstp, payload);
> +	iov_iter_kvec(&iter, ITER_SOURCE, rqstp->rq_vec, nvecs, *cnt);
>  	since = READ_ONCE(file->f_wb_err);
>  	if (verf)
>  		nfsd_copy_write_verifier(verf, nn);
> @@ -1289,14 +1307,24 @@ __be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  	return err;
>  }
>  
> -/*
> - * Write data to a file.
> - * The stable flag requests synchronous writes.
> - * N.B. After this call fhp needs an fh_put
> +/**
> + * nfsd_write - open a file and write data to it
> + * @rqstp: RPC execution context
> + * @fhp: File handle of file to write into; nfsd_write() may modify it
> + * @offset: Byte offset of start
> + * @payload: xdr_buf containing the write payload
> + * @cnt: IN: number of bytes to write, OUT: number of bytes actually written
> + * @stable: An NFS stable_how value
> + * @verf: NFS WRITE verifier
> + *
> + * Upon return, caller must invoke fh_put on @fhp.
> + *
> + * Return values:
> + *   An nfsstat value in network byte order.
>   */
>  __be32
>  nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
> -	   struct kvec *vec, int vlen, unsigned long *cnt, int stable,
> +	   const struct xdr_buf *payload, unsigned long *cnt, int stable,
>  	   __be32 *verf)
>  {
>  	struct nfsd_file *nf;
> @@ -1308,8 +1336,8 @@ nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t offset,
>  	if (err)
>  		goto out;
>  
> -	err = nfsd_vfs_write(rqstp, fhp, nf, offset, vec,
> -			vlen, cnt, stable, verf);
> +	err = nfsd_vfs_write(rqstp, fhp, nf, offset, payload, cnt,
> +			     stable, verf);
>  	nfsd_file_put(nf);
>  out:
>  	trace_nfsd_write_done(rqstp, fhp, offset, *cnt);
> diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
> index f9b09b842856..eff04959606f 100644
> --- a/fs/nfsd/vfs.h
> +++ b/fs/nfsd/vfs.h
> @@ -128,13 +128,13 @@ bool		nfsd_read_splice_ok(struct svc_rqst *rqstp);
>  __be32		nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  				loff_t offset, unsigned long *count,
>  				u32 *eof);
> -__be32 		nfsd_write(struct svc_rqst *, struct svc_fh *, loff_t,
> -				struct kvec *, int, unsigned long *,
> -				int stable, __be32 *verf);
> +__be32		nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
> +				loff_t offset, const struct xdr_buf *payload,
> +				unsigned long *cnt, int stable, __be32 *verf);
>  __be32		nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp,
>  				struct nfsd_file *nf, loff_t offset,
> -				struct kvec *vec, int vlen, unsigned long *cnt,
> -				int stable, __be32 *verf);
> +				const struct xdr_buf *payload,
> +				unsigned long *cnt, int stable, __be32 *verf);
>  __be32		nfsd_readlink(struct svc_rqst *, struct svc_fh *,
>  				char *, int *);
>  __be32		nfsd_symlink(struct svc_rqst *, struct svc_fh *,
> diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
> index 538bea716c6b..dec636345ee2 100644
> --- a/include/linux/sunrpc/svc.h
> +++ b/include/linux/sunrpc/svc.h
> @@ -471,7 +471,7 @@ int		   svc_encode_result_payload(struct svc_rqst *rqstp,
>  					     unsigned int offset,
>  					     unsigned int length);
>  unsigned int	   svc_fill_write_vector(struct svc_rqst *rqstp,
> -					 struct xdr_buf *payload);
> +					 const struct xdr_buf *payload);
>  char		  *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
>  					     struct kvec *first, void *p,
>  					     size_t total);
> diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
> index 2c1c4aa93f43..a8861a80bc04 100644
> --- a/net/sunrpc/svc.c
> +++ b/net/sunrpc/svc.c
> @@ -1727,10 +1727,10 @@ EXPORT_SYMBOL_GPL(svc_encode_result_payload);
>   * Fills in rqstp::rq_vec, and returns the number of elements.
>   */
>  unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
> -				   struct xdr_buf *payload)
> +				   const struct xdr_buf *payload)
>  {
> +	const struct kvec *first = payload->head;
>  	struct page **pages = payload->pages;
> -	struct kvec *first = payload->head;
>  	struct kvec *vec = rqstp->rq_vec;
>  	size_t total = payload->len;
>  	unsigned int i;

Reviewed-by: Jeff Layton <jlayton@kernel.org>

  reply	other threads:[~2025-05-13 12:02 UTC|newest]

Thread overview: 28+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-05-09 19:03 [PATCH v5 00/19] Allocate payload arrays dynamically cel
2025-05-09 19:03 ` [PATCH v5 01/19] svcrdma: Reduce the number of rdma_rw contexts per-QP cel
2025-05-13 12:00   ` Jeff Layton
2025-05-09 19:03 ` [PATCH v5 02/19] sunrpc: Add a helper to derive maxpages from sv_max_mesg cel
2025-05-09 19:03 ` [PATCH v5 03/19] sunrpc: Remove backchannel check in svc_init_buffer() cel
2025-05-09 19:03 ` [PATCH v5 04/19] sunrpc: Replace the rq_pages array with dynamically-allocated memory cel
2025-05-09 19:03 ` [PATCH v5 05/19] sunrpc: Replace the rq_bvec " cel
2025-05-09 19:03 ` [PATCH v5 06/19] NFSD: Use rqstp->rq_bvec in nfsd_iter_read() cel
2025-05-09 19:03 ` [PATCH v5 07/19] NFSD: De-duplicate the svc_fill_write_vector() call sites cel
2025-05-13 12:02   ` Jeff Layton [this message]
2025-05-09 19:03 ` [PATCH v5 08/19] SUNRPC: Export xdr_buf_to_bvec() cel
2025-05-09 19:03 ` [PATCH v5 09/19] NFSD: Use rqstp->rq_bvec in nfsd_iter_write() cel
2025-05-09 19:03 ` [PATCH v5 10/19] SUNRPC: Remove svc_fill_write_vector() cel
2025-05-09 19:03 ` [PATCH v5 11/19] SUNRPC: Remove svc_rqst :: rq_vec cel
2025-05-09 19:03 ` [PATCH v5 12/19] sunrpc: Adjust size of socket's receive page array dynamically cel
2025-05-09 19:03 ` [PATCH v5 13/19] svcrdma: Adjust the number of entries in svc_rdma_recv_ctxt::rc_pages cel
2025-05-09 19:03 ` [PATCH v5 14/19] svcrdma: Adjust the number of entries in svc_rdma_send_ctxt::sc_pages cel
2025-05-09 19:03 ` [PATCH v5 15/19] sunrpc: Remove the RPCSVC_MAXPAGES macro cel
2025-05-09 19:03 ` [PATCH v5 16/19] NFSD: Remove NFSD_BUFSIZE cel
2025-05-09 19:03 ` [PATCH v5 17/19] NFSD: Remove NFSSVC_MAXBLKSIZE_V2 macro cel
2025-05-09 19:03 ` [PATCH v5 18/19] NFSD: Add a "default" block size cel
2025-05-09 19:03 ` [PATCH v5 19/19] SUNRPC: Bump the maximum payload size for the server cel
2025-05-12 16:44   ` Aurélien Couderc
2025-05-12 18:09     ` Chuck Lever
2025-05-13  8:42       ` Aurélien Couderc
2025-05-13 12:08         ` Chuck Lever
2025-05-14  0:11         ` NeilBrown
2025-05-13 12:05 ` [PATCH v5 00/19] Allocate payload arrays dynamically Jeff Layton

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=a0f1232d29e699a40da0e41c423bcad8e99887cd.camel@kernel.org \
    --to=jlayton@kernel.org \
    --cc=cel@kernel.org \
    --cc=chuck.lever@oracle.com \
    --cc=dai.ngo@oracle.com \
    --cc=linux-nfs@vger.kernel.org \
    --cc=linux-rdma@vger.kernel.org \
    --cc=neil@brown.name \
    --cc=okorniev@redhat.com \
    --cc=tom@talpey.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox