From: Jeff Layton <jlayton@kernel.org>
To: David Howells <dhowells@redhat.com>, Steve French <smfrench@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>,
	Marc Dionne <marc.dionne@auristor.com>,
	 Paulo Alcantara <pc@manguebit.com>,
	Shyam Prasad N <sprasad@microsoft.com>,
	Tom Talpey <tom@talpey.com>,
	Dominique Martinet <asmadeus@codewreck.org>,
	Eric Van Hensbergen <ericvh@kernel.org>,
	Ilya Dryomov <idryomov@gmail.com>,
	Christian Brauner <christian@brauner.io>,
	linux-cachefs@redhat.com, linux-afs@lists.infradead.org,
	 linux-cifs@vger.kernel.org, linux-nfs@vger.kernel.org,
	 ceph-devel@vger.kernel.org, v9fs@lists.linux.dev,
	linux-fsdevel@vger.kernel.org,  linux-mm@kvack.org,
	netdev@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v4 10/39] netfs: Provide invalidate_folio and release_folio calls
Date: Wed, 13 Dec 2023 11:05:54 -0500
Message-ID: <987d3f0ac5cafc9706f5d532e60f9cc0379b3153.camel@kernel.org>
In-Reply-To: <20231213152350.431591-11-dhowells@redhat.com>

On Wed, 2023-12-13 at 15:23 +0000, David Howells wrote:
> Provide default invalidate_folio and release_folio calls.  These will need
> to interact with invalidation correctly at some point.  They will be needed
> if netfslib is to make use of folio->private for its own purposes.
> 
> Signed-off-by: David Howells <dhowells@redhat.com>
> Reviewed-by: Jeff Layton <jlayton@kernel.org>
> cc: linux-cachefs@redhat.com
> cc: linux-fsdevel@vger.kernel.org
> cc: linux-mm@kvack.org
> ---
>  fs/9p/vfs_addr.c      | 33 ++-------------------------
>  fs/afs/file.c         | 53 ++++---------------------------------------
>  fs/ceph/addr.c        | 24 ++------------------
>  fs/netfs/misc.c       | 42 ++++++++++++++++++++++++++++++++++
>  include/linux/netfs.h |  6 +++--
>  5 files changed, 54 insertions(+), 104 deletions(-)
> 
> diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
> index 131b83c31f85..055b672a247d 100644
> --- a/fs/9p/vfs_addr.c
> +++ b/fs/9p/vfs_addr.c
> @@ -88,35 +88,6 @@ const struct netfs_request_ops v9fs_req_ops = {
>  	.issue_read		= v9fs_issue_read,
>  };
>  
> -/**
> - * v9fs_release_folio - release the private state associated with a folio
> - * @folio: The folio to be released
> - * @gfp: The caller's allocation restrictions
> - *
> - * Returns true if the page can be released, false otherwise.
> - */
> -
> -static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
> -{
> -	if (folio_test_private(folio))
> -		return false;
> -#ifdef CONFIG_9P_FSCACHE
> -	if (folio_test_fscache(folio)) {
> -		if (current_is_kswapd() || !(gfp & __GFP_FS))
> -			return false;
> -		folio_wait_fscache(folio);
> -	}
> -	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
> -#endif
> -	return true;
> -}
> -
> -static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
> -				 size_t length)
> -{
> -	folio_wait_fscache(folio);
> -}
> -
>  #ifdef CONFIG_9P_FSCACHE
>  static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
>  				     bool was_async)
> @@ -324,8 +295,8 @@ const struct address_space_operations v9fs_addr_operations = {
>  	.writepage	= v9fs_vfs_writepage,
>  	.write_begin	= v9fs_write_begin,
>  	.write_end	= v9fs_write_end,
> -	.release_folio	= v9fs_release_folio,
> -	.invalidate_folio = v9fs_invalidate_folio,
> +	.release_folio	= netfs_release_folio,
> +	.invalidate_folio = netfs_invalidate_folio,
>  	.launder_folio	= v9fs_launder_folio,
>  	.direct_IO	= v9fs_direct_IO,
>  };
> diff --git a/fs/afs/file.c b/fs/afs/file.c
> index 5e2bca3b02fd..c5013ec3c1dc 100644
> --- a/fs/afs/file.c
> +++ b/fs/afs/file.c
> @@ -20,9 +20,6 @@
>  
>  static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
>  static int afs_symlink_read_folio(struct file *file, struct folio *folio);
> -static void afs_invalidate_folio(struct folio *folio, size_t offset,
> -			       size_t length);
> -static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
>  
>  static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
>  static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
> @@ -57,8 +54,8 @@ const struct address_space_operations afs_file_aops = {
>  	.readahead	= netfs_readahead,
>  	.dirty_folio	= netfs_dirty_folio,
>  	.launder_folio	= afs_launder_folio,
> -	.release_folio	= afs_release_folio,
> -	.invalidate_folio = afs_invalidate_folio,
> +	.release_folio	= netfs_release_folio,
> +	.invalidate_folio = netfs_invalidate_folio,
>  	.write_begin	= afs_write_begin,
>  	.write_end	= afs_write_end,
>  	.writepages	= afs_writepages,
> @@ -67,8 +64,8 @@ const struct address_space_operations afs_file_aops = {
>  
>  const struct address_space_operations afs_symlink_aops = {
>  	.read_folio	= afs_symlink_read_folio,
> -	.release_folio	= afs_release_folio,
> -	.invalidate_folio = afs_invalidate_folio,
> +	.release_folio	= netfs_release_folio,
> +	.invalidate_folio = netfs_invalidate_folio,
>  	.migrate_folio	= filemap_migrate_folio,
>  };
>  
> @@ -383,48 +380,6 @@ const struct netfs_request_ops afs_req_ops = {
>  	.issue_read		= afs_issue_read,
>  };
>  
> -/*
> - * invalidate part or all of a page
> - * - release a page and clean up its private data if offset is 0 (indicating
> - *   the entire page)
> - */
> -static void afs_invalidate_folio(struct folio *folio, size_t offset,
> -			       size_t length)
> -{
> -	_enter("{%lu},%zu,%zu", folio->index, offset, length);
> -
> -	folio_wait_fscache(folio);
> -	_leave("");
> -}
> -
> -/*
> - * release a page and clean up its private state if it's not busy
> - * - return true if the page can now be released, false if not
> - */
> -static bool afs_release_folio(struct folio *folio, gfp_t gfp)
> -{
> -	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
> -
> -	_enter("{{%llx:%llu}[%lu],%lx},%x",
> -	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
> -	       gfp);
> -
> -	/* deny if folio is being written to the cache and the caller hasn't
> -	 * elected to wait */
> -#ifdef CONFIG_AFS_FSCACHE
> -	if (folio_test_fscache(folio)) {
> -		if (current_is_kswapd() || !(gfp & __GFP_FS))
> -			return false;
> -		folio_wait_fscache(folio);
> -	}
> -	fscache_note_page_release(afs_vnode_cache(vnode));
> -#endif
> -
> -	/* Indicate that the folio can be released */
> -	_leave(" = T");
> -	return true;
> -}
> -
>  static void afs_add_open_mmap(struct afs_vnode *vnode)
>  {
>  	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index 654f408a0aca..500a87b68a9a 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
>  		ceph_put_snap_context(snapc);
>  	}
>  
> -	folio_wait_fscache(folio);
> -}
> -
> -static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
> -{
> -	struct inode *inode = folio->mapping->host;
> -	struct ceph_client *cl = ceph_inode_to_client(inode);
> -
> -	doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
> -	      folio->index, folio_test_dirty(folio) ? "" : "not ");
> -
> -	if (folio_test_private(folio))
> -		return false;
> -
> -	if (folio_test_fscache(folio)) {
> -		if (current_is_kswapd() || !(gfp & __GFP_FS))
> -			return false;
> -		folio_wait_fscache(folio);
> -	}
> -	ceph_fscache_note_page_release(inode);

I think this is the only caller of ceph_fscache_note_page_release(), so that
helper can likely be removed as well.
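
Something along these lines should finish the job (untested sketch -- this
assumes the helper is still the static inline in fs/ceph/cache.h and that this
was its last caller; if there's a !CONFIG_CEPH_FSCACHE stub it would want the
same treatment):

--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ ... @@
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-
-	fscache_note_page_release(ceph_fscache_cookie(ci));
-}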

> -	return true;
> +	netfs_invalidate_folio(folio, offset, length);
>  }
>  
>  static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
> @@ -1585,7 +1565,7 @@ const struct address_space_operations ceph_aops = {
>  	.write_end = ceph_write_end,
>  	.dirty_folio = ceph_dirty_folio,
>  	.invalidate_folio = ceph_invalidate_folio,
> -	.release_folio = ceph_release_folio,
> +	.release_folio = netfs_release_folio,
>  	.direct_IO = noop_direct_IO,
>  };
>  
> diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
> index 68baf55c47a4..d946d85764de 100644
> --- a/fs/netfs/misc.c
> +++ b/fs/netfs/misc.c
> @@ -84,3 +84,45 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
>  	}
>  }
>  EXPORT_SYMBOL(netfs_clear_inode_writeback);
> +
> +/*
> + * netfs_invalidate_folio - Invalidate or partially invalidate a folio
> + * @folio: Folio proposed for release
> + * @offset: Offset of the invalidated region
> + * @length: Length of the invalidated region
> + *
> + * Invalidate part or all of a folio for a network filesystem.  The folio will
> + * be removed afterwards if the invalidated region covers the entire folio.
> + */
> +void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
> +{
> +	_enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
> +
> +	folio_wait_fscache(folio);
> +}
> +EXPORT_SYMBOL(netfs_invalidate_folio);
> +
> +/**
> + * netfs_release_folio - Try to release a folio
> + * @folio: Folio proposed for release
> + * @gfp: Flags qualifying the release
> + *
> + * Request release of a folio and clean up its private state if it's not busy.
> + * Returns true if the folio can now be released, false if not
> + */
> +bool netfs_release_folio(struct folio *folio, gfp_t gfp)
> +{
> +	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
> +
> +	if (folio_test_private(folio))
> +		return false;
> +	if (folio_test_fscache(folio)) {
> +		if (current_is_kswapd() || !(gfp & __GFP_FS))
> +			return false;
> +		folio_wait_fscache(folio);
> +	}
> +
> +	fscache_note_page_release(netfs_i_cookie(ctx));
> +	return true;
> +}
> +EXPORT_SYMBOL(netfs_release_folio);
> diff --git a/include/linux/netfs.h b/include/linux/netfs.h
> index 06f57d9d09f6..8efbfd3b2820 100644
> --- a/include/linux/netfs.h
> +++ b/include/linux/netfs.h
> @@ -293,11 +293,13 @@ struct readahead_control;
>  void netfs_readahead(struct readahead_control *);
>  int netfs_read_folio(struct file *, struct folio *);
>  int netfs_write_begin(struct netfs_inode *, struct file *,
> -		struct address_space *, loff_t pos, unsigned int len,
> -		struct folio **, void **fsdata);
> +		      struct address_space *, loff_t pos, unsigned int len,
> +		      struct folio **, void **fsdata);
>  bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
>  int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
>  void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
> +void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
> +bool netfs_release_folio(struct folio *folio, gfp_t gfp);
>  
>  void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
>  void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
> 

-- 
Jeff Layton <jlayton@kernel.org>

Thread overview: 64+ messages
2023-12-13 15:23 [PATCH v4 00/39] netfs, afs, 9p: Delegate high-level I/O to netfslib David Howells
2023-12-13 15:23 ` [PATCH v4 01/39] netfs, fscache: Move fs/fscache/* into fs/netfs/ David Howells
2023-12-13 15:23 ` [PATCH v4 02/39] netfs, fscache: Combine fscache with netfs David Howells
2023-12-13 15:23 ` [PATCH v4 03/39] netfs, fscache: Remove ->begin_cache_operation David Howells
2023-12-13 15:23 ` [PATCH v4 04/39] netfs, fscache: Move /proc/fs/fscache to /proc/fs/netfs and put in a symlink David Howells
2023-12-13 15:23 ` [PATCH v4 05/39] netfs: Move pinning-for-writeback from fscache to netfs David Howells
2023-12-13 15:23 ` [PATCH v4 06/39] netfs: Add a procfile to list in-progress requests David Howells
2023-12-13 15:59   ` Jeff Layton
2023-12-13 15:23 ` [PATCH v4 07/39] netfs: Allow the netfs to make the io (sub)request alloc larger David Howells
2023-12-13 15:23 ` [PATCH v4 08/39] netfs: Add a ->free_subrequest() op David Howells
2023-12-13 15:23 ` [PATCH v4 09/39] afs: Don't use folio->private to record partial modification David Howells
2023-12-13 15:23 ` [PATCH v4 10/39] netfs: Provide invalidate_folio and release_folio calls David Howells
2023-12-13 16:05   ` Jeff Layton [this message]
2023-12-13 15:23 ` [PATCH v4 11/39] netfs: Implement unbuffered/DIO vs buffered I/O locking David Howells
2023-12-13 16:08   ` Jeff Layton
2023-12-13 16:30     ` Jeff Layton
2023-12-13 15:23 ` [PATCH v4 12/39] netfs: Add iov_iters to (sub)requests to describe various buffers David Howells
2023-12-13 16:37   ` Jeff Layton
2023-12-19 14:31   ` David Howells
2023-12-19 14:40   ` David Howells
2023-12-13 15:23 ` [PATCH v4 13/39] netfs: Add support for DIO buffering David Howells
2023-12-13 15:23 ` [PATCH v4 14/39] netfs: Provide tools to create a buffer in an xarray David Howells
2023-12-13 15:23 ` [PATCH v4 15/39] netfs: Add bounce buffering support David Howells
2023-12-13 15:23 ` [PATCH v4 16/39] netfs: Add func to calculate pagecount/size-limited span of an iterator David Howells
2023-12-13 15:23 ` [PATCH v4 17/39] netfs: Limit subrequest by size or number of segments David Howells
2023-12-13 15:23 ` [PATCH v4 18/39] netfs: Export netfs_put_subrequest() and some tracepoints David Howells
2023-12-13 18:01   ` Jeff Layton
2023-12-19 14:42   ` David Howells
2023-12-19 14:48   ` David Howells
2023-12-13 15:23 ` [PATCH v4 19/39] netfs: Extend the netfs_io_*request structs to handle writes David Howells
2023-12-13 15:23 ` [PATCH v4 20/39] netfs: Add a hook to allow tell the netfs to update its i_size David Howells
2023-12-13 15:23 ` [PATCH v4 21/39] netfs: Make netfs_put_request() handle a NULL pointer David Howells
2023-12-13 15:23 ` [PATCH v4 22/39] netfs: Make the refcounting of netfs_begin_read() easier to use David Howells
2023-12-13 15:23 ` [PATCH v4 23/39] netfs: Prep to use folio->private for write grouping and streaming write David Howells
2023-12-13 15:23 ` [PATCH v4 24/39] netfs: Dispatch write requests to process a writeback slice David Howells
2023-12-13 15:23 ` [PATCH v4 25/39] netfs: Provide func to copy data to pagecache for buffered write David Howells
2023-12-13 15:23 ` [PATCH v4 26/39] netfs: Make netfs_read_folio() handle streaming-write pages David Howells
2023-12-13 15:23 ` [PATCH v4 27/39] netfs: Allocate multipage folios in the writepath David Howells
2023-12-13 15:23 ` [PATCH v4 28/39] netfs: Implement support for unbuffered/DIO read David Howells
2023-12-14 12:43   ` Jeff Layton
2023-12-19 15:46   ` David Howells
2023-12-13 15:23 ` [PATCH v4 29/39] netfs: Implement unbuffered/DIO write support David Howells
2023-12-13 15:23 ` [PATCH v4 30/39] netfs: Implement buffered write API David Howells
2023-12-13 15:23 ` [PATCH v4 31/39] netfs: Allow buffered shared-writeable mmap through netfs_page_mkwrite() David Howells
2023-12-13 15:23 ` [PATCH v4 32/39] netfs: Provide netfs_file_read_iter() David Howells
2023-12-13 15:23 ` [PATCH v4 33/39] netfs, cachefiles: Pass upper bound length to allow expansion David Howells
2023-12-13 15:23 ` [PATCH v4 34/39] netfs: Provide a writepages implementation David Howells
2023-12-13 15:23 ` [PATCH v4 35/39] netfs: Provide a launder_folio implementation David Howells
2023-12-13 15:23 ` [PATCH v4 36/39] netfs: Implement a write-through caching option David Howells
2023-12-14 13:49   ` Jeff Layton
2023-12-19 16:51   ` David Howells
2023-12-19 17:19     ` Jeff Layton
2023-12-13 15:23 ` [PATCH v4 37/39] netfs: Optimise away reads above the point at which there can be no data David Howells
2023-12-14 14:07   ` Jeff Layton
2023-12-19 16:56   ` David Howells
2023-12-13 15:23 ` [PATCH v4 38/39] afs: Use the netfs write helpers David Howells
2023-12-13 15:23 ` [PATCH v4 39/39] 9p: Use netfslib read/write_iter David Howells
2023-12-13 15:39   ` Christian Schoenebeck
2023-12-14 14:11 ` [PATCH v4 00/39] netfs, afs, 9p: Delegate high-level I/O to netfslib Jeff Layton
2023-12-15 12:03 ` Christian Brauner
2023-12-15 13:29   ` Dominique Martinet
2023-12-18 11:05     ` Christian Brauner
2023-12-20 10:04   ` David Howells
2023-12-20 13:26     ` Christian Brauner
