public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: "Darrick J. Wong" <djwong@kernel.org>
To: Hans Holmberg <Hans.Holmberg@wdc.com>
Cc: "linux-xfs@vger.kernel.org" <linux-xfs@vger.kernel.org>,
	Carlos Maiolino <cem@kernel.org>,
	Dave Chinner <david@fromorbit.com>, hch <hch@lst.de>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>
Subject: Re: [RFC PATCH 2/2] xfs: add inode to zone caching for data placement
Date: Fri, 2 May 2025 13:04:15 -0700	[thread overview]
Message-ID: <20250502200415.GS25675@frogsfrogsfrogs> (raw)
In-Reply-To: <20250430084117.9850-3-hans.holmberg@wdc.com>

On Wed, Apr 30, 2025 at 08:41:21AM +0000, Hans Holmberg wrote:
> Placing data from the same file in the same zone is a great heuristic
> for reducing write amplification and we do this already - but only
> for sequential writes.
> 
> To support placing data in the same way for random writes, reuse the
> xfs mru cache to map inodes to open zones on first write. If a mapping
> is present, use the open zone for data placement for this file until
> the zone is full.
> 
> Signed-off-by: Hans Holmberg <hans.holmberg@wdc.com>

It seems like a decent idea to try to land random writes to the same
file in the same zone.  This helps us reduce seeking out of the zone on
subsequent reads, right?

If so, then I've understood the purpose, and:
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>

--D

> ---
>  fs/xfs/xfs_mount.h      |   1 +
>  fs/xfs/xfs_zone_alloc.c | 109 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 110 insertions(+)
> 
> diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
> index e5192c12e7ac..f90c0a16766f 100644
> --- a/fs/xfs/xfs_mount.h
> +++ b/fs/xfs/xfs_mount.h
> @@ -230,6 +230,7 @@ typedef struct xfs_mount {
>  	bool			m_update_sb;	/* sb needs update in mount */
>  	unsigned int		m_max_open_zones;
>  	unsigned int		m_zonegc_low_space;
> +	struct xfs_mru_cache	*m_zone_cache;  /* Inode to open zone cache */
>  
>  	/*
>  	 * Bitsets of per-fs metadata that have been checked and/or are sick.
> diff --git a/fs/xfs/xfs_zone_alloc.c b/fs/xfs/xfs_zone_alloc.c
> index d509e49b2aaa..80add26c0111 100644
> --- a/fs/xfs/xfs_zone_alloc.c
> +++ b/fs/xfs/xfs_zone_alloc.c
> @@ -24,6 +24,7 @@
>  #include "xfs_zone_priv.h"
>  #include "xfs_zones.h"
>  #include "xfs_trace.h"
> +#include "xfs_mru_cache.h"
>  
>  void
>  xfs_open_zone_put(
> @@ -796,6 +797,100 @@ xfs_submit_zoned_bio(
>  	submit_bio(&ioend->io_bio);
>  }
>  
> +/*
> + * Cache the last zone written to for an inode so that it is considered first
> + * for subsequent writes.
> + */
> +struct xfs_zone_cache_item {
> +	struct xfs_mru_cache_elem	mru;
> +	struct xfs_open_zone		*oz;
> +};
> +
> +static inline struct xfs_zone_cache_item *
> +xfs_zone_cache_item(struct xfs_mru_cache_elem *mru)
> +{
> +	return container_of(mru, struct xfs_zone_cache_item, mru);
> +}
> +
> +static void
> +xfs_zone_cache_free_func(
> +	void				*data,
> +	struct xfs_mru_cache_elem	*mru)
> +{
> +	struct xfs_zone_cache_item	*item = xfs_zone_cache_item(mru);
> +
> +	xfs_open_zone_put(item->oz);
> +	kfree(item);
> +}
> +
> +/*
> + * Check if we have a cached last open zone available for the inode and,
> + * if so, return a reference to it.
> + */
> +static struct xfs_open_zone *
> +xfs_cached_zone(
> +	struct xfs_mount		*mp,
> +	struct xfs_inode		*ip)
> +{
> +	struct xfs_mru_cache_elem	*mru;
> +	struct xfs_open_zone		*oz;
> +
> +	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
> +	if (!mru)
> +		return NULL;
> +	oz = xfs_zone_cache_item(mru)->oz;
> +	if (oz) {
> +		/*
> +		 * GC only steals open zones at mount time, so no GC zones
> +		 * should end up in the cache.
> +		 */
> +		ASSERT(!oz->oz_is_gc);
> +		ASSERT(atomic_read(&oz->oz_ref) > 0);
> +		atomic_inc(&oz->oz_ref);
> +	}
> +	xfs_mru_cache_done(mp->m_zone_cache);
> +	return oz;
> +}
> +
> +/*
> + * Update the last used zone cache for a given inode.
> + *
> + * The caller must have a reference on the open zone.
> + */
> +static void
> +xfs_zone_cache_create_association(
> +	struct xfs_inode		*ip,
> +	struct xfs_open_zone		*oz)
> +{
> +	struct xfs_mount		*mp = ip->i_mount;
> +	struct xfs_zone_cache_item	*item = NULL;
> +	struct xfs_mru_cache_elem	*mru;
> +
> +	ASSERT(atomic_read(&oz->oz_ref) > 0);
> +	atomic_inc(&oz->oz_ref);
> +
> +	mru = xfs_mru_cache_lookup(mp->m_zone_cache, ip->i_ino);
> +	if (mru) {
> +		/*
> +		 * If we have an association already, update it to point to the
> +		 * new zone.
> +		 */
> +		item = xfs_zone_cache_item(mru);
> +		xfs_open_zone_put(item->oz);
> +		item->oz = oz;
> +		xfs_mru_cache_done(mp->m_zone_cache);
> +		return;
> +	}
> +
> +	item = kmalloc(sizeof(*item), GFP_KERNEL);
> +	if (!item) {
> +		xfs_open_zone_put(oz);
> +		return;
> +	}
> +	item->oz = oz;
> +	xfs_mru_cache_insert(mp->m_zone_cache, ip->i_ino, &item->mru);
> +}
> +
>  void
>  xfs_zone_alloc_and_submit(
>  	struct iomap_ioend	*ioend,
> @@ -819,11 +914,16 @@ xfs_zone_alloc_and_submit(
>  	 */
>  	if (!*oz && ioend->io_offset)
>  		*oz = xfs_last_used_zone(ioend);
> +	if (!*oz)
> +		*oz = xfs_cached_zone(mp, ip);
> +
>  	if (!*oz) {
>  select_zone:
>  		*oz = xfs_select_zone(mp, write_hint, pack_tight);
>  		if (!*oz)
>  			goto out_error;
> +
> +		xfs_zone_cache_create_association(ip, *oz);
>  	}
>  
>  	alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size),
> @@ -1211,6 +1311,14 @@ xfs_mount_zones(
>  	error = xfs_zone_gc_mount(mp);
>  	if (error)
>  		goto out_free_zone_info;
> +
> +	/*
> +	 * Set up an mru cache to track inode to open zone mappings for data
> +	 * placement purposes. The magic values for group count and lifetime
> +	 * are the same as the defaults for filestreams, which seems sane
> +	 * enough.
> +	 */
> +	xfs_mru_cache_create(&mp->m_zone_cache, mp,
> +			5000, 10, xfs_zone_cache_free_func);
>  	return 0;
>  
>  out_free_zone_info:
> @@ -1224,4 +1332,5 @@ xfs_unmount_zones(
>  {
>  	xfs_zone_gc_unmount(mp);
>  	xfs_free_zone_info(mp->m_zone_info);
> +	xfs_mru_cache_destroy(mp->m_zone_cache);
>  }
> -- 
> 2.34.1
> 

  reply	other threads:[~2025-05-02 20:04 UTC|newest]

Thread overview: 9+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-04-30  8:41 [RFC PATCH 0/2] Add mru cache for inode to zone allocation mapping Hans Holmberg
2025-04-30  8:41 ` [RFC PATCH 2/2] xfs: add inode to zone caching for data placement Hans Holmberg
2025-05-02 20:04   ` Darrick J. Wong [this message]
2025-05-05  5:55     ` hch
2025-04-30  8:41 ` [RFC PATCH 1/2] xfs: free the item in xfs_mru_cache_insert on failure Hans Holmberg
2025-05-02 20:06   ` Darrick J. Wong
2025-05-05  5:45     ` hch
2025-05-05 15:07       ` Darrick J. Wong
2025-05-05  5:57 ` [RFC PATCH 0/2] Add mru cache for inode to zone allocation mapping hch

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250502200415.GS25675@frogsfrogsfrogs \
    --to=djwong@kernel.org \
    --cc=Hans.Holmberg@wdc.com \
    --cc=cem@kernel.org \
    --cc=david@fromorbit.com \
    --cc=hch@lst.de \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox