From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-fsdevel@vger.kernel.org, linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 14/48] filemap: Convert find_get_entry to return a folio
Date: Wed,  8 Dec 2021 04:22:22 +0000
Message-ID: <20211208042256.1923824-15-willy@infradead.org>
In-Reply-To: <20211208042256.1923824-1-willy@infradead.org>

Convert find_get_entry() to return a folio and convert its callers to
cope.  Saves 580 bytes of kernel text; all five callers are reduced in
size.
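
For illustration only (not part of this patch), a typical caller after
the conversion receives a folio from find_get_entry() and only converts
to a precise page where a page-based interface still requires one,
mirroring the find_get_pages_range() hunk below:

	struct folio *folio;
	unsigned ret = 0;

	rcu_read_lock();
	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
		/* Skip over shadow, swap and DAX entries */
		if (xa_is_value(folio))
			continue;
		pages[ret++] = folio_file_page(folio, xas.xa_index);
	}
	rcu_read_unlock();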

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 mm/filemap.c | 146 +++++++++++++++++++++++++--------------------------
 1 file changed, 72 insertions(+), 74 deletions(-)

diff --git a/mm/filemap.c b/mm/filemap.c
index 4fe845b30f33..2a51ec720e9e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1976,37 +1976,36 @@ struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
 }
 EXPORT_SYMBOL(__filemap_get_folio);
 
-static inline struct page *find_get_entry(struct xa_state *xas, pgoff_t max,
+static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,
 		xa_mark_t mark)
 {
-	struct page *page;
+	struct folio *folio;
 
 retry:
 	if (mark == XA_PRESENT)
-		page = xas_find(xas, max);
+		folio = xas_find(xas, max);
 	else
-		page = xas_find_marked(xas, max, mark);
+		folio = xas_find_marked(xas, max, mark);
 
-	if (xas_retry(xas, page))
+	if (xas_retry(xas, folio))
 		goto retry;
 	/*
 	 * A shadow entry of a recently evicted page, a swap
 	 * entry from shmem/tmpfs or a DAX entry.  Return it
 	 * without attempting to raise page count.
 	 */
-	if (!page || xa_is_value(page))
-		return page;
+	if (!folio || xa_is_value(folio))
+		return folio;
 
-	if (!page_cache_get_speculative(page))
+	if (!folio_try_get_rcu(folio))
 		goto reset;
 
-	/* Has the page moved or been split? */
-	if (unlikely(page != xas_reload(xas))) {
-		put_page(page);
+	if (unlikely(folio != xas_reload(xas))) {
+		folio_put(folio);
 		goto reset;
 	}
 
-	return page;
+	return folio;
 reset:
 	xas_reset(xas);
 	goto retry;
@@ -2042,19 +2041,20 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
 		pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
-	struct page *page;
+	struct folio *folio;
 	unsigned int ret = 0;
 	unsigned nr_entries = PAGEVEC_SIZE;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
+	while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {
+		struct page *page = &folio->page;
 		/*
 		 * Terminate early on finding a THP, to allow the caller to
 		 * handle it all at once; but continue if this is hugetlbfs.
 		 */
-		if (!xa_is_value(page) && PageTransHuge(page) &&
-				!PageHuge(page)) {
-			page = find_subpage(page, xas.xa_index);
+		if (!xa_is_value(folio) && folio_test_large(folio) &&
+				!folio_test_hugetlb(folio)) {
+			page = folio_file_page(folio, xas.xa_index);
 			nr_entries = ret + 1;
 		}
 
@@ -2078,15 +2078,14 @@ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
  * @indices:	The cache indices of the entries in @pvec.
  *
  * find_lock_entries() will return a batch of entries from @mapping.
- * Swap, shadow and DAX entries are included.  Pages are returned
- * locked and with an incremented refcount.  Pages which are locked by
- * somebody else or under writeback are skipped.  Only the head page of
- * a THP is returned.  Pages which are partially outside the range are
- * not returned.
+ * Swap, shadow and DAX entries are included.  Folios are returned
+ * locked and with an incremented refcount.  Folios which are locked
+ * by somebody else or under writeback are skipped.  Folios which are
+ * partially outside the range are not returned.
  *
  * The entries have ascending indexes.  The indices may not be consecutive
- * due to not-present entries, THP pages, pages which could not be locked
- * or pages under writeback.
+ * due to not-present entries, large folios, folios which could not be
+ * locked or folios under writeback.
  *
  * Return: The number of entries which were found.
  */
@@ -2094,37 +2093,36 @@ unsigned find_lock_entries(struct address_space *mapping, pgoff_t start,
 		pgoff_t end, struct pagevec *pvec, pgoff_t *indices)
 {
 	XA_STATE(xas, &mapping->i_pages, start);
-	struct page *page;
+	struct folio *folio;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
-		if (!xa_is_value(page)) {
-			if (page->index < start)
+	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
+		if (!xa_is_value(folio)) {
+			if (folio->index < start)
 				goto put;
-			if (page->index + thp_nr_pages(page) - 1 > end)
+			if (folio->index + folio_nr_pages(folio) - 1 > end)
 				goto put;
-			if (!trylock_page(page))
+			if (!folio_trylock(folio))
 				goto put;
-			if (page->mapping != mapping || PageWriteback(page))
+			if (folio->mapping != mapping ||
+			    folio_test_writeback(folio))
 				goto unlock;
-			VM_BUG_ON_PAGE(!thp_contains(page, xas.xa_index),
-					page);
+			VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),
+					folio);
 		}
 		indices[pvec->nr] = xas.xa_index;
-		if (!pagevec_add(pvec, page))
+		if (!pagevec_add(pvec, &folio->page))
 			break;
 		goto next;
 unlock:
-		unlock_page(page);
+		folio_unlock(folio);
 put:
-		put_page(page);
+		folio_put(folio);
 next:
-		if (!xa_is_value(page) && PageTransHuge(page)) {
-			unsigned int nr_pages = thp_nr_pages(page);
-
-			/* Final THP may cross MAX_LFS_FILESIZE on 32-bit */
-			xas_set(&xas, page->index + nr_pages);
-			if (xas.xa_index < nr_pages)
+		if (!xa_is_value(folio) && folio_test_large(folio)) {
+			xas_set(&xas, folio->index + folio_nr_pages(folio));
+			/* Did we wrap on 32-bit? */
+			if (!xas.xa_index)
 				break;
 		}
 	}
@@ -2159,19 +2157,19 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 			      struct page **pages)
 {
 	XA_STATE(xas, &mapping->i_pages, *start);
-	struct page *page;
+	struct folio *folio;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, end, XA_PRESENT))) {
+	while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {
 		/* Skip over shadow, swap and DAX entries */
-		if (xa_is_value(page))
+		if (xa_is_value(folio))
 			continue;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = folio_file_page(folio, xas.xa_index);
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
@@ -2268,25 +2266,25 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 			struct page **pages)
 {
 	XA_STATE(xas, &mapping->i_pages, *index);
-	struct page *page;
+	struct folio *folio;
 	unsigned ret = 0;
 
 	if (unlikely(!nr_pages))
 		return 0;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, end, tag))) {
+	while ((folio = find_get_entry(&xas, end, tag))) {
 		/*
 		 * Shadow entries should never be tagged, but this iteration
 		 * is lockless so there is a window for page reclaim to evict
 		 * a page we saw tagged.  Skip over it.
 		 */
-		if (xa_is_value(page))
+		if (xa_is_value(folio))
 			continue;
 
-		pages[ret] = page;
+		pages[ret] = &folio->page;
 		if (++ret == nr_pages) {
-			*index = page->index + thp_nr_pages(page);
+			*index = folio->index + folio_nr_pages(folio);
 			goto out;
 		}
 	}
@@ -2794,44 +2792,44 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 }
 EXPORT_SYMBOL(generic_file_read_iter);
 
-static inline loff_t page_seek_hole_data(struct xa_state *xas,
-		struct address_space *mapping, struct page *page,
+static inline loff_t folio_seek_hole_data(struct xa_state *xas,
+		struct address_space *mapping, struct folio *folio,
 		loff_t start, loff_t end, bool seek_data)
 {
 	const struct address_space_operations *ops = mapping->a_ops;
 	size_t offset, bsz = i_blocksize(mapping->host);
 
-	if (xa_is_value(page) || PageUptodate(page))
+	if (xa_is_value(folio) || folio_test_uptodate(folio))
 		return seek_data ? start : end;
 	if (!ops->is_partially_uptodate)
 		return seek_data ? end : start;
 
 	xas_pause(xas);
 	rcu_read_unlock();
-	lock_page(page);
-	if (unlikely(page->mapping != mapping))
+	folio_lock(folio);
+	if (unlikely(folio->mapping != mapping))
 		goto unlock;
 
-	offset = offset_in_thp(page, start) & ~(bsz - 1);
+	offset = offset_in_folio(folio, start) & ~(bsz - 1);
 
 	do {
-		if (ops->is_partially_uptodate(page, offset, bsz) == seek_data)
+		if (ops->is_partially_uptodate(&folio->page, offset, bsz) ==
+							seek_data)
 			break;
 		start = (start + bsz) & ~(bsz - 1);
 		offset += bsz;
-	} while (offset < thp_size(page));
+	} while (offset < folio_size(folio));
 unlock:
-	unlock_page(page);
+	folio_unlock(folio);
 	rcu_read_lock();
 	return start;
 }
 
-static inline
-unsigned int seek_page_size(struct xa_state *xas, struct page *page)
+static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)
 {
-	if (xa_is_value(page))
+	if (xa_is_value(folio))
 		return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);
-	return thp_size(page);
+	return folio_size(folio);
 }
 
 /**
@@ -2858,15 +2856,15 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
 	pgoff_t max = (end - 1) >> PAGE_SHIFT;
 	bool seek_data = (whence == SEEK_DATA);
-	struct page *page;
+	struct folio *folio;
 
 	if (end <= start)
 		return -ENXIO;
 
 	rcu_read_lock();
-	while ((page = find_get_entry(&xas, max, XA_PRESENT))) {
+	while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {
 		loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;
-		unsigned int seek_size;
+		size_t seek_size;
 
 		if (start < pos) {
 			if (!seek_data)
@@ -2874,9 +2872,9 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 			start = pos;
 		}
 
-		seek_size = seek_page_size(&xas, page);
-		pos = round_up(pos + 1, seek_size);
-		start = page_seek_hole_data(&xas, mapping, page, start, pos,
+		seek_size = seek_folio_size(&xas, folio);
+		pos = round_up((u64)pos + 1, seek_size);
+		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
 				seek_data);
 		if (start < pos)
 			goto unlock;
@@ -2884,15 +2882,15 @@ loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
 			break;
 		if (seek_size > PAGE_SIZE)
 			xas_set(&xas, pos >> PAGE_SHIFT);
-		if (!xa_is_value(page))
-			put_page(page);
+		if (!xa_is_value(folio))
+			folio_put(folio);
 	}
 	if (seek_data)
 		start = -ENXIO;
 unlock:
 	rcu_read_unlock();
-	if (page && !xa_is_value(page))
-		put_page(page);
+	if (folio && !xa_is_value(folio))
+		folio_put(folio);
 	if (start > end)
 		return end;
 	return start;
-- 
2.33.0
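
Addendum for readers (not part of the patch): folio_seek_hole_data()
and seek_folio_size() above are helpers of mapping_seek_hole_data(),
which filesystems use to implement llseek(SEEK_HOLE/SEEK_DATA).  A
minimal sketch of such an ->llseek callsite (shmem does essentially
this; locking and bounds checks elided) is:

	case SEEK_DATA:
	case SEEK_HOLE:
		offset = mapping_seek_hole_data(mapping, offset,
						i_size_read(inode), whence);
		break;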



Thread overview: 126+ messages
2021-12-08  4:22 [PATCH 00/48] Folios for 5.17 Matthew Wilcox (Oracle)
2021-12-08  4:22 ` [PATCH 01/48] filemap: Remove PageHWPoison check from next_uptodate_page() Matthew Wilcox (Oracle)
2021-12-23  6:48   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 02/48] fs/writeback: Convert inode_switch_wbs_work_fn to folios Matthew Wilcox (Oracle)
2021-12-23  6:50   ` Christoph Hellwig
2021-12-23 13:50     ` Matthew Wilcox
2021-12-08  4:22 ` [PATCH 03/48] mm/doc: Add documentation for folio_test_uptodate Matthew Wilcox (Oracle)
2021-12-23  6:51   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 04/48] mm/writeback: Improve __folio_mark_dirty() comment Matthew Wilcox (Oracle)
2021-12-23  6:52   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 05/48] pagevec: Add folio_batch Matthew Wilcox (Oracle)
2021-12-23  6:54   ` Christoph Hellwig
2021-12-23 14:18     ` Matthew Wilcox
2021-12-24  6:13       ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 06/48] iov_iter: Add copy_folio_to_iter() Matthew Wilcox (Oracle)
2021-12-23  6:55   ` Christoph Hellwig
2021-12-23 14:22     ` Matthew Wilcox
2021-12-08  4:22 ` [PATCH 07/48] iov_iter: Convert iter_xarray to use folios Matthew Wilcox (Oracle)
2021-12-23  6:57   ` Christoph Hellwig
2021-12-23 14:31     ` Matthew Wilcox
2021-12-23 15:24   ` David Howells
2021-12-24  6:14     ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 08/48] mm: Add folio_test_pmd_mappable() Matthew Wilcox (Oracle)
2021-12-23  6:58   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 09/48] filemap: Add folio_put_wait_locked() Matthew Wilcox (Oracle)
2021-12-23  7:00   ` Christoph Hellwig
2021-12-23 14:32     ` Matthew Wilcox
2021-12-08  4:22 ` [PATCH 10/48] filemap: Convert page_cache_delete to take a folio Matthew Wilcox (Oracle)
2021-12-23  7:01   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 11/48] filemap: Add filemap_unaccount_folio() Matthew Wilcox (Oracle)
2021-12-23  7:03   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 12/48] filemap: Convert tracing of page cache operations to folio Matthew Wilcox (Oracle)
2021-12-23  7:04   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 13/48] filemap: Add filemap_remove_folio and __filemap_remove_folio Matthew Wilcox (Oracle)
2021-12-23  7:06   ` Christoph Hellwig
2021-12-08  4:22 ` Matthew Wilcox (Oracle) [this message]
2021-12-23  7:08   ` [PATCH 14/48] filemap: Convert find_get_entry to return a folio Christoph Hellwig
2021-12-08  4:22 ` [PATCH 15/48] filemap: Remove thp_contains() Matthew Wilcox (Oracle)
2021-12-23  7:09   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 16/48] filemap: Convert filemap_get_read_batch to use folios Matthew Wilcox (Oracle)
2021-12-23  7:10   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 17/48] filemap: Convert find_get_pages_contig to folios Matthew Wilcox (Oracle)
2021-12-23  7:16   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 18/48] filemap: Convert filemap_read_page to take a folio Matthew Wilcox (Oracle)
2021-12-23  7:16   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 19/48] filemap: Convert filemap_create_page to folio Matthew Wilcox (Oracle)
2021-12-23  7:17   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 20/48] filemap: Convert filemap_range_uptodate to folios Matthew Wilcox (Oracle)
2021-12-23  7:18   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 21/48] readahead: Convert page_cache_async_ra() to take a folio Matthew Wilcox (Oracle)
2021-12-23  7:19   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 22/48] readahead: Convert page_cache_ra_unbounded to folios Matthew Wilcox (Oracle)
2021-12-23  7:19   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 23/48] filemap: Convert do_async_mmap_readahead to take a folio Matthew Wilcox (Oracle)
2021-12-23  7:23   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 24/48] filemap: Convert filemap_fault to folio Matthew Wilcox (Oracle)
2021-12-23  7:25   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 25/48] filemap: Add read_cache_folio and read_mapping_folio Matthew Wilcox (Oracle)
2021-12-23  7:39   ` Christoph Hellwig
2021-12-23 15:18     ` Matthew Wilcox
2021-12-23 16:20       ` Matthew Wilcox
2021-12-23 18:36   ` Matthew Wilcox
2021-12-08  4:22 ` [PATCH 26/48] filemap: Convert filemap_get_pages to use folios Matthew Wilcox (Oracle)
2021-12-23  7:40   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 27/48] filemap: Convert page_cache_delete_batch to folios Matthew Wilcox (Oracle)
2021-12-23  7:40   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 28/48] filemap: Use folios in next_uptodate_page Matthew Wilcox (Oracle)
2021-12-23  8:20   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 29/48] filemap: Use a folio in filemap_map_pages Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 30/48] filemap: Use a folio in filemap_page_mkwrite Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 31/48] filemap: Add filemap_release_folio() Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 32/48] truncate: Add truncate_cleanup_folio() Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 33/48] mm: Add unmap_mapping_folio() Matthew Wilcox (Oracle)
2021-12-23  7:36   ` Christoph Hellwig
2022-01-02 16:11     ` Matthew Wilcox
2022-01-03  7:53       ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 34/48] shmem: Convert part of shmem_undo_range() to use a folio Matthew Wilcox (Oracle)
2021-12-23  7:39   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 35/48] truncate,shmem: Add truncate_inode_folio() Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 36/48] truncate: Skip known-truncated indices Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 37/48] truncate: Convert invalidate_inode_pages2_range() to use a folio Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 38/48] truncate: Add invalidate_complete_folio2() Matthew Wilcox (Oracle)
2021-12-23  8:21   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 39/48] filemap: Convert filemap_read() to use a folio Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2022-01-01 16:14     ` Matthew Wilcox
2021-12-08  4:22 ` [PATCH 40/48] filemap: Convert filemap_get_read_batch() to use a folio_batch Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 41/48] filemap: Return only folios from find_get_entries() Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 42/48] mm: Convert find_lock_entries() to use a folio_batch Matthew Wilcox (Oracle)
2021-12-08 11:29   ` kernel test robot
2021-12-08 14:30     ` Matthew Wilcox
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 43/48] mm: Remove pagevec_remove_exceptionals() Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 44/48] fs: Convert vfs_dedupe_file_range_compare to folios Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 45/48] truncate: Convert invalidate_inode_pages2_range " Matthew Wilcox (Oracle)
2021-12-23  8:22   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 46/48] truncate,shmem: Handle truncates that split large folios Matthew Wilcox (Oracle)
2021-12-08 16:43   ` Matthew Wilcox
2021-12-23  8:43   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 47/48] XArray: Add xas_advance() Matthew Wilcox (Oracle)
2021-12-23  8:29   ` Christoph Hellwig
2021-12-08  4:22 ` [PATCH 48/48] mm: Use multi-index entries in the page cache Matthew Wilcox (Oracle)
2021-12-23  8:47   ` Christoph Hellwig
2021-12-26 22:26 ` [PATCH 00/48] Folios for 5.17 William Kucharski
2022-01-03  1:27   ` Matthew Wilcox
2022-01-03 19:28     ` William Kucharski
2022-01-02 16:19 ` Matthew Wilcox
2022-01-02 23:46   ` William Kucharski
2022-01-03  1:29   ` Hugh Dickins
2022-01-03  1:44   ` Matthew Wilcox
2022-01-03  9:29   ` Christoph Hellwig
2022-01-08  5:32   ` Matthew Wilcox
2022-01-08 16:47     ` Hugh Dickins
2022-01-08 16:53       ` Matthew Wilcox
2022-01-08 17:20         ` Hugh Dickins
