From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	Jan Kara <jack@suse.com>, David Howells <dhowells@redhat.com>
Subject: [PATCH 01/12] writeback: Factor out writeback_finish()
Date: Mon, 26 Jun 2023 18:35:10 +0100
Message-ID: <20230626173521.459345-2-willy@infradead.org>
In-Reply-To: <20230626173521.459345-1-willy@infradead.org>

Instead of having a 'done' variable that controls the nested loops,
add a writeback_finish() function whose result can be returned directly.
This involves keeping more state in writeback_control, but it is just
moving data that was allocated on the stack to being allocated slightly
earlier on the stack.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/writeback.h |  6 ++++
 mm/page-writeback.c       | 74 +++++++++++++++++++++------------------
 2 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fba937999fbf..5b7d11f54013 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -11,6 +11,7 @@
 #include <linux/flex_proportions.h>
 #include <linux/backing-dev-defs.h>
 #include <linux/blk_types.h>
+#include <linux/pagevec.h>
 
 struct bio;
 
@@ -52,6 +53,10 @@ struct writeback_control {
 	loff_t range_start;
 	loff_t range_end;
 
+	struct folio_batch fbatch;
+	pgoff_t done_index;
+	int err;
+
 	enum writeback_sync_modes sync_mode;
 
 	unsigned for_kupdate:1;		/* A kupdate writeback */
@@ -59,6 +64,7 @@ struct writeback_control {
 	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
 	unsigned for_reclaim:1;		/* Invoked from the page allocator */
 	unsigned range_cyclic:1;	/* range_start is cyclic */
+	unsigned range_whole:1;		/* entire file */
 	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
 	unsigned unpinned_fscache_wb:1;	/* Cleared I_PINNING_FSCACHE_WB */
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1d17fb1ec863..abd7c0eebc72 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2360,6 +2360,24 @@ void tag_pages_for_writeback(struct address_space *mapping,
 }
 EXPORT_SYMBOL(tag_pages_for_writeback);
 
+static int writeback_finish(struct address_space *mapping,
+		struct writeback_control *wbc, bool done)
+{
+	folio_batch_release(&wbc->fbatch);
+
+	/*
+	 * If we hit the last page and there is more work to be done:
+	 * wrap the index back to the start of the file for the next
+	 * time we are called.
+	 */
+	if (wbc->range_cyclic && !done)
+		wbc->done_index = 0;
+	if (wbc->range_cyclic || (wbc->range_whole && wbc->nr_to_write > 0))
+		mapping->writeback_index = wbc->done_index;
+
+	return wbc->err;
+}
+
 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
  * @mapping: address space structure to write
@@ -2395,18 +2413,12 @@ int write_cache_pages(struct address_space *mapping,
 		      struct writeback_control *wbc, writepage_t writepage,
 		      void *data)
 {
-	int ret = 0;
-	int done = 0;
 	int error;
-	struct folio_batch fbatch;
 	int nr_folios;
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
-	pgoff_t done_index;
-	int range_whole = 0;
 	xa_mark_t tag;
 
-	folio_batch_init(&fbatch);
 	if (wbc->range_cyclic) {
 		index = mapping->writeback_index; /* prev offset */
 		end = -1;
@@ -2414,7 +2426,7 @@ int write_cache_pages(struct address_space *mapping,
 		index = wbc->range_start >> PAGE_SHIFT;
 		end = wbc->range_end >> PAGE_SHIFT;
 		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
-			range_whole = 1;
+			wbc->range_whole = 1;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages) {
 		tag_pages_for_writeback(mapping, index, end);
@@ -2422,20 +2434,24 @@ int write_cache_pages(struct address_space *mapping,
 	} else {
 		tag = PAGECACHE_TAG_DIRTY;
 	}
-	done_index = index;
-	while (!done && (index <= end)) {
+
+	wbc->done_index = index;
+	folio_batch_init(&wbc->fbatch);
+	wbc->err = 0;
+
+	while (index <= end) {
 		int i;
 
 		nr_folios = filemap_get_folios_tag(mapping, &index, end,
-				tag, &fbatch);
+				tag, &wbc->fbatch);
 
 		if (nr_folios == 0)
 			break;
 
 		for (i = 0; i < nr_folios; i++) {
-			struct folio *folio = fbatch.folios[i];
+			struct folio *folio = wbc->fbatch.folios[i];
 
-			done_index = folio->index;
+			wbc->done_index = folio->index;
 
 			folio_lock(folio);
 
@@ -2488,14 +2504,14 @@ int write_cache_pages(struct address_space *mapping,
 					folio_unlock(folio);
 					error = 0;
 				} else if (wbc->sync_mode != WB_SYNC_ALL) {
-					ret = error;
-					done_index = folio->index +
-						folio_nr_pages(folio);
-					done = 1;
-					break;
+					wbc->err = error;
+					wbc->done_index = folio->index +
+							folio_nr_pages(folio);
+					return writeback_finish(mapping,
+							wbc, true);
 				}
-				if (!ret)
-					ret = error;
+				if (!wbc->err)
+					wbc->err = error;
 			}
 
 			/*
@@ -2505,26 +2521,14 @@ int write_cache_pages(struct address_space *mapping,
 			 * we tagged for writeback prior to entering this loop.
 			 */
 			if (--wbc->nr_to_write <= 0 &&
-			    wbc->sync_mode == WB_SYNC_NONE) {
-				done = 1;
-				break;
-			}
+			    wbc->sync_mode == WB_SYNC_NONE)
+				return writeback_finish(mapping, wbc, true);
 		}
-		folio_batch_release(&fbatch);
+		folio_batch_release(&wbc->fbatch);
 		cond_resched();
 	}
 
-	/*
-	 * If we hit the last page and there is more work to be done: wrap
-	 * back the index back to the start of the file for the next
-	 * time we are called.
-	 */
-	if (wbc->range_cyclic && !done)
-		done_index = 0;
-	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-		mapping->writeback_index = done_index;
-
-	return ret;
+	return writeback_finish(mapping, wbc, false);
 }
 EXPORT_SYMBOL(write_cache_pages);
 
-- 
2.39.2




Thread overview: 35+ messages
2023-06-26 17:35 [PATCH 00/12] Convert write_cache_pages() to an iterator Matthew Wilcox (Oracle)
2023-06-26 17:35 ` Matthew Wilcox (Oracle) [this message]
2023-06-27  4:05   ` [PATCH 01/12] writeback: Factor out writeback_finish() Christoph Hellwig
2023-06-26 17:35 ` [PATCH 02/12] writeback: Factor writeback_get_batch() out of write_cache_pages() Matthew Wilcox (Oracle)
2023-06-26 17:35 ` [PATCH 03/12] writeback: Factor should_writeback_folio() " Matthew Wilcox (Oracle)
2023-06-27  4:12   ` Christoph Hellwig
2023-06-27 11:16     ` Matthew Wilcox
2023-06-27 14:48       ` Matthew Wilcox
2023-06-26 17:35 ` [PATCH 04/12] writeback: Simplify the loops in write_cache_pages() Matthew Wilcox (Oracle)
2023-06-27  4:16   ` Christoph Hellwig
2023-06-26 17:35 ` [PATCH 05/12] pagevec: Add ability to iterate a queue Matthew Wilcox (Oracle)
2023-06-26 17:35 ` [PATCH 06/12] writeback: Use the folio_batch queue iterator Matthew Wilcox (Oracle)
2023-06-27  4:25   ` Christoph Hellwig
2023-06-26 17:35 ` [PATCH 07/12] writeback: Factor writeback_iter_init() out of write_cache_pages() Matthew Wilcox (Oracle)
2023-06-27  4:30   ` Christoph Hellwig
2023-06-27  4:31     ` Christoph Hellwig
2023-06-27 11:08       ` Matthew Wilcox
2023-06-26 17:35 ` [PATCH 08/12] writeback: Factor writeback_get_folio() " Matthew Wilcox (Oracle)
2023-06-27  4:34   ` Christoph Hellwig
2023-06-27 15:25     ` Matthew Wilcox
2023-06-26 17:35 ` [PATCH 09/12] writeback: Factor writeback_iter_next() " Matthew Wilcox (Oracle)
2023-06-27  4:39   ` Christoph Hellwig
2023-06-27 15:31     ` Matthew Wilcox
2023-06-27 16:28       ` Christoph Hellwig
2023-06-28  9:10       ` Jan Kara
2023-06-26 17:35 ` [PATCH 10/12] writeback: Add for_each_writeback_folio() Matthew Wilcox (Oracle)
2023-06-26 17:35 ` [PATCH 11/12] iomap: Convert iomap_writepages() to use for_each_writeback_folio() Matthew Wilcox (Oracle)
2023-06-26 17:35 ` [PATCH 12/12] writeback: Remove a use of write_cache_pages() from do_writepages() Matthew Wilcox (Oracle)
2023-06-27  4:03 ` [PATCH 00/12] Convert write_cache_pages() to an iterator Christoph Hellwig
2023-06-27 10:53 ` David Howells
2023-06-28 19:31   ` Matthew Wilcox
2023-12-12  7:46     ` Christoph Hellwig
2023-06-28 20:03   ` David Howells
2023-07-04 18:08     ` Matthew Wilcox
2023-11-21  5:18 ` Christoph Hellwig
