From: Christoph Hellwig <hch@lst.de>
To: Eric Biggers <ebiggers@kernel.org>
Cc: Al Viro <viro@zeniv.linux.org.uk>,
Christian Brauner <brauner@kernel.org>, Jan Kara <jack@suse.cz>,
David Sterba <dsterba@suse.com>, "Theodore Ts'o" <tytso@mit.edu>,
Jaegeuk Kim <jaegeuk@kernel.org>, Chao Yu <chao@kernel.org>,
Andrey Albershteyn <aalbersh@redhat.com>,
Matthew Wilcox <willy@infradead.org>,
linux-fsdevel@vger.kernel.org, linux-btrfs@vger.kernel.org,
linux-ext4@vger.kernel.org,
linux-f2fs-devel@lists.sourceforge.net, fsverity@lists.linux.dev,
"Darrick J. Wong" <djwong@kernel.org>
Subject: [PATCH 06/11] fsverity: push out fsverity_info lookup
Date: Mon, 2 Feb 2026 07:06:35 +0100 [thread overview]
Message-ID: <20260202060754.270269-7-hch@lst.de> (raw)
In-Reply-To: <20260202060754.270269-1-hch@lst.de>
Pass a struct fsverity_info to the verification and readahead helpers,
and push the lookup into the callers. Right now this is a very
dumb, almost mechanical move that open codes a lot of fsverity_info_addr()
calls in the file systems. The subsequent patches will clean this up.
This prepares for reducing the number of fsverity_info lookups, which
will allow amortizing them better when using a more expensive lookup
method.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: "Darrick J. Wong" <djwong@kernel.org>
Acked-by: David Sterba <dsterba@suse.com> [btrfs]
---
fs/btrfs/extent_io.c | 3 ++-
fs/buffer.c | 4 +++-
fs/ext4/readpage.c | 14 +++++++++-----
fs/f2fs/compress.c | 4 +++-
fs/f2fs/data.c | 19 +++++++++++++------
fs/verity/verify.c | 24 ++++++++++++------------
include/linux/fsverity.h | 32 ++++++++++++++++++++++----------
7 files changed, 64 insertions(+), 36 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a4b74023618d..21430b7d8f27 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -484,7 +484,8 @@ static bool btrfs_verify_folio(struct folio *folio, u64 start, u32 len)
btrfs_folio_test_uptodate(fs_info, folio, start, len) ||
start >= i_size_read(folio->mapping->host))
return true;
- return fsverity_verify_folio(folio);
+ return fsverity_verify_folio(*fsverity_info_addr(folio->mapping->host),
+ folio);
}
static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
diff --git a/fs/buffer.c b/fs/buffer.c
index 838c0c571022..3982253b6805 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -309,9 +309,11 @@ static void verify_bh(struct work_struct *work)
struct postprocess_bh_ctx *ctx =
container_of(work, struct postprocess_bh_ctx, work);
struct buffer_head *bh = ctx->bh;
+ struct inode *inode = bh->b_folio->mapping->host;
bool valid;
- valid = fsverity_verify_blocks(bh->b_folio, bh->b_size, bh_offset(bh));
+ valid = fsverity_verify_blocks(*fsverity_info_addr(inode), bh->b_folio,
+ bh->b_size, bh_offset(bh));
end_buffer_async_read(bh, valid);
kfree(ctx);
}
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 8438b14da37a..823d67e98c70 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -97,6 +97,7 @@ static void verity_work(struct work_struct *work)
struct bio_post_read_ctx *ctx =
container_of(work, struct bio_post_read_ctx, work);
struct bio *bio = ctx->bio;
+ struct inode *inode = bio_first_folio_all(bio)->mapping->host;
/*
* fsverity_verify_bio() may call readahead() again, and although verity
@@ -109,7 +110,7 @@ static void verity_work(struct work_struct *work)
mempool_free(ctx, bio_post_read_ctx_pool);
bio->bi_private = NULL;
- fsverity_verify_bio(bio);
+ fsverity_verify_bio(*fsverity_info_addr(inode), bio);
__read_end_io(bio);
}
@@ -331,7 +332,9 @@ static int ext4_mpage_readpages(struct inode *inode,
folio_size(folio));
if (first_hole == 0) {
if (ext4_need_verity(inode, folio->index) &&
- !fsverity_verify_folio(folio))
+ !fsverity_verify_folio(
+ *fsverity_info_addr(inode),
+ folio))
goto set_error_page;
folio_end_read(folio, true);
continue;
@@ -409,7 +412,8 @@ int ext4_read_folio(struct file *file, struct folio *folio)
}
if (ext4_need_verity(inode, folio->index))
- fsverity_readahead(inode, folio->index, folio_nr_pages(folio));
+ fsverity_readahead(*fsverity_info_addr(inode), folio->index,
+ folio_nr_pages(folio));
return ext4_mpage_readpages(inode, NULL, folio);
}
@@ -422,8 +426,8 @@ void ext4_readahead(struct readahead_control *rac)
return;
if (ext4_need_verity(inode, readahead_index(rac)))
- fsverity_readahead(inode, readahead_index(rac),
- readahead_count(rac));
+ fsverity_readahead(*fsverity_info_addr(inode),
+ readahead_index(rac), readahead_count(rac));
ext4_mpage_readpages(inode, rac, NULL);
}
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 40a62f1dee4d..3de4a7e66959 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -1814,7 +1814,9 @@ static void f2fs_verify_cluster(struct work_struct *work)
if (!rpage)
continue;
- if (fsverity_verify_page(rpage))
+ if (fsverity_verify_page(
+ *fsverity_info_addr(rpage->mapping->host),
+ rpage))
SetPageUptodate(rpage);
else
ClearPageUptodate(rpage);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 58d8a311ef2c..3593208c99db 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -185,15 +185,19 @@ static void f2fs_verify_bio(struct work_struct *work)
bio_for_each_folio_all(fi, bio) {
struct folio *folio = fi.folio;
+ struct fsverity_info *vi =
+ *fsverity_info_addr(folio->mapping->host);
if (!f2fs_is_compressed_page(folio) &&
- !fsverity_verify_page(&folio->page)) {
+ !fsverity_verify_page(vi, &folio->page)) {
bio->bi_status = BLK_STS_IOERR;
break;
}
}
} else {
- fsverity_verify_bio(bio);
+ struct inode *inode = bio_first_folio_all(bio)->mapping->host;
+
+ fsverity_verify_bio(*fsverity_info_addr(inode), bio);
}
f2fs_finish_read_bio(bio, true);
@@ -2121,7 +2125,9 @@ static int f2fs_read_single_page(struct inode *inode, struct folio *folio,
zero_out:
folio_zero_segment(folio, 0, folio_size(folio));
if (f2fs_need_verity(inode, index) &&
- !fsverity_verify_folio(folio)) {
+ !fsverity_verify_folio(
+ *fsverity_info_addr(folio->mapping->host),
+ folio)) {
ret = -EIO;
goto out;
}
@@ -2475,7 +2481,8 @@ static int f2fs_read_data_folio(struct file *file, struct folio *folio)
}
if (f2fs_need_verity(inode, folio->index))
- fsverity_readahead(inode, folio->index, folio_nr_pages(folio));
+ fsverity_readahead(*fsverity_info_addr(inode), folio->index,
+ folio_nr_pages(folio));
return f2fs_mpage_readpages(inode, NULL, folio);
}
@@ -2493,8 +2500,8 @@ static void f2fs_readahead(struct readahead_control *rac)
return;
if (f2fs_need_verity(inode, readahead_index(rac)))
- fsverity_readahead(inode, readahead_index(rac),
- readahead_count(rac));
+ fsverity_readahead(*fsverity_info_addr(inode),
+ readahead_index(rac), readahead_count(rac));
f2fs_mpage_readpages(inode, rac, NULL);
}
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 0de55c8e4217..cf4c00273c16 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -39,7 +39,7 @@ static struct workqueue_struct *fsverity_read_workqueue;
/**
* fsverity_readahead() - kick off readahead on fsverity hashes
- * @inode: inode that is being read
+ * @vi: fsverity_info for the inode to be read
* @index: first file data page index that is being read
* @nr_pages: number of file data pages to be read
*
@@ -50,10 +50,10 @@ static struct workqueue_struct *fsverity_read_workqueue;
* ensure that the hashes are already cached on completion of the file data
* read if possible.
*/
-void fsverity_readahead(struct inode *inode, pgoff_t index,
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
unsigned long nr_pages)
{
- const struct fsverity_info *vi = *fsverity_info_addr(inode);
+ struct inode *inode = vi->inode;
const struct merkle_tree_params *params = &vi->tree_params;
u64 start_hidx = (u64)index << params->log_blocks_per_page;
u64 end_hidx =
@@ -315,11 +315,9 @@ static bool verify_data_block(struct fsverity_info *vi,
static void
fsverity_init_verification_context(struct fsverity_verification_context *ctx,
- struct inode *inode)
+ struct fsverity_info *vi)
{
- struct fsverity_info *vi = *fsverity_info_addr(inode);
-
- ctx->inode = inode;
+ ctx->inode = vi->inode;
ctx->vi = vi;
ctx->num_pending = 0;
if (vi->tree_params.hash_alg->algo_id == HASH_ALGO_SHA256 &&
@@ -399,6 +397,7 @@ static bool fsverity_add_data_blocks(struct fsverity_verification_context *ctx,
/**
* fsverity_verify_blocks() - verify data in a folio
+ * @vi: fsverity_info for the inode to be read
* @folio: the folio containing the data to verify
* @len: the length of the data to verify in the folio
* @offset: the offset of the data to verify in the folio
@@ -409,11 +408,12 @@ static bool fsverity_add_data_blocks(struct fsverity_verification_context *ctx,
*
* Return: %true if the data is valid, else %false.
*/
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset)
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset)
{
struct fsverity_verification_context ctx;
- fsverity_init_verification_context(&ctx, folio->mapping->host);
+ fsverity_init_verification_context(&ctx, vi);
if (fsverity_add_data_blocks(&ctx, folio, len, offset) &&
fsverity_verify_pending_blocks(&ctx))
@@ -426,6 +426,7 @@ EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
#ifdef CONFIG_BLOCK
/**
* fsverity_verify_bio() - verify a 'read' bio that has just completed
+ * @vi: fsverity_info for the inode to be read
* @bio: the bio to verify
*
* Verify the bio's data against the file's Merkle tree. All bio data segments
@@ -438,13 +439,12 @@ EXPORT_SYMBOL_GPL(fsverity_verify_blocks);
* filesystems) must instead call fsverity_verify_page() directly on each page.
* All filesystems must also call fsverity_verify_page() on holes.
*/
-void fsverity_verify_bio(struct bio *bio)
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio)
{
- struct inode *inode = bio_first_folio_all(bio)->mapping->host;
struct fsverity_verification_context ctx;
struct folio_iter fi;
- fsverity_init_verification_context(&ctx, inode);
+ fsverity_init_verification_context(&ctx, vi);
bio_for_each_folio_all(fi, bio) {
if (!fsverity_add_data_blocks(&ctx, fi.folio, fi.length,
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 580234d8ed2f..ab7244f7d172 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -197,12 +197,20 @@ int fsverity_ioctl_read_metadata(struct file *filp, const void __user *uarg);
/* verify.c */
-bool fsverity_verify_blocks(struct folio *folio, size_t len, size_t offset);
-void fsverity_verify_bio(struct bio *bio);
+bool fsverity_verify_blocks(struct fsverity_info *vi, struct folio *folio,
+ size_t len, size_t offset);
+void fsverity_verify_bio(struct fsverity_info *vi, struct bio *bio);
void fsverity_enqueue_verify_work(struct work_struct *work);
#else /* !CONFIG_FS_VERITY */
+/*
+ * Provide a stub to allow code using this to compile. All callsites should be
+ * guarded by compiler dead code elimination, and this forces a link error if
+ * not.
+ */
+struct fsverity_info **fsverity_info_addr(const struct inode *inode);
+
static inline struct fsverity_info *fsverity_get_info(const struct inode *inode)
{
return NULL;
@@ -251,14 +259,16 @@ static inline int fsverity_ioctl_read_metadata(struct file *filp,
/* verify.c */
-static inline bool fsverity_verify_blocks(struct folio *folio, size_t len,
+static inline bool fsverity_verify_blocks(struct fsverity_info *vi,
+ struct folio *folio, size_t len,
size_t offset)
{
WARN_ON_ONCE(1);
return false;
}
-static inline void fsverity_verify_bio(struct bio *bio)
+static inline void fsverity_verify_bio(struct fsverity_info *vi,
+ struct bio *bio)
{
WARN_ON_ONCE(1);
}
@@ -270,14 +280,16 @@ static inline void fsverity_enqueue_verify_work(struct work_struct *work)
#endif /* !CONFIG_FS_VERITY */
-static inline bool fsverity_verify_folio(struct folio *folio)
+static inline bool fsverity_verify_folio(struct fsverity_info *vi,
+ struct folio *folio)
{
- return fsverity_verify_blocks(folio, folio_size(folio), 0);
+ return fsverity_verify_blocks(vi, folio, folio_size(folio), 0);
}
-static inline bool fsverity_verify_page(struct page *page)
+static inline bool fsverity_verify_page(struct fsverity_info *vi,
+ struct page *page)
{
- return fsverity_verify_blocks(page_folio(page), PAGE_SIZE, 0);
+ return fsverity_verify_blocks(vi, page_folio(page), PAGE_SIZE, 0);
}
/**
@@ -319,8 +331,8 @@ static inline int fsverity_file_open(struct inode *inode, struct file *filp)
}
void fsverity_cleanup_inode(struct inode *inode);
-void fsverity_readahead(struct inode *inode, pgoff_t index,
- unsigned long nr_pages);
+void fsverity_readahead(struct fsverity_info *vi, pgoff_t index,
+ unsigned long nr_pages);
struct page *generic_read_merkle_tree_page(struct inode *inode, pgoff_t index);
void generic_readahead_merkle_tree(struct inode *inode, pgoff_t index,
--
2.47.3
next prev parent reply other threads:[~2026-02-02 6:08 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-02 6:06 fsverity speedup and memory usage optimization v5 Christoph Hellwig
2026-02-02 6:06 ` [PATCH 01/11] fsverity: don't issue readahead for non-ENOENT errors from __filemap_get_folio Christoph Hellwig
2026-02-02 13:23 ` Jan Kara
2026-02-17 21:14 ` [f2fs-dev] " patchwork-bot+f2fs
2026-02-02 6:06 ` [PATCH 02/11] readahead: push invalidate_lock out of page_cache_ra_unbounded Christoph Hellwig
2026-02-02 13:36 ` Jan Kara
2026-02-02 15:11 ` Matthew Wilcox
2026-02-02 15:17 ` Christoph Hellwig
2026-02-02 21:04 ` Eric Biggers
2026-02-02 6:06 ` [PATCH 03/11] ext4: move ->read_folio and ->readahead to readahead.c Christoph Hellwig
2026-02-02 13:32 ` Jan Kara
2026-02-03 0:57 ` Theodore Tso
2026-02-02 6:06 ` [PATCH 04/11] fsverity: kick off hash readahead at data I/O submission time Christoph Hellwig
2026-02-02 6:06 ` [PATCH 05/11] fsverity: deconstify the inode pointer in struct fsverity_info Christoph Hellwig
2026-02-02 6:06 ` Christoph Hellwig [this message]
2026-02-02 6:06 ` [PATCH 07/11] fs: consolidate fsverity_info lookup in buffer.c Christoph Hellwig
2026-02-02 13:38 ` Jan Kara
2026-02-02 6:06 ` [PATCH 08/11] ext4: consolidate fsverity_info lookup Christoph Hellwig
2026-02-02 6:06 ` [PATCH 09/11] f2fs: " Christoph Hellwig
2026-02-02 6:06 ` [PATCH 10/11] btrfs: " Christoph Hellwig
2026-02-02 6:06 ` [PATCH 11/11] fsverity: use a hashtable to find the fsverity_info Christoph Hellwig
2026-02-02 21:14 ` fsverity speedup and memory usage optimization v5 Eric Biggers
2026-02-02 22:34 ` Eric Biggers
2026-02-03 5:36 ` Christoph Hellwig
2026-02-04 14:54 ` Matthew Wilcox
2026-02-04 19:02 ` Eric Biggers
2026-02-04 19:09 ` Matthew Wilcox
2026-02-04 19:37 ` Eric Biggers
-- strict thread matches above, loose matches on Subject: below --
2026-01-22 8:21 fsverity cleanups, speedup and memory usage optimization v2 Christoph Hellwig
2026-01-22 8:22 ` [PATCH 06/11] fsverity: push out fsverity_info lookup Christoph Hellwig
2026-01-22 21:45 ` Darrick J. Wong
2026-01-24 21:19 ` Eric Biggers
2026-01-26 4:33 ` Christoph Hellwig
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260202060754.270269-7-hch@lst.de \
--to=hch@lst.de \
--cc=aalbersh@redhat.com \
--cc=brauner@kernel.org \
--cc=chao@kernel.org \
--cc=djwong@kernel.org \
--cc=dsterba@suse.com \
--cc=ebiggers@kernel.org \
--cc=fsverity@lists.linux.dev \
--cc=jack@suse.cz \
--cc=jaegeuk@kernel.org \
--cc=linux-btrfs@vger.kernel.org \
--cc=linux-ext4@vger.kernel.org \
--cc=linux-f2fs-devel@lists.sourceforge.net \
--cc=linux-fsdevel@vger.kernel.org \
--cc=tytso@mit.edu \
--cc=viro@zeniv.linux.org.uk \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox