From: Mark Harmstone <maharmstone@fb.com>
To: <linux-btrfs@vger.kernel.org>, <io-uring@vger.kernel.org>
Cc: Mark Harmstone <maharmstone@fb.com>
Subject: [PATCH 2/5] btrfs: change btrfs_encoded_read_regular_fill_pages to take a callback
Date: Mon, 14 Oct 2024 18:18:24 +0100 [thread overview]
Message-ID: <20241014171838.304953-3-maharmstone@fb.com> (raw)
In-Reply-To: <20241014171838.304953-1-maharmstone@fb.com>
Change btrfs_encoded_read_regular_fill_pages so that it takes a callback
rather than waiting, and add a new helper function btrfs_encoded_read_wait_cb
that waits for completion, matching the existing behaviour.
Signed-off-by: Mark Harmstone <maharmstone@fb.com>
---
fs/btrfs/btrfs_inode.h | 13 +++++++-
fs/btrfs/inode.c | 70 ++++++++++++++++++++++++++++++++----------
fs/btrfs/send.c | 15 ++++++++-
3 files changed, 79 insertions(+), 19 deletions(-)
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 3056c8aed8ef..6aea5bedc968 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -601,10 +601,21 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page
int btrfs_writepage_cow_fixup(struct page *page);
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
int compress_type);
+typedef void (btrfs_encoded_read_cb_t)(void *, int);
+
+struct btrfs_encoded_read_wait_ctx {
+ wait_queue_head_t wait;
+ bool done;
+ int err;
+};
+
+void btrfs_encoded_read_wait_cb(void *ctx, int err);
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
u64 disk_io_size,
- struct page **pages);
+ struct page **pages,
+ btrfs_encoded_read_cb_t cb,
+ void *cb_ctx);
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
struct btrfs_ioctl_encoded_io_args *encoded);
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b024ebc3dcd6..b5abe98f3af4 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9080,9 +9080,10 @@ static ssize_t btrfs_encoded_read_inline(
}
struct btrfs_encoded_read_private {
- wait_queue_head_t wait;
atomic_t pending;
blk_status_t status;
+ btrfs_encoded_read_cb_t *cb;
+ void *cb_ctx;
};
static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
@@ -9100,26 +9101,33 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
*/
WRITE_ONCE(priv->status, bbio->bio.bi_status);
}
- if (!atomic_dec_return(&priv->pending))
- wake_up(&priv->wait);
+ if (!atomic_dec_return(&priv->pending)) {
+ priv->cb(priv->cb_ctx,
+ blk_status_to_errno(READ_ONCE(priv->status)));
+ kfree(priv);
+ }
bio_put(&bbio->bio);
}
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
u64 file_offset, u64 disk_bytenr,
- u64 disk_io_size, struct page **pages)
+ u64 disk_io_size, struct page **pages,
+ btrfs_encoded_read_cb_t cb,
+ void *cb_ctx)
{
struct btrfs_fs_info *fs_info = inode->root->fs_info;
- struct btrfs_encoded_read_private priv = {
- .pending = ATOMIC_INIT(1),
- };
+ struct btrfs_encoded_read_private *priv;
unsigned long i = 0;
struct btrfs_bio *bbio;
- init_waitqueue_head(&priv.wait);
+ priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
+ if (!priv)
+ return -ENOMEM;
+
+ atomic_set(&priv->pending, 1);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
@@ -9127,11 +9135,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);
if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
- atomic_inc(&priv.pending);
+ atomic_inc(&priv->pending);
btrfs_submit_bio(bbio, 0);
bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
- btrfs_encoded_read_endio, &priv);
+ btrfs_encoded_read_endio, priv);
bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bbio->inode = inode;
continue;
@@ -9142,13 +9150,28 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
disk_io_size -= bytes;
} while (disk_io_size);
- atomic_inc(&priv.pending);
+ atomic_inc(&priv->pending);
+ priv->cb = cb;
+ priv->cb_ctx = cb_ctx;
+
btrfs_submit_bio(bbio, 0);
- if (atomic_dec_return(&priv.pending))
- io_wait_event(priv.wait, !atomic_read(&priv.pending));
- /* See btrfs_encoded_read_endio() for ordering. */
- return blk_status_to_errno(READ_ONCE(priv.status));
+ if (!atomic_dec_return(&priv->pending)) {
+ cb(cb_ctx, blk_status_to_errno(READ_ONCE(priv->status)));
+ kfree(priv);
+ }
+
+ return 0;
+}
+
+void btrfs_encoded_read_wait_cb(void *ctx, int err)
+{
+ struct btrfs_encoded_read_wait_ctx *priv = ctx;
+
+ priv->err = err;
+ priv->done = true;
+
+ wake_up(&priv->wait);
}
static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
@@ -9166,6 +9189,7 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
u64 cur;
size_t page_offset;
ssize_t ret;
+ struct btrfs_encoded_read_wait_ctx wait_ctx;
nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
@@ -9177,11 +9201,23 @@ static ssize_t btrfs_encoded_read_regular(struct kiocb *iocb,
goto out;
}
+ wait_ctx.done = false;
+ init_waitqueue_head(&wait_ctx.wait);
+
ret = btrfs_encoded_read_regular_fill_pages(inode, start, disk_bytenr,
- disk_io_size, pages);
+ disk_io_size, pages,
+ btrfs_encoded_read_wait_cb,
+ &wait_ctx);
if (ret)
goto out;
+ io_wait_event(wait_ctx.wait, wait_ctx.done);
+
+ if (wait_ctx.err) {
+ ret = wait_ctx.err;
+ goto out;
+ }
+
unlock_extent(io_tree, start, lockend, cached_state);
btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
*unlocked = true;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 619fa0b8b3f6..52f653c6671e 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5613,6 +5613,7 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
u64 disk_bytenr, disk_num_bytes;
u32 data_offset;
struct btrfs_cmd_header *hdr;
+ struct btrfs_encoded_read_wait_ctx wait_ctx;
u32 crc;
int ret;
@@ -5671,6 +5672,9 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
goto out;
}
+ wait_ctx.done = false;
+ init_waitqueue_head(&wait_ctx.wait);
+
/*
* Note that send_buf is a mapping of send_buf_pages, so this is really
* reading into send_buf.
@@ -5678,10 +5682,19 @@ static int send_encoded_extent(struct send_ctx *sctx, struct btrfs_path *path,
ret = btrfs_encoded_read_regular_fill_pages(BTRFS_I(inode), offset,
disk_bytenr, disk_num_bytes,
sctx->send_buf_pages +
- (data_offset >> PAGE_SHIFT));
+ (data_offset >> PAGE_SHIFT),
+ btrfs_encoded_read_wait_cb,
+ &wait_ctx);
if (ret)
goto out;
+ io_wait_event(wait_ctx.wait, wait_ctx.done);
+
+ if (wait_ctx.err) {
+ ret = wait_ctx.err;
+ goto out;
+ }
+
hdr = (struct btrfs_cmd_header *)sctx->send_buf;
hdr->len = cpu_to_le32(sctx->send_size + disk_num_bytes - sizeof(*hdr));
hdr->crc = 0;
--
2.44.2
next prev parent reply other threads:[~2024-10-14 17:18 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2024-10-14 17:18 [PATCH v3 0/5] btrfs: encoded reads via io_uring Mark Harmstone
2024-10-14 17:18 ` [PATCH 1/5] btrfs: remove pointless addition in btrfs_encoded_read Mark Harmstone
2024-10-14 17:18 ` Mark Harmstone [this message]
2024-10-15 15:23 ` [PATCH 2/5] btrfs: change btrfs_encoded_read_regular_fill_pages to take a callback David Sterba
2024-10-21 13:21 ` David Sterba
2024-10-14 17:18 ` [PATCH 3/5] btrfs: change btrfs_encoded_read so that reading of extent is done by caller Mark Harmstone
2024-10-14 17:18 ` [PATCH 4/5] btrfs: add nowait parameter to btrfs_encoded_read Mark Harmstone
2024-10-14 22:12 ` Jens Axboe
2024-10-15 8:48 ` Mark Harmstone
2024-10-14 17:18 ` [PATCH 5/5] btrfs: add io_uring command for encoded reads Mark Harmstone
2024-10-21 13:50 ` David Sterba
2024-10-21 16:15 ` Pavel Begunkov
2024-10-21 17:05 ` Mark Harmstone
2024-10-21 18:23 ` David Sterba
2024-10-22 9:12 ` Mark Harmstone
2024-10-14 17:44 ` [PATCH v3 0/5] btrfs: encoded reads via io_uring Boris Burkov
2024-10-15 8:50 ` Mark Harmstone
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20241014171838.304953-3-maharmstone@fb.com \
--to=maharmstone@fb.com \
--cc=io-uring@vger.kernel.org \
--cc=linux-btrfs@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox