* [PATCH 1/5] brd: pass a bvec pointer to brd_do_bvec
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
@ 2025-04-28 14:09 ` Christoph Hellwig
2025-04-28 14:09 ` [PATCH 2/5] brd: remove the sector variable in brd_submit_bio Christoph Hellwig
` (5 subsequent siblings)
6 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-28 14:09 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block, Yu Kuai, Hannes Reinecke
Pass the bvec to brd_do_bvec instead of marshalling the information into
individual arguments.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/block/brd.c | 35 +++++++++++++----------------------
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 292f127cae0a..c8974bc545fb 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -189,12 +189,10 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
/*
* Process a single bvec of a bio.
*/
-static int brd_do_bvec(struct brd_device *brd, struct page *page,
- unsigned int len, unsigned int off, blk_opf_t opf,
- sector_t sector)
+static int brd_rw_bvec(struct brd_device *brd, struct bio_vec *bv,
+ blk_opf_t opf, sector_t sector)
{
void *mem;
- int err = 0;
if (op_is_write(opf)) {
/*
@@ -202,24 +200,23 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
* block or filesystem layers from page reclaim.
*/
gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
+ int err;
- err = copy_to_brd_setup(brd, sector, len, gfp);
+ err = copy_to_brd_setup(brd, sector, bv->bv_len, gfp);
if (err)
- goto out;
+ return err;
}
- mem = kmap_atomic(page);
+ mem = kmap_atomic(bv->bv_page);
if (!op_is_write(opf)) {
- copy_from_brd(mem + off, brd, sector, len);
- flush_dcache_page(page);
+ copy_from_brd(mem + bv->bv_offset, brd, sector, bv->bv_len);
+ flush_dcache_page(bv->bv_page);
} else {
- flush_dcache_page(page);
- copy_to_brd(brd, mem + off, sector, len);
+ flush_dcache_page(bv->bv_page);
+ copy_to_brd(brd, mem + bv->bv_offset, sector, bv->bv_len);
}
kunmap_atomic(mem);
-
-out:
- return err;
+ return 0;
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
@@ -255,15 +252,9 @@ static void brd_submit_bio(struct bio *bio)
}
bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
int err;
- /* Don't support un-aligned buffer */
- WARN_ON_ONCE((bvec.bv_offset & (SECTOR_SIZE - 1)) ||
- (len & (SECTOR_SIZE - 1)));
-
- err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
- bio->bi_opf, sector);
+ err = brd_rw_bvec(brd, &bvec, bio->bi_opf, sector);
if (err) {
if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
@@ -272,7 +263,7 @@ static void brd_submit_bio(struct bio *bio)
bio_io_error(bio);
return;
}
- sector += len >> SECTOR_SHIFT;
+ sector += bvec.bv_len >> SECTOR_SHIFT;
}
bio_endio(bio);
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH 2/5] brd: remove the sector variable in brd_submit_bio
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
2025-04-28 14:09 ` [PATCH 1/5] brd: pass a bvec pointer to brd_do_bvec Christoph Hellwig
@ 2025-04-28 14:09 ` Christoph Hellwig
2025-04-28 14:09 ` [PATCH 3/5] brd: use bvec_kmap_local in brd_do_bvec Christoph Hellwig
` (4 subsequent siblings)
6 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-28 14:09 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block, Yu Kuai, Hannes Reinecke
The bvec iter iterates over the sector already, no need to duplicate the
work.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/block/brd.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index c8974bc545fb..91eb50126355 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -241,12 +241,12 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- sector_t sector = bio->bi_iter.bi_sector;
struct bio_vec bvec;
struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
- brd_do_discard(brd, sector, bio->bi_iter.bi_size);
+ brd_do_discard(brd, bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
bio_endio(bio);
return;
}
@@ -254,7 +254,7 @@ static void brd_submit_bio(struct bio *bio)
bio_for_each_segment(bvec, bio, iter) {
int err;
- err = brd_rw_bvec(brd, &bvec, bio->bi_opf, sector);
+ err = brd_rw_bvec(brd, &bvec, bio->bi_opf, iter.bi_sector);
if (err) {
if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
bio_wouldblock_error(bio);
@@ -263,7 +263,6 @@ static void brd_submit_bio(struct bio *bio)
bio_io_error(bio);
return;
}
- sector += bvec.bv_len >> SECTOR_SHIFT;
}
bio_endio(bio);
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH 3/5] brd: use bvec_kmap_local in brd_do_bvec
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
2025-04-28 14:09 ` [PATCH 1/5] brd: pass a bvec pointer to brd_do_bvec Christoph Hellwig
2025-04-28 14:09 ` [PATCH 2/5] brd: remove the sector variable in brd_submit_bio Christoph Hellwig
@ 2025-04-28 14:09 ` Christoph Hellwig
2025-04-28 14:09 ` [PATCH 4/5] brd: split I/O at page boundaries Christoph Hellwig
` (3 subsequent siblings)
6 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-28 14:09 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block, Yu Kuai, Hannes Reinecke
Use the proper helper to kmap a bvec in brd_do_bvec instead of directly
accessing the bvec fields and using the deprecated kmap_atomic API.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
---
drivers/block/brd.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 91eb50126355..0c70d29379f1 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -207,15 +207,15 @@ static int brd_rw_bvec(struct brd_device *brd, struct bio_vec *bv,
return err;
}
- mem = kmap_atomic(bv->bv_page);
+ mem = bvec_kmap_local(bv);
if (!op_is_write(opf)) {
- copy_from_brd(mem + bv->bv_offset, brd, sector, bv->bv_len);
+ copy_from_brd(mem, brd, sector, bv->bv_len);
flush_dcache_page(bv->bv_page);
} else {
flush_dcache_page(bv->bv_page);
- copy_to_brd(brd, mem + bv->bv_offset, sector, bv->bv_len);
+ copy_to_brd(brd, mem, sector, bv->bv_len);
}
- kunmap_atomic(mem);
+ kunmap_local(mem);
return 0;
}
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* [PATCH 4/5] brd: split I/O at page boundaries
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
` (2 preceding siblings ...)
2025-04-28 14:09 ` [PATCH 3/5] brd: use bvec_kmap_local in brd_do_bvec Christoph Hellwig
@ 2025-04-28 14:09 ` Christoph Hellwig
2025-04-28 18:07 ` Keith Busch
2025-04-28 14:09 ` [PATCH 5/5] brd: use memcpy_{to,from}_page in brd_rw_bvec Christoph Hellwig
` (2 subsequent siblings)
6 siblings, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-28 14:09 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block, Hannes Reinecke, Yu Kuai
A lot of complexity in brd stems from the fact that it tries to handle
I/O spanning two backing pages. Instead limit the size of a single
bvec iteration so that it never crosses a page boundary and remove all
the now unneeded code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
---
drivers/block/brd.c | 116 +++++++++++++-------------------------------
1 file changed, 34 insertions(+), 82 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c70d29379f1..580b2d8ce99c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -99,27 +99,6 @@ static void brd_free_pages(struct brd_device *brd)
xa_destroy(&brd->brd_pages);
}
-/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
- */
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
/*
* Copy n bytes from src to the brd starting at sector. Does not sleep.
*/
@@ -129,27 +108,13 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
struct page *page;
void *dst;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
+ memcpy(dst + offset, src, n);
kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
}
/*
@@ -161,62 +126,60 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
struct page *page;
void *src;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
if (page) {
src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
+ memcpy(dst, src + offset, n);
kunmap_atomic(src);
} else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
- }
+ memset(dst, 0, n);
}
/*
- * Process a single bvec of a bio.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int brd_rw_bvec(struct brd_device *brd, struct bio_vec *bv,
- blk_opf_t opf, sector_t sector)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
void *mem;
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
+
if (op_is_write(opf)) {
+ int err;
+
/*
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
*/
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
- int err;
-
- err = copy_to_brd_setup(brd, sector, bv->bv_len, gfp);
- if (err)
- return err;
+ err = brd_insert_page(brd, sector,
+ (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO);
+ if (err) {
+ if (err == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
+ }
}
- mem = bvec_kmap_local(bv);
+ mem = bvec_kmap_local(&bv);
if (!op_is_write(opf)) {
- copy_from_brd(mem, brd, sector, bv->bv_len);
- flush_dcache_page(bv->bv_page);
+ copy_from_brd(mem, brd, sector, bv.bv_len);
+ flush_dcache_page(bv.bv_page);
} else {
- flush_dcache_page(bv->bv_page);
- copy_to_brd(brd, mem, sector, bv->bv_len);
+ flush_dcache_page(bv.bv_page);
+ copy_to_brd(brd, mem, sector, bv.bv_len);
}
kunmap_local(mem);
- return 0;
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ return true;
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
@@ -241,8 +204,6 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
brd_do_discard(brd, bio->bi_iter.bi_sector,
@@ -251,19 +212,10 @@ static void brd_submit_bio(struct bio *bio)
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- int err;
-
- err = brd_rw_bvec(brd, &bvec, bio->bi_opf, iter.bi_sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-28 14:09 ` [PATCH 4/5] brd: split I/O at page boundaries Christoph Hellwig
@ 2025-04-28 18:07 ` Keith Busch
2025-04-29 1:38 ` Yu Kuai
0 siblings, 1 reply; 15+ messages in thread
From: Keith Busch @ 2025-04-28 18:07 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Jens Axboe, Yu Kuai, linux-block, Hannes Reinecke, Yu Kuai
On Mon, Apr 28, 2025 at 07:09:50AM -0700, Christoph Hellwig wrote:
> A lot of complexity in brd stems from the fact that it tries to handle
> I/O spanning two backing pages. Instead limit the size of a single
> bvec iteration so that it never crosses a page boundary and remove all
> the now unneeded code.
Doesn't bio_for_each_segment() already limit bvecs on page boundaries?
You'd need to use bio_for_each_bvec() to get multi-page bvecs.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-28 18:07 ` Keith Busch
@ 2025-04-29 1:38 ` Yu Kuai
2025-04-29 12:15 ` Christoph Hellwig
0 siblings, 1 reply; 15+ messages in thread
From: Yu Kuai @ 2025-04-29 1:38 UTC (permalink / raw)
To: Keith Busch, Christoph Hellwig
Cc: Jens Axboe, Yu Kuai, linux-block, Hannes Reinecke, yukuai (C)
Hi,
在 2025/04/29 2:07, Keith Busch 写道:
> On Mon, Apr 28, 2025 at 07:09:50AM -0700, Christoph Hellwig wrote:
>> A lot of complexity in brd stems from the fact that it tries to handle
>> I/O spanning two backing pages. Instead limit the size of a single
>> bvec iteration so that it never crosses a page boundary and remove all
>> the now unneeded code.
>
> Doesn't bio_for_each_segment() already limit bvecs on page boundaries?
> You'd need to use bio_for_each_bvec() to get multi-page bvecs.
I think it only limit bvecs on page boundaries on the issue side, not
disk side.
For example, if user issue an IO (2k + 4k), will bio_for_each_segment()
split this IO into (2k + 2k) and (4k + 2k), I do not test yet, but I
think the answer is no.
Thanks,
Kuai
>
> .
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-29 1:38 ` Yu Kuai
@ 2025-04-29 12:15 ` Christoph Hellwig
2025-04-29 21:17 ` Keith Busch
0 siblings, 1 reply; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-29 12:15 UTC (permalink / raw)
To: Yu Kuai
Cc: Keith Busch, Christoph Hellwig, Jens Axboe, linux-block,
Hannes Reinecke, yukuai (C)
On Tue, Apr 29, 2025 at 09:38:28AM +0800, Yu Kuai wrote:
> Hi,
>
> 在 2025/04/29 2:07, Keith Busch 写道:
>> On Mon, Apr 28, 2025 at 07:09:50AM -0700, Christoph Hellwig wrote:
>>> A lot of complexity in brd stems from the fact that it tries to handle
>>> I/O spanning two backing pages. Instead limit the size of a single
>>> bvec iteration so that it never crosses a page boundary and remove all
>>> the now unneeded code.
>>
>> Doesn't bio_for_each_segment() already limit bvecs on page boundaries?
>> You'd need to use bio_for_each_bvec() to get multi-page bvecs.
>
> I think it only limit bvecs on page boundaries on the issue side, not
> disk side.
>
> For example, if user issue an IO (2k + 4k), will bio_for_each_segment()
> split this IO into (2k + 2k) and (4k + 2k), I do not test yet, but I
> think the answer is no.
Exactly. I got this wrong with zram, where it only triggers with larger
than 4k page sizes, and I got this wrong here on my first attempt as
well. Fortunately testing found it quickly. I thought the comment and
commit message document the issue well enough, but I'm open to better
wording.
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-29 12:15 ` Christoph Hellwig
@ 2025-04-29 21:17 ` Keith Busch
0 siblings, 0 replies; 15+ messages in thread
From: Keith Busch @ 2025-04-29 21:17 UTC (permalink / raw)
To: Christoph Hellwig
Cc: Yu Kuai, Jens Axboe, linux-block, Hannes Reinecke, yukuai (C)
On Tue, Apr 29, 2025 at 02:15:29PM +0200, Christoph Hellwig wrote:
> On Tue, Apr 29, 2025 at 09:38:28AM +0800, Yu Kuai wrote:
> > Hi,
> >
> > 在 2025/04/29 2:07, Keith Busch 写道:
> >> On Mon, Apr 28, 2025 at 07:09:50AM -0700, Christoph Hellwig wrote:
> >>> A lot of complexity in brd stems from the fact that it tries to handle
> >>> I/O spanning two backing pages. Instead limit the size of a single
> >>> bvec iteration so that it never crosses a page boundary and remove all
> >>> the now unneeded code.
> >>
> >> Doesn't bio_for_each_segment() already limit bvecs on page boundaries?
> >> You'd need to use bio_for_each_bvec() to get multi-page bvecs.
> >
> > I think it only limit bvecs on page boundaries on the issue side, not
> > disk side.
> >
> > For example, if user issue an IO (2k + 4k), will bio_for_each_segment()
> > split this IO into (2k + 2k) and (4k + 2k), I do not test yet, but I
> > think the answer is no.
>
> Exactly. I got this wrong with zram, where it only triggers with larger
> than 4k page sizes, and I got this wrong here on my first attempt as
> well. Fortunately testing found it quickly. I thought the comment and
> commit message document the issue well enough, but I'm open to better
> wording.
Ah, it just clicked for me that you're talking about the pages returned
from brd_lookup_page (the "backing pages", as you said), not the bio's
pages. Sorry about that.
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH 5/5] brd: use memcpy_{to,from}_page in brd_rw_bvec
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
` (3 preceding siblings ...)
2025-04-28 14:09 ` [PATCH 4/5] brd: split I/O at page boundaries Christoph Hellwig
@ 2025-04-28 14:09 ` Christoph Hellwig
2025-04-28 15:30 ` brd cleanups v2 Johannes Thumshirn
2025-04-28 17:46 ` Jens Axboe
6 siblings, 0 replies; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-28 14:09 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block, Hannes Reinecke, Yu Kuai
Use the proper helpers to copy to/from potential highmem pages, which
do a local instead of atomic kmap underneath, and perform
flush_dcache_page where needed. This also simplifies the code so much
that the separate read/write helpers are not required any more.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
---
drivers/block/brd.c | 58 ++++++++++-----------------------------------
1 file changed, 13 insertions(+), 45 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 580b2d8ce99c..fa1290992a7f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -99,43 +99,6 @@ static void brd_free_pages(struct brd_device *brd)
xa_destroy(&brd->brd_pages);
}
-/*
- * Copy n bytes from src to the brd starting at sector. Does not sleep.
- */
-static void copy_to_brd(struct brd_device *brd, const void *src,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
-
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst + offset, src, n);
- kunmap_atomic(dst);
-}
-
-/*
- * Copy n bytes to dst from the brd starting at sector. Does not sleep.
- */
-static void copy_from_brd(void *dst, struct brd_device *brd,
- sector_t sector, size_t n)
-{
- struct page *page;
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
-
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src + offset, n);
- kunmap_atomic(src);
- } else
- memset(dst, 0, n);
-}
-
/*
* Process a single segment. The segment is capped to not cross page boundaries
* in both the bio and the brd backing memory.
@@ -146,7 +109,8 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
sector_t sector = bio->bi_iter.bi_sector;
u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
blk_opf_t opf = bio->bi_opf;
- void *mem;
+ struct page *page;
+ void *kaddr;
bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
@@ -168,15 +132,19 @@ static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
}
}
- mem = bvec_kmap_local(&bv);
- if (!op_is_write(opf)) {
- copy_from_brd(mem, brd, sector, bv.bv_len);
- flush_dcache_page(bv.bv_page);
+ page = brd_lookup_page(brd, sector);
+
+ kaddr = bvec_kmap_local(&bv);
+ if (op_is_write(opf)) {
+ BUG_ON(!page);
+ memcpy_to_page(page, offset, kaddr, bv.bv_len);
} else {
- flush_dcache_page(bv.bv_page);
- copy_to_brd(brd, mem, sector, bv.bv_len);
+ if (page)
+ memcpy_from_page(kaddr, page, offset, bv.bv_len);
+ else
+ memset(kaddr, 0, bv.bv_len);
}
- kunmap_local(mem);
+ kunmap_local(kaddr);
bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
return true;
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: brd cleanups v2
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
` (4 preceding siblings ...)
2025-04-28 14:09 ` [PATCH 5/5] brd: use memcpy_{to,from}_page in brd_rw_bvec Christoph Hellwig
@ 2025-04-28 15:30 ` Johannes Thumshirn
2025-04-28 17:46 ` Jens Axboe
6 siblings, 0 replies; 15+ messages in thread
From: Johannes Thumshirn @ 2025-04-28 15:30 UTC (permalink / raw)
To: hch, Jens Axboe, Yu Kuai; +Cc: linux-block@vger.kernel.org
For the series:
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
^ permalink raw reply [flat|nested] 15+ messages in thread* Re: brd cleanups v2
2025-04-28 14:09 brd cleanups v2 Christoph Hellwig
` (5 preceding siblings ...)
2025-04-28 15:30 ` brd cleanups v2 Johannes Thumshirn
@ 2025-04-28 17:46 ` Jens Axboe
6 siblings, 0 replies; 15+ messages in thread
From: Jens Axboe @ 2025-04-28 17:46 UTC (permalink / raw)
To: Yu Kuai, Christoph Hellwig; +Cc: linux-block
On Mon, 28 Apr 2025 07:09:46 -0700, Christoph Hellwig wrote:
> this series has various brd cleanups mostly to get rid kmap_atomic and
> poking into the bvec fields. It is used as the baseline for the discard
> fixes from Yu Kuai.
>
> Changes since v1:
> - fix a subject
> - minor tweaks to formatting in brd_rw_bvec
>
> [...]
Applied, thanks!
[1/5] brd: pass a bvec pointer to brd_do_bvec
commit: 75d99aa279561fc6d91afec8bdd1b56548f860a2
[2/5] brd: remove the sector variable in brd_submit_bio
commit: 857aba38b56a0d8fa868706c57053dcd4282e436
[3/5] brd: use bvec_kmap_local in brd_do_bvec
commit: 95a375a3bed3b8734059351ba046a6fabdbde485
[4/5] brd: split I/O at page boundaries
commit: 3185444f0504ca8ff54e2a7275f1ff60a6a6cf0c
[5/5] brd: use memcpy_{to,from}_page in brd_rw_bvec
commit: 53ec1abce79c986dc59e59d0c60d00088bcdf32a
Best regards,
--
Jens Axboe
^ permalink raw reply [flat|nested] 15+ messages in thread
* [PATCH 4/5] brd: split I/O at page boundaries
2025-04-21 7:26 brd cleanups Christoph Hellwig
@ 2025-04-21 7:26 ` Christoph Hellwig
2025-04-22 9:30 ` Hannes Reinecke
2025-04-22 11:10 ` Yu Kuai
0 siblings, 2 replies; 15+ messages in thread
From: Christoph Hellwig @ 2025-04-21 7:26 UTC (permalink / raw)
To: Jens Axboe, Yu Kuai; +Cc: linux-block
A lot of complexity in brd stems from the fact that it tries to handle
I/O spanning two backing pages. Instead limit the size of a single
bvec iteration so that it never crosses a page boundary and remove all
the now unneeded code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
drivers/block/brd.c | 116 +++++++++++++-------------------------------
1 file changed, 34 insertions(+), 82 deletions(-)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 0c70d29379f1..580b2d8ce99c 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -99,27 +99,6 @@ static void brd_free_pages(struct brd_device *brd)
xa_destroy(&brd->brd_pages);
}
-/*
- * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
- */
-static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
- gfp_t gfp)
-{
- unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- int ret;
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- ret = brd_insert_page(brd, sector, gfp);
- if (ret)
- return ret;
- if (copy < n) {
- sector += copy >> SECTOR_SHIFT;
- ret = brd_insert_page(brd, sector, gfp);
- }
- return ret;
-}
-
/*
* Copy n bytes from src to the brd starting at sector. Does not sleep.
*/
@@ -129,27 +108,13 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
struct page *page;
void *dst;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
BUG_ON(!page);
dst = kmap_atomic(page);
- memcpy(dst + offset, src, copy);
+ memcpy(dst + offset, src, n);
kunmap_atomic(dst);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- BUG_ON(!page);
-
- dst = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(dst);
- }
}
/*
@@ -161,62 +126,60 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
struct page *page;
void *src;
unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
- size_t copy;
- copy = min_t(size_t, n, PAGE_SIZE - offset);
page = brd_lookup_page(brd, sector);
if (page) {
src = kmap_atomic(page);
- memcpy(dst, src + offset, copy);
+ memcpy(dst, src + offset, n);
kunmap_atomic(src);
} else
- memset(dst, 0, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- page = brd_lookup_page(brd, sector);
- if (page) {
- src = kmap_atomic(page);
- memcpy(dst, src, copy);
- kunmap_atomic(src);
- } else
- memset(dst, 0, copy);
- }
+ memset(dst, 0, n);
}
/*
- * Process a single bvec of a bio.
+ * Process a single segment. The segment is capped to not cross page boundaries
+ * in both the bio and the brd backing memory.
*/
-static int brd_rw_bvec(struct brd_device *brd, struct bio_vec *bv,
- blk_opf_t opf, sector_t sector)
+static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
{
+ struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
+ sector_t sector = bio->bi_iter.bi_sector;
+ u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
+ blk_opf_t opf = bio->bi_opf;
void *mem;
+ bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
+
if (op_is_write(opf)) {
+ int err;
+
/*
* Must use NOIO because we don't want to recurse back into the
* block or filesystem layers from page reclaim.
*/
- gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
- int err;
-
- err = copy_to_brd_setup(brd, sector, bv->bv_len, gfp);
- if (err)
- return err;
+ err = brd_insert_page(brd, sector,
+ (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO);
+ if (err) {
+ if (err == -ENOMEM && (opf & REQ_NOWAIT))
+ bio_wouldblock_error(bio);
+ else
+ bio_io_error(bio);
+ return false;
+ }
}
- mem = bvec_kmap_local(bv);
+ mem = bvec_kmap_local(&bv);
if (!op_is_write(opf)) {
- copy_from_brd(mem, brd, sector, bv->bv_len);
- flush_dcache_page(bv->bv_page);
+ copy_from_brd(mem, brd, sector, bv.bv_len);
+ flush_dcache_page(bv.bv_page);
} else {
- flush_dcache_page(bv->bv_page);
- copy_to_brd(brd, mem, sector, bv->bv_len);
+ flush_dcache_page(bv.bv_page);
+ copy_to_brd(brd, mem, sector, bv.bv_len);
}
kunmap_local(mem);
- return 0;
+
+ bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
+ return true;
}
static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
@@ -241,8 +204,6 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
- struct bio_vec bvec;
- struct bvec_iter iter;
if (unlikely(op_is_discard(bio->bi_opf))) {
brd_do_discard(brd, bio->bi_iter.bi_sector,
@@ -251,19 +212,10 @@ static void brd_submit_bio(struct bio *bio)
return;
}
- bio_for_each_segment(bvec, bio, iter) {
- int err;
-
- err = brd_rw_bvec(brd, &bvec, bio->bi_opf, iter.bi_sector);
- if (err) {
- if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
- bio_wouldblock_error(bio);
- return;
- }
- bio_io_error(bio);
+ do {
+ if (!brd_rw_bvec(brd, bio))
return;
- }
- }
+ } while (bio->bi_iter.bi_size);
bio_endio(bio);
}
--
2.47.2
^ permalink raw reply related [flat|nested] 15+ messages in thread* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-21 7:26 ` [PATCH 4/5] brd: split I/O at page boundaries Christoph Hellwig
@ 2025-04-22 9:30 ` Hannes Reinecke
2025-04-22 11:10 ` Yu Kuai
1 sibling, 0 replies; 15+ messages in thread
From: Hannes Reinecke @ 2025-04-22 9:30 UTC (permalink / raw)
To: Christoph Hellwig, Jens Axboe, Yu Kuai; +Cc: linux-block
On 4/21/25 09:26, Christoph Hellwig wrote:
> A lot of complexity in brd stems from the fact that it tries to handle
> I/O spanning two backing pages. Instead limit the size of a single
> bvec iteration so that it never crosses a page boundary and remove all
> the now unneeded code.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> drivers/block/brd.c | 116 +++++++++++++-------------------------------
> 1 file changed, 34 insertions(+), 82 deletions(-)
>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cheers,
Hannes
--
Dr. Hannes Reinecke Kernel Storage Architect
hare@suse.de +49 911 74053 688
SUSE Software Solutions GmbH, Frankenstr. 146, 90461 Nürnberg
HRB 36809 (AG Nürnberg), GF: I. Totev, A. McDonald, W. Knoblich
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH 4/5] brd: split I/O at page boundaries
2025-04-21 7:26 ` [PATCH 4/5] brd: split I/O at page boundaries Christoph Hellwig
2025-04-22 9:30 ` Hannes Reinecke
@ 2025-04-22 11:10 ` Yu Kuai
1 sibling, 0 replies; 15+ messages in thread
From: Yu Kuai @ 2025-04-22 11:10 UTC (permalink / raw)
To: Christoph Hellwig, Jens Axboe, Yu Kuai; +Cc: linux-block, yukuai (C)
在 2025/04/21 15:26, Christoph Hellwig 写道:
> A lot of complexity in brd stems from the fact that it tries to handle
> I/O spanning two backing pages. Instead limit the size of a single
> bvec iteration so that it never crosses a page boundary and remove all
> the now unneeded code.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> drivers/block/brd.c | 116 +++++++++++++-------------------------------
> 1 file changed, 34 insertions(+), 82 deletions(-)
>
LGTM
Reviewed-by: Yu Kuai <yukuai3@huawei.com>
> diff --git a/drivers/block/brd.c b/drivers/block/brd.c
> index 0c70d29379f1..580b2d8ce99c 100644
> --- a/drivers/block/brd.c
> +++ b/drivers/block/brd.c
> @@ -99,27 +99,6 @@ static void brd_free_pages(struct brd_device *brd)
> xa_destroy(&brd->brd_pages);
> }
>
> -/*
> - * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
> - */
> -static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n,
> - gfp_t gfp)
> -{
> - unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
> - size_t copy;
> - int ret;
> -
> - copy = min_t(size_t, n, PAGE_SIZE - offset);
> - ret = brd_insert_page(brd, sector, gfp);
> - if (ret)
> - return ret;
> - if (copy < n) {
> - sector += copy >> SECTOR_SHIFT;
> - ret = brd_insert_page(brd, sector, gfp);
> - }
> - return ret;
> -}
> -
> /*
> * Copy n bytes from src to the brd starting at sector. Does not sleep.
> */
> @@ -129,27 +108,13 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
> struct page *page;
> void *dst;
> unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
> - size_t copy;
>
> - copy = min_t(size_t, n, PAGE_SIZE - offset);
> page = brd_lookup_page(brd, sector);
> BUG_ON(!page);
>
> dst = kmap_atomic(page);
> - memcpy(dst + offset, src, copy);
> + memcpy(dst + offset, src, n);
> kunmap_atomic(dst);
> -
> - if (copy < n) {
> - src += copy;
> - sector += copy >> SECTOR_SHIFT;
> - copy = n - copy;
> - page = brd_lookup_page(brd, sector);
> - BUG_ON(!page);
> -
> - dst = kmap_atomic(page);
> - memcpy(dst, src, copy);
> - kunmap_atomic(dst);
> - }
> }
>
> /*
> @@ -161,62 +126,60 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
> struct page *page;
> void *src;
> unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
> - size_t copy;
>
> - copy = min_t(size_t, n, PAGE_SIZE - offset);
> page = brd_lookup_page(brd, sector);
> if (page) {
> src = kmap_atomic(page);
> - memcpy(dst, src + offset, copy);
> + memcpy(dst, src + offset, n);
> kunmap_atomic(src);
> } else
> - memset(dst, 0, copy);
> -
> - if (copy < n) {
> - dst += copy;
> - sector += copy >> SECTOR_SHIFT;
> - copy = n - copy;
> - page = brd_lookup_page(brd, sector);
> - if (page) {
> - src = kmap_atomic(page);
> - memcpy(dst, src, copy);
> - kunmap_atomic(src);
> - } else
> - memset(dst, 0, copy);
> - }
> + memset(dst, 0, n);
> }
>
> /*
> - * Process a single bvec of a bio.
> + * Process a single segment. The segment is capped to not cross page boundaries
> + * in both the bio and the brd backing memory.
> */
> -static int brd_rw_bvec(struct brd_device *brd, struct bio_vec *bv,
> - blk_opf_t opf, sector_t sector)
> +static bool brd_rw_bvec(struct brd_device *brd, struct bio *bio)
> {
> + struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
> + sector_t sector = bio->bi_iter.bi_sector;
> + u32 offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
> + blk_opf_t opf = bio->bi_opf;
> void *mem;
>
> + bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset);
> +
> if (op_is_write(opf)) {
> + int err;
> +
> /*
> * Must use NOIO because we don't want to recurse back into the
> * block or filesystem layers from page reclaim.
> */
> - gfp_t gfp = opf & REQ_NOWAIT ? GFP_NOWAIT : GFP_NOIO;
> - int err;
> -
> - err = copy_to_brd_setup(brd, sector, bv->bv_len, gfp);
> - if (err)
> - return err;
> + err = brd_insert_page(brd, sector,
> + (opf & REQ_NOWAIT) ? GFP_NOWAIT : GFP_NOIO);
> + if (err) {
> + if (err == -ENOMEM && (opf & REQ_NOWAIT))
> + bio_wouldblock_error(bio);
> + else
> + bio_io_error(bio);
> + return false;
> + }
> }
>
> - mem = bvec_kmap_local(bv);
> + mem = bvec_kmap_local(&bv);
> if (!op_is_write(opf)) {
> - copy_from_brd(mem, brd, sector, bv->bv_len);
> - flush_dcache_page(bv->bv_page);
> + copy_from_brd(mem, brd, sector, bv.bv_len);
> + flush_dcache_page(bv.bv_page);
> } else {
> - flush_dcache_page(bv->bv_page);
> - copy_to_brd(brd, mem, sector, bv->bv_len);
> + flush_dcache_page(bv.bv_page);
> + copy_to_brd(brd, mem, sector, bv.bv_len);
> }
> kunmap_local(mem);
> - return 0;
> +
> + bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len);
> + return true;
> }
>
> static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
> @@ -241,8 +204,6 @@ static void brd_do_discard(struct brd_device *brd, sector_t sector, u32 size)
> static void brd_submit_bio(struct bio *bio)
> {
> struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
> - struct bio_vec bvec;
> - struct bvec_iter iter;
>
> if (unlikely(op_is_discard(bio->bi_opf))) {
> brd_do_discard(brd, bio->bi_iter.bi_sector,
> @@ -251,19 +212,10 @@ static void brd_submit_bio(struct bio *bio)
> return;
> }
>
> - bio_for_each_segment(bvec, bio, iter) {
> - int err;
> -
> - err = brd_rw_bvec(brd, &bvec, bio->bi_opf, iter.bi_sector);
> - if (err) {
> - if (err == -ENOMEM && bio->bi_opf & REQ_NOWAIT) {
> - bio_wouldblock_error(bio);
> - return;
> - }
> - bio_io_error(bio);
> + do {
> + if (!brd_rw_bvec(brd, bio))
> return;
> - }
> - }
> + } while (bio->bi_iter.bi_size);
>
> bio_endio(bio);
> }
>
^ permalink raw reply [flat|nested] 15+ messages in thread